/*
 * Copyright © 2019 Valve Corporation
 * Copyright © 2018 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_meta.h"
#include "radv_private.h"
#include "vk_format.h"

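/* Builds the compute shader that expands FMASK-compressed MSAA color data.
 * FMASK is per-pixel metadata on AMD GPUs that lets several samples of a
 * pixel share a single stored color value; "expanding" rewrites the surface
 * so that every sample holds its own value. The shader fetches each of the N
 * samples through a sampled-image view (a path that honors FMASK) and writes
 * them back through a storage-image view of the same surface, leaving the
 * data fully expanded.
 */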
static nir_shader *
build_fmask_expand_compute_shader(struct radv_device *device, int samples)
{
   const struct glsl_type *type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_MS, false, true, GLSL_TYPE_FLOAT);
   const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_MS, true, GLSL_TYPE_FLOAT);

   nir_builder b =
      radv_meta_init_shader(device, MESA_SHADER_COMPUTE, "meta_fmask_expand_cs-%d", samples);
   b.shader->info.workgroup_size[0] = 8;
   b.shader->info.workgroup_size[1] = 8;

   nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, type, "s_tex");
   input_img->data.descriptor_set = 0;
   input_img->data.binding = 0;

   nir_variable *output_img = nir_variable_create(b.shader, nir_var_image, img_type, "out_img");
   output_img->data.descriptor_set = 0;
   output_img->data.binding = 1;
   output_img->data.access = ACCESS_NON_READABLE;

   nir_ssa_def *input_img_deref = &nir_build_deref_var(&b, input_img)->dest.ssa;
   nir_ssa_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa;

   nir_ssa_def *tex_coord = get_global_ids(&b, 3);

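   /* Fetch every sample of the current texel with txf_ms through the sampled
    * image; this read path goes through FMASK, so each fetch returns the
    * decompressed per-sample color.
    */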
   nir_tex_instr *tex_instr[8];
   for (uint32_t i = 0; i < samples; i++) {
      tex_instr[i] = nir_tex_instr_create(b.shader, 3);

      nir_tex_instr *tex = tex_instr[i];
      tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
      tex->op = nir_texop_txf_ms;
      tex->src[0].src_type = nir_tex_src_coord;
      tex->src[0].src = nir_src_for_ssa(tex_coord);
      tex->src[1].src_type = nir_tex_src_ms_index;
      tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, i));
      tex->src[2].src_type = nir_tex_src_texture_deref;
      tex->src[2].src = nir_src_for_ssa(input_img_deref);
      tex->dest_type = nir_type_float32;
      tex->is_array = true;
      tex->coord_components = 3;

      nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
      nir_builder_instr_insert(&b, &tex->instr);
   }

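   /* Image stores take a vec4 coordinate; the fourth component is unused for
    * a 2D-array image, so pad it with an undef.
    */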
   nir_ssa_def *img_coord =
      nir_vec4(&b, nir_channel(&b, tex_coord, 0), nir_channel(&b, tex_coord, 1),
               nir_channel(&b, tex_coord, 2), nir_ssa_undef(&b, 1, 32));

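   /* Write every fetched sample back through the storage image, which ignores
    * FMASK, so the surface ends up storing each sample explicitly.
    */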
   for (uint32_t i = 0; i < samples; i++) {
      nir_ssa_def *outval = &tex_instr[i]->dest.ssa;

      nir_image_deref_store(&b, output_img_deref, img_coord, nir_imm_int(&b, i), outval,
                            nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_MS, .image_array = true);
   }

   return b.shader;
}

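/* Expands the FMASK-compressed color data of an MSAA image in place using the
 * compute pipeline built above, then re-initializes FMASK to its fully
 * expanded (identity) state, so the image can subsequently be accessed by
 * paths that do not understand FMASK compression.
 */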
void
radv_expand_fmask_image_inplace(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
                                const VkImageSubresourceRange *subresourceRange)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;
   const uint32_t samples = image->info.samples;
   const uint32_t samples_log2 = ffs(samples) - 1;
   unsigned layer_count = radv_get_layerCount(image, subresourceRange);
   struct radv_image_view iview;

   radv_meta_save(&saved_state, cmd_buffer,
                  RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS);

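   /* One pipeline exists per sample count; pick and bind the one matching
    * this image.
    */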
   VkPipeline pipeline = device->meta_state.fmask_expand.pipeline[samples_log2];

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        pipeline);

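   /* Make earlier writes to the image visible to the shader reads and writes
    * performed by the expand dispatch.
    */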
   cmd_buffer->state.flush_bits |= radv_dst_access_flush(
      cmd_buffer, VK_ACCESS_2_SHADER_READ_BIT | VK_ACCESS_2_SHADER_WRITE_BIT, image);

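   /* A single view covering the selected layers is bound both as the sampled
    * image (read path) and as the storage image (write path). The sRGB
    * variant of the format is dropped, presumably so no sRGB conversion is
    * applied while copying samples.
    */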
   radv_image_view_init(&iview, device,
                        &(VkImageViewCreateInfo){
                           .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                           .image = radv_image_to_handle(image),
                           .viewType = radv_meta_get_view_type(image),
                           .format = vk_format_no_srgb(image->vk.format),
                           .subresourceRange =
                              {
                                 .aspectMask = subresourceRange->aspectMask,
                                 .baseMipLevel = 0,
                                 .levelCount = 1,
                                 .baseArrayLayer = subresourceRange->baseArrayLayer,
                                 .layerCount = layer_count,
                              },
                        },
                        0, NULL);

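   /* Bind the view at binding 0 (sampled image) and binding 1 (storage image)
    * through a push descriptor set.
    */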
   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE,
      cmd_buffer->device->meta_state.fmask_expand.p_layout, 0, /* set */
      2, /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){{.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                .dstBinding = 0,
                                .dstArrayElement = 0,
                                .descriptorCount = 1,
                                .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                                .pImageInfo =
                                   (VkDescriptorImageInfo[]){
                                      {.sampler = VK_NULL_HANDLE,
                                       .imageView = radv_image_view_to_handle(&iview),
                                       .imageLayout = VK_IMAGE_LAYOUT_GENERAL},
                                   }},
                               {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                .dstBinding = 1,
                                .dstArrayElement = 0,
                                .descriptorCount = 1,
                                .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                                .pImageInfo = (VkDescriptorImageInfo[]){
                                   {.sampler = VK_NULL_HANDLE,
                                    .imageView = radv_image_view_to_handle(&iview),
                                    .imageLayout = VK_IMAGE_LAYOUT_GENERAL},
                                }}});

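   /* Launch one invocation per pixel and per layer; the shader itself loops
    * over the samples.
    */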
   radv_unaligned_dispatch(cmd_buffer, image->info.width, image->info.height, layer_count);

   radv_image_view_finish(&iview);

   radv_meta_restore(&saved_state, cmd_buffer);

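   /* Wait for the compute writes to finish and make them visible before the
    * image is used again.
    */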
   cmd_buffer->state.flush_bits |=
      RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
      radv_src_access_flush(cmd_buffer, VK_ACCESS_2_SHADER_WRITE_BIT, image);

   /* Re-initialize FMASK in fully expanded mode. */
   cmd_buffer->state.flush_bits |= radv_init_fmask(cmd_buffer, image, subresourceRange);
}

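/* Destroys the per-sample-count pipelines, the pipeline layout and the
 * descriptor set layout created by radv_device_init_meta_fmask_expand_state().
 */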
void
radv_device_finish_meta_fmask_expand_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) {
      radv_DestroyPipeline(radv_device_to_handle(device), state->fmask_expand.pipeline[i],
                           &state->alloc);
   }
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->fmask_expand.p_layout,
                              &state->alloc);

   device->vk.dispatch_table.DestroyDescriptorSetLayout(
      radv_device_to_handle(device), state->fmask_expand.ds_layout, &state->alloc);
}

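/* Builds the expand shader for the given sample count and wraps it in a
 * compute pipeline that uses the shared FMASK-expand pipeline layout.
 */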
static VkResult
create_fmask_expand_pipeline(struct radv_device *device, int samples, VkPipeline *pipeline)
{
   struct radv_meta_state *state = &device->meta_state;
   VkResult result;
   nir_shader *cs = build_fmask_expand_compute_shader(device, samples);

   VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = pipeline_shader_stage,
      .flags = 0,
      .layout = state->fmask_expand.p_layout,
   };

   result = radv_CreateComputePipelines(radv_device_to_handle(device),
                                        radv_pipeline_cache_to_handle(&state->cache), 1,
                                        &vk_pipeline_info, NULL, pipeline);

   ralloc_free(cs);
   return result;
}

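/* Creates the descriptor set layout (sampled image + storage image), the
 * pipeline layout and one compute pipeline per supported sample count for the
 * FMASK expand pass.
 */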
VkResult
radv_device_init_meta_fmask_expand_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;
   VkResult result;

   VkDescriptorSetLayoutCreateInfo ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 2,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
         {.binding = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &ds_create_info,
                                           &state->alloc, &state->fmask_expand.ds_layout);
   if (result != VK_SUCCESS)
      return result;

   VkPipelineLayoutCreateInfo color_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &state->fmask_expand.ds_layout,
      .pushConstantRangeCount = 0,
      .pPushConstantRanges = NULL,
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &color_create_info,
                                      &state->alloc, &state->fmask_expand.p_layout);
   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; i++) {
      uint32_t samples = 1 << i;
      result = create_fmask_expand_pipeline(device, samples, &state->fmask_expand.pipeline[i]);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}