1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_NIR_H
25 #define ANV_NIR_H
26
27 #include "nir/nir.h"
28 #include "anv_private.h"
29
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33
/* NIR passes and helpers used during anv graphics-pipeline compilation.
 * NOTE(review): the bool results presumably follow the usual NIR-pass
 * convention of "true when the shader was modified" — confirm in the
 * corresponding .c files.
 */

/* Decides whether multiview can be implemented by replicating primitives
 * across views instead of shader instancing (presumed from the name —
 * verify against anv_nir_lower_multiview.c). */
bool anv_check_for_primitive_replication(nir_shader **shaders,
                                         struct anv_graphics_pipeline *pipeline);

/* Lowers multiview semantics in the shader for the given pipeline. */
bool anv_nir_lower_multiview(nir_shader *shader,
                             struct anv_graphics_pipeline *pipeline);

/* Lowers sampling from YCbCr-format textures using the pipeline layout's
 * conversion state. */
bool anv_nir_lower_ycbcr_textures(nir_shader *shader,
                                  const struct anv_pipeline_layout *layout);
42
43 static inline nir_address_format
anv_nir_ssbo_addr_format(const struct anv_physical_device * pdevice,bool robust_buffer_access)44 anv_nir_ssbo_addr_format(const struct anv_physical_device *pdevice,
45 bool robust_buffer_access)
46 {
47 if (pdevice->has_a64_buffer_access) {
48 if (robust_buffer_access)
49 return nir_address_format_64bit_bounded_global;
50 else
51 return nir_address_format_64bit_global_32bit_offset;
52 } else {
53 return nir_address_format_32bit_index_offset;
54 }
55 }
56
57 static inline nir_address_format
anv_nir_ubo_addr_format(const struct anv_physical_device * pdevice,bool robust_buffer_access)58 anv_nir_ubo_addr_format(const struct anv_physical_device *pdevice,
59 bool robust_buffer_access)
60 {
61 if (pdevice->has_a64_buffer_access) {
62 if (robust_buffer_access)
63 return nir_address_format_64bit_bounded_global;
64 else
65 return nir_address_format_64bit_global_32bit_offset;
66 } else {
67 return nir_address_format_32bit_index_offset;
68 }
69 }
70
/* Lowers load_ubo intrinsics into a form the backend can consume; returns
 * true on progress (presumed NIR-pass convention — confirm in the
 * implementation). */
bool anv_nir_lower_ubo_loads(nir_shader *shader);

/* Rewrites descriptor-set/binding references in the shader according to
 * the pipeline layout and records the resulting resource mappings in
 * *map (presumed from the name — verify against
 * anv_nir_apply_pipeline_layout.c). */
void anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                                   bool robust_buffer_access,
                                   const struct anv_pipeline_layout *layout,
                                   nir_shader *shader,
                                   struct anv_pipeline_bind_map *map);

/* Computes the push-constant layout for the shader, updating prog_data
 * and the bind map; mem_ctx is the ralloc context for any allocations
 * (NOTE(review): ownership presumed — confirm with callers). */
void anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
                                 bool robust_buffer_access,
                                 nir_shader *nir,
                                 struct brw_stage_prog_data *prog_data,
                                 struct anv_pipeline_bind_map *map,
                                 void *mem_ctx);

/* Debug/consistency check that the push-constant layout in prog_data
 * agrees with the bind map. */
void anv_nir_validate_push_layout(struct brw_stage_prog_data *prog_data,
                                  struct anv_pipeline_bind_map *map);

/* Adds the base workgroup ID to workgroup-ID reads in compute shaders
 * (presumed from the name — confirm in the implementation); returns true
 * on progress. */
bool anv_nir_add_base_work_group_id(nir_shader *shader);
90
91 #ifdef __cplusplus
92 }
93 #endif
94
95 #endif /* ANV_NIR_H */
96