/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_NIR_H
#define BRW_NIR_H

#include "brw_reg.h"
#include "compiler/nir/nir.h"
#include "brw_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

int type_size_scalar(const struct glsl_type *type);
int type_size_vec4(const struct glsl_type *type);
int type_size_dvec4(const struct glsl_type *type);

static inline int
type_size_scalar_bytes(const struct glsl_type *type)
{
   return type_size_scalar(type) * 4;
}

static inline int
type_size_vec4_bytes(const struct glsl_type *type)
{
   return type_size_vec4(type) * 16;
}

/* Flags set in the instr->pass_flags field by i965 analysis passes */
enum {
   BRW_NIR_NON_BOOLEAN           = 0x0,

   /* Indicates that the given instruction's destination is a boolean
    * value but that it needs to be resolved before it can be used.
    * On Gen <= 5, CMP instructions return a 32-bit value where the bottom
    * bit represents the actual true/false value of the compare and the top
    * 31 bits are undefined.  In order to use this value, we have to do a
    * "resolve" operation by replacing the value of the CMP with -(x & 1)
    * to sign-extend the bottom bit to 0/~0.
    */
   BRW_NIR_BOOLEAN_NEEDS_RESOLVE = 0x1,

   /* Indicates that the given instruction's destination is a boolean
    * value that has intentionally been left unresolved.  Not all boolean
    * values need to be resolved immediately.  For instance, if we have
    *
    *    CMP r1 r2 r3
    *    CMP r4 r5 r6
    *    AND r7 r1 r4
    *
    * We don't have to resolve the result of the two CMP instructions
    * immediately because the AND still does an AND of the bottom bits.
    * Instead, we can save ourselves instructions by delaying the resolve
    * until after the AND.  The result of the two CMP instructions is left
    * as BRW_NIR_BOOLEAN_UNRESOLVED.
    */
   BRW_NIR_BOOLEAN_UNRESOLVED    = 0x2,

   /* Indicates that the given instruction's destination is a boolean
    * value that does not need a resolve.  For instance, if you AND two
    * values that are BRW_NIR_BOOLEAN_NEEDS_RESOLVE then we know that both
    * values will be 0/~0 before we get them and the result of the AND is
    * also guaranteed to be 0/~0 and does not need a resolve.
    */
   BRW_NIR_BOOLEAN_NO_RESOLVE    = 0x3,

   /* Mask for extracting the boolean status values from instr->pass_flags */
   BRW_NIR_BOOLEAN_MASK          = 0x3,
};
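
/* A minimal sketch (illustrative only; the helper name is hypothetical and
 * not part of the i965 API) of the "resolve" operation described above:
 * sign-extend the defined bottom bit of a Gen <= 5 CMP result to a full
 * 0/~0 boolean.
 */
static inline unsigned
brw_nir_example_resolve_boolean(unsigned cmp_result)
{
   /* Keep only the defined bottom bit, then negate it so 1 becomes ~0
    * (all bits set) and 0 stays 0.
    */
   return -(cmp_result & 1u);
}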

void brw_nir_analyze_boolean_resolves(nir_shader *nir);

nir_shader *brw_preprocess_nir(const struct brw_compiler *compiler,
                               nir_shader *nir);

void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader **producer, nir_shader **consumer);

bool brw_nir_lower_cs_intrinsics(nir_shader *nir,
                                 unsigned dispatch_width);
void brw_nir_lower_vs_inputs(nir_shader *nir,
                             const uint8_t *vs_attrib_wa_flags);
void brw_nir_lower_vue_inputs(nir_shader *nir,
                              const struct brw_vue_map *vue_map);
void brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue);
void brw_nir_lower_fs_inputs(nir_shader *nir,
                             const struct gen_device_info *devinfo,
                             const struct brw_wm_prog_key *key);
void brw_nir_lower_vue_outputs(nir_shader *nir, bool is_scalar);
void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue,
                               GLenum tes_primitive_mode);
void brw_nir_lower_fs_outputs(nir_shader *nir);
void brw_nir_lower_cs_shared(nir_shader *nir);

nir_shader *brw_postprocess_nir(nir_shader *nir,
                                const struct brw_compiler *compiler,
                                bool is_scalar);

bool brw_nir_apply_attribute_workarounds(nir_shader *nir,
                                         const uint8_t *attrib_wa_flags);

bool brw_nir_apply_trig_workarounds(nir_shader *nir);

void brw_nir_apply_tcs_quads_workaround(nir_shader *nir);

nir_shader *brw_nir_apply_sampler_key(nir_shader *nir,
                                      const struct brw_compiler *compiler,
                                      const struct brw_sampler_prog_key_data *key,
                                      bool is_scalar);

enum brw_reg_type brw_type_for_nir_type(const struct gen_device_info *devinfo,
                                        nir_alu_type type);

enum glsl_base_type brw_glsl_base_type_for_nir_type(nir_alu_type type);

void brw_nir_setup_glsl_uniforms(void *mem_ctx, nir_shader *shader,
                                 const struct gl_program *prog,
                                 struct brw_stage_prog_data *stage_prog_data,
                                 bool is_scalar);

void brw_nir_setup_arb_uniforms(void *mem_ctx, nir_shader *shader,
                                struct gl_program *prog,
                                struct brw_stage_prog_data *stage_prog_data);

void brw_nir_lower_patch_vertices_in_to_uniform(nir_shader *nir);

void brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
                                nir_shader *nir,
                                struct brw_ubo_range out_ranges[4]);

bool brw_nir_opt_peephole_ffma(nir_shader *shader);

nir_shader *brw_nir_optimize(nir_shader *nir,
                             const struct brw_compiler *compiler,
                             bool is_scalar);

#define BRW_NIR_FRAG_OUTPUT_INDEX_SHIFT 0
#define BRW_NIR_FRAG_OUTPUT_INDEX_MASK INTEL_MASK(0, 0)
#define BRW_NIR_FRAG_OUTPUT_LOCATION_SHIFT 1
#define BRW_NIR_FRAG_OUTPUT_LOCATION_MASK INTEL_MASK(31, 1)
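
/* Illustrative only: a hypothetical helper showing how a fragment output's
 * dual-source index and location could be packed into one word with the
 * shift/mask macros above (INTEL_MASK comes from brw_reg.h).
 */
static inline unsigned
brw_nir_example_pack_frag_output(unsigned location, unsigned index)
{
   return ((index << BRW_NIR_FRAG_OUTPUT_INDEX_SHIFT) &
           BRW_NIR_FRAG_OUTPUT_INDEX_MASK) |
          ((location << BRW_NIR_FRAG_OUTPUT_LOCATION_SHIFT) &
           BRW_NIR_FRAG_OUTPUT_LOCATION_MASK);
}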

#ifdef __cplusplus
}
#endif

#endif /* BRW_NIR_H */