• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
3  * SPDX-License-Identifier: MIT
4  */
5 #ifndef NVK_MME_H
6 #define NVK_MME_H 1
7 
8 #include "mme_builder.h"
9 #include "nvk_private.h"
10 
11 #include "nak.h"
12 
13 struct nv_device_info;
14 
/* Index of each NVK MME macro.
 *
 * NOTE(review): the enumerator order appears to be load-bearing — these
 * values presumably index a table of builder functions (the nvk_mme_*
 * builder prototypes later in this header are declared in the same order).
 * Confirm before reordering; new macros should be added just before
 * NVK_MME_COUNT.
 */
enum nvk_mme {
   NVK_MME_SELECT_CB0,
   NVK_MME_BIND_CBUF_DESC,
   NVK_MME_CLEAR,
   NVK_MME_BIND_IB,
   NVK_MME_BIND_VB,
   NVK_MME_SET_VB_ENABLES,
   NVK_MME_SET_VB_STRIDE,
   NVK_MME_SET_TESS_PARAMS,
   NVK_MME_SET_SHADING_RATE_CONTROL,
   NVK_MME_SET_ANTI_ALIAS,
   NVK_MME_DRAW,
   NVK_MME_DRAW_INDEXED,
   NVK_MME_DRAW_INDIRECT,
   NVK_MME_DRAW_INDEXED_INDIRECT,
   NVK_MME_DRAW_INDIRECT_COUNT,
   NVK_MME_DRAW_INDEXED_INDIRECT_COUNT,
   NVK_MME_ADD_CS_INVOCATIONS,
   NVK_MME_DISPATCH_INDIRECT,
   NVK_MME_WRITE_CS_INVOCATIONS,
   NVK_MME_XFB_COUNTER_LOAD,
   NVK_MME_XFB_DRAW_INDIRECT,
   NVK_MME_SET_PRIV_REG,
   NVK_MME_SET_WRITE_MASK,
   NVK_MME_SET_CONSERVATIVE_RASTER_STATE,
   NVK_MME_SET_VIEWPORT_MIN_MAX_Z,
   NVK_MME_SET_Z_CLAMP,

   /* Must be last: the total number of MME macros */
   NVK_MME_COUNT,
};
45 
46 enum nvk_mme_scratch {
47    /* These are reserved for communicating with FALCON */
48    NVK_MME_SCRATCH_FALCON_0 = 0,
49    NVK_MME_SCRATCH_FALCON_1 = 0,
50    NVK_MME_SCRATCH_FALCON_2 = 0,
51 
52    NVK_MME_SCRATCH_CS_INVOCATIONS_HI,
53    NVK_MME_SCRATCH_CS_INVOCATIONS_LO,
54    NVK_MME_SCRATCH_DRAW_BEGIN,
55    NVK_MME_SCRATCH_DRAW_COUNT,
56    NVK_MME_SCRATCH_DRAW_PAD_DW,
57    NVK_MME_SCRATCH_DRAW_IDX,
58    NVK_MME_SCRATCH_VIEW_MASK,
59    NVK_MME_SCRATCH_WRITE_MASK_DYN,
60    NVK_MME_SCRATCH_WRITE_MASK_PIPELINE,
61    NVK_MME_SCRATCH_CONSERVATIVE_RASTER_STATE,
62 
63    /* Bitfield of enabled vertex buffer bindings */
64    NVK_MME_SCRATCH_VB_ENABLES,
65 
66    /* Tessellation parameters */
67    NVK_MME_SCRATCH_TESS_PARAMS,
68 
69    /* Anti-aliasing state */
70    NVK_MME_SCRATCH_SAMPLE_MASKS_2PASS_0,
71    NVK_MME_SCRATCH_SAMPLE_MASKS_2PASS_1,
72    NVK_MME_SCRATCH_SAMPLE_MASKS_2PASS_2,
73    NVK_MME_SCRATCH_SAMPLE_MASKS_2PASS_3,
74    NVK_MME_SCRATCH_SAMPLE_MASKS_4PASS_0,
75    NVK_MME_SCRATCH_SAMPLE_MASKS_4PASS_1,
76    NVK_MME_SCRATCH_SAMPLE_MASKS_4PASS_2,
77    NVK_MME_SCRATCH_SAMPLE_MASKS_4PASS_3,
78    NVK_MME_SCRATCH_ANTI_ALIAS,
79 
80    /* Shading rate control */
81    NVK_MME_SCRATCH_SHADING_RATE_CONTROL,
82 
83    /* Addres of cb0 */
84    NVK_MME_SCRATCH_CB0_ADDR_HI,
85    NVK_MME_SCRATCH_CB0_ADDR_LO,
86 
87    /* Addres of zero page */
88    NVK_MME_SCRATCH_ZERO_ADDR_HI,
89    NVK_MME_SCRATCH_ZERO_ADDR_LO,
90 
91    /* Shadow copies of values in CB0 */
92    NVK_MME_SCRATCH_CB0_FIRST_VERTEX,
93    NVK_MME_SCRATCH_CB0_DRAW_INDEX,
94    NVK_MME_SCRATCH_CB0_VIEW_INDEX,
95 
96    NVK_MME_SCRATCH_VIEWPORT0_MIN_Z,
97    NVK_MME_SCRATCH_VIEWPORT0_MAX_Z,
98    NVK_MME_SCRATCH_Z_CLAMP = NVK_MME_SCRATCH_VIEWPORT0_MIN_Z
99                              + (NVK_MAX_VIEWPORTS * 2),
100 
101    /* Must be at the end */
102    NVK_MME_NUM_SCRATCH,
103 };
104 
105 #define NVK_SET_MME_SCRATCH(S) (0x3400 + (NVK_MME_SCRATCH_##S) * 4)
106 
107 static inline void
_nvk_mme_load_scratch_to(struct mme_builder * b,struct mme_value val,enum nvk_mme_scratch scratch)108 _nvk_mme_load_scratch_to(struct mme_builder *b, struct mme_value val,
109                          enum nvk_mme_scratch scratch)
110 {
111    mme_state_to(b, val, 0x3400 + scratch * 4);
112 }
113 #define nvk_mme_load_scratch_to(b, v, S) \
114    _nvk_mme_load_scratch_to(b, v, NVK_MME_SCRATCH_##S)
115 
116 static inline struct mme_value
_nvk_mme_load_scratch(struct mme_builder * b,enum nvk_mme_scratch scratch)117 _nvk_mme_load_scratch(struct mme_builder *b, enum nvk_mme_scratch scratch)
118 {
119    struct mme_value val = mme_alloc_reg(b);
120    _nvk_mme_load_scratch_to(b, val, scratch);
121    return val;
122 }
123 #define nvk_mme_load_scratch(b, S) \
124    _nvk_mme_load_scratch(b, NVK_MME_SCRATCH_##S)
125 
126 #define nvk_mme_load_scratch_arr(b, S, i) \
127    _nvk_mme_load_scratch(b, NVK_MME_SCRATCH_##S + i)
128 
129 static inline void
_nvk_mme_store_scratch(struct mme_builder * b,enum nvk_mme_scratch scratch,struct mme_value data)130 _nvk_mme_store_scratch(struct mme_builder *b, enum nvk_mme_scratch scratch,
131                        struct mme_value data)
132 {
133    mme_mthd(b, 0x3400 + scratch * 4);
134    mme_emit(b, data);
135 }
136 #define nvk_mme_store_scratch(b, S, v) \
137    _nvk_mme_store_scratch(b, NVK_MME_SCRATCH_##S, v)
138 
139 static inline void
_nvk_mme_load_to_scratch(struct mme_builder * b,enum nvk_mme_scratch scratch)140 _nvk_mme_load_to_scratch(struct mme_builder *b, enum nvk_mme_scratch scratch)
141 {
142    struct mme_value val = mme_load(b);
143    _nvk_mme_store_scratch(b, scratch, val);
144    mme_free_reg(b, val);
145 }
146 #define nvk_mme_load_to_scratch(b, S) \
147    _nvk_mme_load_to_scratch(b, NVK_MME_SCRATCH_##S)
148 
/* Packs a value/mask pair into a single dword: mask in the top 16 bits,
 * value in the bottom 16.  This is the parameter format consumed by
 * nvk_mme_set_masked().
 */
static inline uint32_t
nvk_mme_val_mask(uint16_t val, uint16_t mask)
{
   /* Bits set in val but not in mask would simply be dropped by
    * nvk_mme_set_masked().  That still "works" but almost certainly
    * indicates a CPU-side programming error, so catch it in debug builds.
    */
   assert(!(val & ~mask));

   return ((uint32_t)mask << 16) | (uint32_t)val;
}
160 
161 /* This is a common pattern in NVK.  The input val_mask is a value plus a mask
162  * where the top 16 bits are mask and the bottom 16 bits are data.  src is
163  * copied and the bits in the mask are replaced by the corresponding value
164  * bits in val_mask.
165  */
166 static inline struct mme_value
nvk_mme_set_masked(struct mme_builder * b,struct mme_value src,struct mme_value val_mask)167 nvk_mme_set_masked(struct mme_builder *b, struct mme_value src,
168                    struct mme_value val_mask)
169 {
170    struct mme_value mask = mme_merge(b, mme_zero(), val_mask, 0, 16, 16);
171    struct mme_value val = mme_and_not(b, src, mask);
172 
173    /* Re-use the mask reg for val_mask & mask */
174    mme_and_to(b, mask, val_mask, mask);
175    mme_or_to(b, val, val, mask);
176    mme_free_reg(b, mask);
177 
178    return val;
179 }
180 
181 static void
_nvk_mme_spill(struct mme_builder * b,enum nvk_mme_scratch scratch,struct mme_value val)182 _nvk_mme_spill(struct mme_builder *b, enum nvk_mme_scratch scratch,
183                struct mme_value val)
184 {
185    if (val.type == MME_VALUE_TYPE_REG) {
186       _nvk_mme_store_scratch(b, scratch, val);
187       mme_free_reg(b, val);
188    }
189 }
190 #define nvk_mme_spill(b, S, v) \
191    _nvk_mme_spill(b, NVK_MME_SCRATCH_##S, v)
192 
193 static void
_nvk_mme_unspill(struct mme_builder * b,enum nvk_mme_scratch scratch,struct mme_value val)194 _nvk_mme_unspill(struct mme_builder *b, enum nvk_mme_scratch scratch,
195                  struct mme_value val)
196 {
197    if (val.type == MME_VALUE_TYPE_REG) {
198       mme_realloc_reg(b, val);
199       _nvk_mme_load_scratch_to(b, val, scratch);
200    }
201 }
202 #define nvk_mme_unspill(b, S, v) \
203    _nvk_mme_unspill(b, NVK_MME_SCRATCH_##S, v)
204 
/* Signature of an MME builder function: emits one macro program into *b. */
typedef void (*nvk_mme_builder_func)(struct mme_builder *b);

/* Builds the macro program for `mme` targeting `devinfo`.
 *
 * NOTE(review): presumably returns a heap-allocated array of macro dwords
 * with *size_out set on return — confirm ownership/units in the definition.
 */
uint32_t *nvk_build_mme(const struct nv_device_info *devinfo,
                        enum nvk_mme mme, size_t *size_out);

/* Builder functions, one per enum nvk_mme entry, declared in enum order. */
void nvk_mme_select_cb0(struct mme_builder *b);
void nvk_mme_bind_cbuf_desc(struct mme_builder *b);
void nvk_mme_clear(struct mme_builder *b);
void nvk_mme_bind_ib(struct mme_builder *b);
void nvk_mme_bind_vb(struct mme_builder *b);
void nvk_mme_set_vb_enables(struct mme_builder *b);
void nvk_mme_set_vb_stride(struct mme_builder *b);
void nvk_mme_set_tess_params(struct mme_builder *b);
void nvk_mme_set_shading_rate_control(struct mme_builder *b);
void nvk_mme_set_anti_alias(struct mme_builder *b);
void nvk_mme_draw(struct mme_builder *b);
void nvk_mme_draw_indexed(struct mme_builder *b);
void nvk_mme_draw_indirect(struct mme_builder *b);
void nvk_mme_draw_indexed_indirect(struct mme_builder *b);
void nvk_mme_draw_indirect_count(struct mme_builder *b);
void nvk_mme_draw_indexed_indirect_count(struct mme_builder *b);
void nvk_mme_add_cs_invocations(struct mme_builder *b);
void nvk_mme_dispatch_indirect(struct mme_builder *b);
void nvk_mme_write_cs_invocations(struct mme_builder *b);
void nvk_mme_xfb_counter_load(struct mme_builder *b);
void nvk_mme_xfb_draw_indirect(struct mme_builder *b);
void nvk_mme_set_priv_reg(struct mme_builder *b);
void nvk_mme_set_write_mask(struct mme_builder *b);
void nvk_mme_set_conservative_raster_state(struct mme_builder *b);
void nvk_mme_set_viewport_min_max_z(struct mme_builder *b);
void nvk_mme_set_z_clamp(struct mme_builder *b);

/* CPU-side encoders for macro parameter dwords.
 *
 * NOTE(review): from the names, these presumably pack state into the
 * parameter formats consumed by NVK_MME_SET_TESS_PARAMS,
 * NVK_MME_SET_ANTI_ALIAS, and NVK_MME_SET_SHADING_RATE_CONTROL
 * respectively — confirm against the builders.
 */
uint32_t nvk_mme_tess_params(enum nak_ts_domain domain,
                             enum nak_ts_spacing spacing,
                             enum nak_ts_prims prims);
uint32_t nvk_mme_anti_alias_min_sample_shading(float mss);
uint32_t nvk_mme_shading_rate_control_sample_shading(bool sample_shading);
242 
/* A single method/data pair, as consumed or produced by an MME. */
struct nvk_mme_mthd_data {
   uint16_t mthd;
   uint32_t data;
};

/* Sentinel terminating an array of nvk_mme_mthd_data */
#define NVK_MME_MTHD_DATA_END ((struct nvk_mme_mthd_data) { 0, 0 })

/* One MME unit test.
 *
 * NOTE(review): from the field names, `init` appears to seed method/data
 * state before the macro runs, `params` are the macro's input parameters,
 * and the output is validated either against `expected` or by the `check`
 * callback — confirm against the test harness that consumes these.
 */
struct nvk_mme_test_case {
   const struct nvk_mme_mthd_data *init;
   const uint32_t *params;
   const struct nvk_mme_mthd_data *expected;
   void (*check)(const struct nv_device_info *devinfo,
                 const struct nvk_mme_test_case *test,
                 const struct nvk_mme_mthd_data *results);
};
258 
/* Per-macro unit-test tables (see struct nvk_mme_test_case). */
extern const struct nvk_mme_test_case nvk_mme_clear_tests[];
extern const struct nvk_mme_test_case nvk_mme_bind_vb_tests[];
extern const struct nvk_mme_test_case nvk_mme_set_tess_params_tests[];
extern const struct nvk_mme_test_case nvk_mme_set_shading_rate_control_tests[];
extern const struct nvk_mme_test_case nvk_mme_set_anti_alias_tests[];

/* Runs all MME test cases for the given device.
 * NOTE(review): presumably executes the macros on a software MME
 * interpreter rather than real hardware — confirm in the definition.
 */
void nvk_test_all_mmes(const struct nv_device_info *devinfo);
266 
267 #endif /* NVK_MME_H */
268