/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_mme.h"

#include "nvk_private.h"

#include "mme_sim.h"

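/* Table of builder functions, indexed by enum nvk_mme.  Each entry emits the
 * macro program for one MME.
 */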
static const nvk_mme_builder_func mme_builders[NVK_MME_COUNT] = {
   [NVK_MME_SELECT_CB0]                    = nvk_mme_select_cb0,
   [NVK_MME_BIND_CBUF_DESC]                = nvk_mme_bind_cbuf_desc,
   [NVK_MME_CLEAR]                         = nvk_mme_clear,
   [NVK_MME_BIND_IB]                       = nvk_mme_bind_ib,
   [NVK_MME_BIND_VB]                       = nvk_mme_bind_vb,
   [NVK_MME_SET_VB_ENABLES]                = nvk_mme_set_vb_enables,
   [NVK_MME_SET_VB_STRIDE]                 = nvk_mme_set_vb_stride,
   [NVK_MME_SET_TESS_PARAMS]               = nvk_mme_set_tess_params,
   [NVK_MME_SET_SHADING_RATE_CONTROL]      = nvk_mme_set_shading_rate_control,
   [NVK_MME_SET_ANTI_ALIAS]                = nvk_mme_set_anti_alias,
   [NVK_MME_DRAW]                          = nvk_mme_draw,
   [NVK_MME_DRAW_INDEXED]                  = nvk_mme_draw_indexed,
   [NVK_MME_DRAW_INDIRECT]                 = nvk_mme_draw_indirect,
   [NVK_MME_DRAW_INDEXED_INDIRECT]         = nvk_mme_draw_indexed_indirect,
   [NVK_MME_DRAW_INDIRECT_COUNT]           = nvk_mme_draw_indirect_count,
   [NVK_MME_DRAW_INDEXED_INDIRECT_COUNT]   = nvk_mme_draw_indexed_indirect_count,
   [NVK_MME_ADD_CS_INVOCATIONS]            = nvk_mme_add_cs_invocations,
   [NVK_MME_DISPATCH_INDIRECT]             = nvk_mme_dispatch_indirect,
   [NVK_MME_WRITE_CS_INVOCATIONS]          = nvk_mme_write_cs_invocations,
   [NVK_MME_XFB_COUNTER_LOAD]              = nvk_mme_xfb_counter_load,
   [NVK_MME_XFB_DRAW_INDIRECT]             = nvk_mme_xfb_draw_indirect,
   [NVK_MME_SET_PRIV_REG]                  = nvk_mme_set_priv_reg,
   [NVK_MME_SET_WRITE_MASK]                = nvk_mme_set_write_mask,
   [NVK_MME_SET_CONSERVATIVE_RASTER_STATE] = nvk_mme_set_conservative_raster_state,
   [NVK_MME_SET_VIEWPORT_MIN_MAX_Z]        = nvk_mme_set_viewport_min_max_z,
   [NVK_MME_SET_Z_CLAMP]                   = nvk_mme_set_z_clamp,
};

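/* Optional self-test cases, indexed by enum nvk_mme.  A NULL entry means the
 * MME has no tests; each non-NULL entry points to an array of cases
 * terminated by one with params == NULL.
 */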
static const struct nvk_mme_test_case *mme_tests[NVK_MME_COUNT] = {
   [NVK_MME_CLEAR]                         = nvk_mme_clear_tests,
   [NVK_MME_BIND_VB]                       = nvk_mme_bind_vb_tests,
   [NVK_MME_SET_TESS_PARAMS]               = nvk_mme_set_tess_params_tests,
   [NVK_MME_SET_SHADING_RATE_CONTROL]      = nvk_mme_set_shading_rate_control_tests,
   [NVK_MME_SET_ANTI_ALIAS]                = nvk_mme_set_anti_alias_tests,
};

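/* Builds the macro program for the given MME on the given device.  The
 * returned DWORD array comes from the MME builder and is freed by the
 * caller (see the free(dw) in nvk_test_all_mmes below).
 */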
uint32_t *
nvk_build_mme(const struct nv_device_info *devinfo,
              enum nvk_mme mme, size_t *size_out)
{
   struct mme_builder b;
   mme_builder_init(&b, devinfo);

   mme_builders[mme](&b);

   return mme_builder_finish(&b, size_out);
}

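/* Per-test simulation state: the test case being run, the methods emitted so
 * far, and cursors into the test's params (pi) and expected/results (ei)
 * arrays.
 */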
struct nvk_mme_test_state {
   const struct nvk_mme_test_case *test;
   struct nvk_mme_mthd_data results[32];
   uint32_t pi, ei;
};

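/* mme_sim load callback: returns the next value from the test's parameter
 * list each time the macro loads a parameter.
 */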
static uint32_t
nvk_mme_test_state_load(void *_ts)
{
   struct nvk_mme_test_state *ts = _ts;
   return ts->test->params[ts->pi++];
}

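/* mme_sim state callback: returns the current value of a method register,
 * preferring the most recent value written during this run over the test's
 * initial state.
 */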
static uint32_t
nvk_mme_test_state_state(void *_ts, uint16_t addr)
{
   struct nvk_mme_test_state *ts = _ts;

   /* First, look backwards through the expected data that we've already
    * written.  This ensures that mthd() impacts state().
    */
   for (int32_t i = ts->ei - 1; i >= 0; i--) {
      if (ts->test->expected[i].mthd == addr)
         return ts->test->expected[i].data;
   }

   /* Now look at init.  We assume the init data is unique */
   assert(ts->test->init != NULL && "Read uninitialized state");
   for (uint32_t i = 0;; i++) {
      if (ts->test->init[i].mthd == 0)
         unreachable("Read uninitialized state");

      if (ts->test->init[i].mthd == addr)
         return ts->test->init[i].data;
   }
}

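/* mme_sim mthd callback: records each method the macro emits and, when the
 * test provides expected data, asserts that it matches entry-for-entry.
 */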
static void
nvk_mme_test_state_mthd(void *_ts, uint16_t addr, uint32_t data)
{
   struct nvk_mme_test_state *ts = _ts;

   assert(ts->ei < ARRAY_SIZE(ts->results));
   ts->results[ts->ei] = (struct nvk_mme_mthd_data) {
      .mthd = addr,
      .data = data,
   };

   if (ts->test->expected != NULL) {
      assert(ts->test->expected[ts->ei].mthd != 0);
      assert(ts->test->expected[ts->ei].mthd == addr);
      assert(ts->test->expected[ts->ei].data == data);
   }

   ts->ei++;
}

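/* State callbacks plugged into the MME simulator when running tests */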
const struct mme_sim_state_ops nvk_mme_test_state_ops = {
   .load = nvk_mme_test_state_load,
   .state = nvk_mme_test_state_state,
   .mthd = nvk_mme_test_state_mthd,
};

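/* Builds every MME for the given device and runs each one's test cases
 * through the MME simulator, checking the emitted methods against the
 * expected data.
 */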
void
nvk_test_all_mmes(const struct nv_device_info *devinfo)
{
   for (uint32_t mme = 0; mme < NVK_MME_COUNT; mme++) {
      size_t size;
      uint32_t *dw = nvk_build_mme(devinfo, mme, &size);
      assert(dw != NULL);

      if (mme_tests[mme] != NULL) {
         for (uint32_t i = 0;; i++) {
            if (mme_tests[mme][i].params == NULL)
               break;

            struct nvk_mme_test_state ts = {
               .test = &mme_tests[mme][i],
            };
            mme_sim_core(devinfo, size, dw, &nvk_mme_test_state_ops, &ts);
            if (ts.test->expected != NULL)
               assert(ts.test->expected[ts.ei].mthd == 0);
            if (ts.test->check != NULL)
               ts.test->check(devinfo, ts.test, ts.results);
         }
      }

      free(dw);
   }
}