/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_build_pm4.h"
#include "sid.h"
#include "util/u_memory.h"

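/* Start a new PKT3 command: record the opcode and reserve the header dword,
 * which si_pm4_cmd_end() fills in once the dword count is known.
 */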
static void si_pm4_cmd_begin(struct si_pm4_state *state, unsigned opcode)
{
   if (!state->max_dw)
      state->max_dw = ARRAY_SIZE(state->pm4);
   assert(state->ndw < state->max_dw);
   assert(opcode <= 254);
   state->last_opcode = opcode;
   state->last_pm4 = state->ndw++;
}

void si_pm4_cmd_add(struct si_pm4_state *state, uint32_t dw)
{
   if (!state->max_dw)
      state->max_dw = ARRAY_SIZE(state->pm4);
   assert(state->ndw < state->max_dw);
   state->pm4[state->ndw++] = dw;
   state->last_opcode = 255; /* invalid opcode */
}

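/* Finish the current command by writing the PKT3 header with the final count
 * (the number of dwords following the header minus one).
 */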
static void si_pm4_cmd_end(struct si_pm4_state *state, bool predicate)
{
   unsigned count;
   count = state->ndw - state->last_pm4 - 2;
   state->pm4[state->last_pm4] = PKT3(state->last_opcode, count, predicate);
}

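/* Emit a register write. Consecutive writes to adjacent registers with the
 * same opcode are merged into a single SET_*_REG packet.
 */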
static void si_pm4_set_reg_custom(struct si_pm4_state *state, unsigned reg, uint32_t val,
                                  unsigned opcode, unsigned idx)
{
   reg >>= 2;

   if (!state->max_dw)
      state->max_dw = ARRAY_SIZE(state->pm4);

   assert(state->ndw + 2 <= state->max_dw);

   if (opcode != state->last_opcode || reg != (state->last_reg + 1)) {
      si_pm4_cmd_begin(state, opcode);
      state->pm4[state->ndw++] = reg | (idx << 28);
   }

   assert(reg <= UINT16_MAX);
   state->last_reg = reg;
   state->pm4[state->ndw++] = val;
   si_pm4_cmd_end(state, false);
}

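/* Write a register, choosing the SET_CONFIG/SH/CONTEXT/UCONFIG_REG packet
 * based on which register range the offset falls into.
 */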
void si_pm4_set_reg(struct si_pm4_state *state, unsigned reg, uint32_t val)
{
   unsigned opcode;

   SI_CHECK_SHADOWED_REGS(reg, 1);

   if (reg >= SI_CONFIG_REG_OFFSET && reg < SI_CONFIG_REG_END) {
      opcode = PKT3_SET_CONFIG_REG;
      reg -= SI_CONFIG_REG_OFFSET;

   } else if (reg >= SI_SH_REG_OFFSET && reg < SI_SH_REG_END) {
      opcode = PKT3_SET_SH_REG;
      reg -= SI_SH_REG_OFFSET;

   } else if (reg >= SI_CONTEXT_REG_OFFSET && reg < SI_CONTEXT_REG_END) {
      opcode = PKT3_SET_CONTEXT_REG;
      reg -= SI_CONTEXT_REG_OFFSET;

   } else if (reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END) {
      opcode = PKT3_SET_UCONFIG_REG;
      reg -= CIK_UCONFIG_REG_OFFSET;

   } else {
      PRINT_ERR("Invalid register offset %08x!\n", reg);
      return;
   }

   si_pm4_set_reg_custom(state, reg, val, opcode, 0);
}

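/* Write an SH register using SET_SH_REG_INDEX with the index field set to 3. */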
void si_pm4_set_reg_idx3(struct si_pm4_state *state, unsigned reg, uint32_t val)
{
   SI_CHECK_SHADOWED_REGS(reg, 1);

   si_pm4_set_reg_custom(state, reg - SI_SH_REG_OFFSET, val, PKT3_SET_SH_REG_INDEX, 3);
}

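/* Reset the state to zero dwords without freeing it. */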
void si_pm4_clear_state(struct si_pm4_state *state)
{
   state->ndw = 0;
}

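/* Free the state and drop any queued/emitted references to it. */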
void si_pm4_free_state(struct si_context *sctx, struct si_pm4_state *state, unsigned idx)
{
   if (!state)
      return;

   if (idx != ~0) {
      if (sctx->emitted.array[idx] == state)
         sctx->emitted.array[idx] = NULL;

      if (sctx->queued.array[idx] == state) {
         sctx->queued.array[idx] = NULL;
         sctx->dirty_states &= ~BITFIELD_BIT(idx);
      }
   }

   FREE(state);
}

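/* Emit the PM4 dwords into the GFX command stream. For shader states, the
 * shader BO is added to the buffer list first so it stays resident while the
 * CS executes.
 */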
void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;

   if (state->is_shader) {
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, ((struct si_shader*)state)->bo,
                                RADEON_USAGE_READ | RADEON_PRIO_SHADER_BINARY);
   }

   radeon_begin(cs);
   radeon_emit_array(state->pm4, state->ndw);
   radeon_end();

   if (state->atom.emit)
      state->atom.emit(sctx);
}

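/* Mark queued states dirty so they are re-emitted in a new command stream.
 * When register shadowing is active, only shader states are dirtied (their
 * buffers must be re-added to the buffer list); the remaining registers are
 * preserved by the shadowing mechanism.
 */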
void si_pm4_reset_emitted(struct si_context *sctx, bool first_cs)
{
   if (!first_cs && sctx->shadowed_regs) {
      /* Only dirty states that contain buffers, so that they are
       * added to the buffer list on the next draw call.
       */
      for (unsigned i = 0; i < SI_NUM_STATES; i++) {
         struct si_pm4_state *state = sctx->queued.array[i];

         if (state && state->is_shader) {
            sctx->emitted.array[i] = NULL;
            sctx->dirty_states |= BITFIELD_BIT(i);
         }
      }
      return;
   }

   memset(&sctx->emitted, 0, sizeof(sctx->emitted));

   for (unsigned i = 0; i < SI_NUM_STATES; i++) {
      if (sctx->queued.array[i])
         sctx->dirty_states |= BITFIELD_BIT(i);
   }
}