1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 */
26 #ifndef R600_H
27 #define R600_H
28
29 #include "../../winsys/radeon/drm/radeon_winsys.h"
30 #include "util/u_double_list.h"
31 #include "util/u_transfer.h"
32
/* Print an error to stderr prefixed with file/line/function.  Uses the GNU
 * named-variadic-macro form; "##args" deletes the trailing comma when the
 * macro is invoked with no variadic arguments. */
#define R600_ERR(fmt, args...) \
	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
35
36 struct winsys_handle;
37
/*
 * ASIC identifiers, roughly chronological.  The groups below correspond to
 * the chip_class generations declared right after this enum.
 */
enum radeon_family {
	CHIP_UNKNOWN,
	/* R600 generation */
	CHIP_R600,
	CHIP_RV610,
	CHIP_RV630,
	CHIP_RV670,
	CHIP_RV620,
	CHIP_RV635,
	CHIP_RS780,
	CHIP_RS880,
	/* R700 generation */
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
	CHIP_RV740,
	/* Evergreen generation */
	CHIP_CEDAR,
	CHIP_REDWOOD,
	CHIP_JUNIPER,
	CHIP_CYPRESS,
	CHIP_HEMLOCK,
	CHIP_PALM,
	CHIP_SUMO,
	CHIP_SUMO2,
	/* Northern Islands generation */
	CHIP_BARTS,
	CHIP_TURKS,
	CHIP_CAICOS,
	CHIP_CAYMAN,
	CHIP_ARUBA,
	CHIP_LAST,	/* sentinel: number of families */
};
67
/* Coarse GPU generation; several radeon_family entries map onto each one. */
enum chip_class {
	R600,
	R700,
	EVERGREEN,
	CAYMAN,
};
74
/* Memory-tiling configuration (presumably queried from the winsys/kernel —
 * confirm against the winsys init path). */
struct r600_tiling_info {
	unsigned num_channels;	/* number of memory channels */
	unsigned num_banks;	/* number of memory banks */
	unsigned group_bytes;	/* bytes per tile group */
};
80
/* Common base for r600 buffers/textures: a gallium resource plus the
 * winsys buffer that backs it. */
struct r600_resource {
	struct u_resource b;	/* embedded util resource (pipe_resource + vtbl) */

	/* Winsys objects. */
	struct pb_buffer *buf;
	struct radeon_winsys_cs_handle *cs_buf;	/* handle used when emitting CS relocations */

	/* Resource state. */
	unsigned domains;	/* allowed memory domains — presumably RADEON_DOMAIN_* bits */
};
91
#define R600_BLOCK_MAX_BO 32
#define R600_BLOCK_MAX_REG 128

/* each range covers 9 bits of dword space = 512 dwords = 2k bytes */
/* there is a block entry for each register so 512 blocks */
/* we have no registers to read/write below 0x8000 (0x2000 in dw space) */
/* we use some fake offsets at 0x40000 to do evergreen sampler borders so take 0x42000 as a max bound*/
#define RANGE_OFFSET_START 0x8000
#define HASH_SHIFT 9
/* Fully parenthesized so the expansion stays correct inside a larger
 * expression (e.g. "x % NUM_RANGES").  (0x42000 - 0x8000) / 2048 = 116. */
#define NUM_RANGES ((0x42000 - RANGE_OFFSET_START) / (4 << HASH_SHIFT))

/* Map a register byte offset to its range / block hash indices.  The
 * 'offset' argument is parenthesized so compound expressions (e.g. a | b)
 * are not broken by operator precedence. */
#define CTX_RANGE_ID(offset) (((((offset) - RANGE_OFFSET_START) >> 2) >> HASH_SHIFT) & 255)
#define CTX_BLOCK_ID(offset) ((((offset) - RANGE_OFFSET_START) >> 2) & ((1 << HASH_SHIFT) - 1))
105
/* One register write queued inside an r600_pipe_state. */
struct r600_pipe_reg {
	uint32_t value;			/* value to program into the register */
	struct r600_block *block;	/* cached register block this reg belongs to */
	struct r600_resource *bo;	/* optional buffer relocation for this write */
	enum radeon_bo_usage bo_usage;	/* how the GPU accesses 'bo' */
	uint32_t id;			/* register index within the block — TODO confirm */
};
113
/* Fixed-capacity list of register writes built up by the state code. */
struct r600_pipe_state {
	unsigned id;		/* state identifier */
	unsigned nregs;		/* number of valid entries in regs[] */
	struct r600_pipe_reg regs[R600_BLOCK_MAX_REG];
};
119
/* r600_block::status flags */
#define R600_BLOCK_STATUS_ENABLED (1 << 0)	/* block is in the enable list */
#define R600_BLOCK_STATUS_DIRTY (1 << 1)	/* block must be re-emitted */
122
/* Buffer relocation attached to a cached register block. */
struct r600_block_reloc {
	struct r600_resource *bo;	/* buffer referenced by the block */
	enum radeon_bo_usage bo_usage;	/* GPU access type for the reloc */
	unsigned bo_pm4_index;		/* position of the reloc in the pm4 dwords — TODO confirm */
};
128
/*
 * A cached run of consecutive registers together with the prebuilt PM4
 * dwords that program them.
 */
struct r600_block {
	struct list_head list;		/* list link (owning list lives in the context) */
	struct list_head enable_list;	/* second link, for enabled blocks */
	unsigned status;		/* R600_BLOCK_STATUS_* flags */
	unsigned flags;
	unsigned start_offset;		/* register offset of the first register */
	unsigned pm4_ndwords;		/* number of valid dwords in pm4[] */
	unsigned nbo;			/* number of valid entries in reloc[] */
	uint16_t nreg;			/* number of registers covered by this block */
	uint16_t nreg_dirty;		/* count of dirty registers — TODO confirm exact meaning */
	uint32_t *reg;			/* shadow copy of register values — presumably; verify */
	uint32_t pm4[R600_BLOCK_MAX_REG];		/* prebuilt command dwords */
	unsigned pm4_bo_index[R600_BLOCK_MAX_REG];	/* per-dword index into reloc[] */
	struct r600_block_reloc reloc[R600_BLOCK_MAX_BO];
};
144
/* One hash range: an array of block pointers indexed by CTX_BLOCK_ID(). */
struct r600_range {
	struct r600_block **blocks;
};
148
/* Growable storage for query results; older full buffers are chained
 * through 'previous'. */
struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource *buf;
	/* Offset of the next free result after current query data */
	unsigned results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer *previous;
};
159
/* An active or completed hardware query (occlusion, timestamp, ...). */
struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer buffer;
	/* The type of query */
	unsigned type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned num_cs_dw;
	/* linked list of queries */
	struct list_head list;
};
173
/* Stream-output (transform feedback) target. */
struct r600_so_target {
	struct pipe_stream_output_target b;	/* embedded gallium SO target */

	/* The buffer where BUFFER_FILLED_SIZE is stored. */
	struct r600_resource *filled_size;
	unsigned stride_in_dw;	/* vertex stride in dwords */
	unsigned so_index;	/* index of this target in the SO buffer set — TODO confirm */
};
182
/* Context-wide pending/dirty flags. */
#define R600_CONTEXT_DRAW_PENDING (1 << 0)
#define R600_CONTEXT_DST_CACHES_DIRTY (1 << 1)
#define R600_PARTIAL_FLUSH (1 << 2)

/* Defined elsewhere in the driver; used only through pointers here. */
struct r600_context;
struct r600_screen;
189
/* Context life cycle, state emission and flushing. */
void r600_get_backend_mask(struct r600_context *ctx);
int r600_context_init(struct r600_context *ctx);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_emit(struct r600_context *ctx, struct r600_pipe_state *state, unsigned pkt_flags);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_flush(struct r600_context *ctx, unsigned flags);

/* Fence emission and cache invalidation helpers. */
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
			     unsigned offset, unsigned value);
void r600_inval_shader_cache(struct r600_context *ctx);
void r600_inval_texture_cache(struct r600_context *ctx);
void r600_inval_vertex_cache(struct r600_context *ctx);
void r600_flush_framebuffer(struct r600_context *ctx, bool flush_now);

/* Streamout and command-stream space management. */
void r600_context_streamout_begin(struct r600_context *ctx);
void r600_context_streamout_end(struct r600_context *ctx);
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block, unsigned pkt_flags);

/* Evergreen-specific context setup. */
int evergreen_context_init(struct r600_context *ctx);

/* Add a register write (optionally with a buffer relocation) to a state.
 * Normally called through the wrapper macros below. */
void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
				 struct r600_pipe_state *state,
				 uint32_t offset, uint32_t value,
				 uint32_t range_id, uint32_t block_id,
				 struct r600_resource *bo,
				 enum radeon_bo_usage usage);

void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      uint32_t offset, uint32_t value,
			      uint32_t range_id, uint32_t block_id);

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     uint32_t offset, uint32_t value,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage);

/* NOTE: these wrappers evaluate 'offset' more than once and implicitly
 * reference a variable named 'rctx' in the caller's scope. */
#define r600_pipe_state_add_reg_bo(state, offset, value, bo, usage) _r600_pipe_state_add_reg_bo(rctx, state, offset, value, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo, usage)
#define r600_pipe_state_add_reg(state, offset, value) _r600_pipe_state_add_reg(rctx, state, offset, value, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset))
230
r600_pipe_state_mod_reg(struct r600_pipe_state * state,uint32_t value)231 static inline void r600_pipe_state_mod_reg(struct r600_pipe_state *state,
232 uint32_t value)
233 {
234 state->regs[state->nregs].value = value;
235 state->nregs++;
236 }
237
238 #endif
239