/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef VC5_CL_H
#define VC5_CL_H

#include <stdint.h>

#include "util/u_math.h"
#include "util/macros.h"
#include "broadcom/cle/v3d_packet_helpers.h"

struct v3d_bo;
struct v3d_job;
struct v3d_cl;

/**
 * Undefined structure, used for typechecking that you're passing the pointers
 * to these functions correctly.
 */
struct v3d_cl_out;

/** A reference to a BO used in the CL packing functions */
struct v3d_cl_reloc {
        struct v3d_bo *bo;
        uint32_t offset;
};

static inline void cl_pack_emit_reloc(struct v3d_cl *cl, const struct v3d_cl_reloc *);

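/* Hooks used by the XML-generated packet pack/unpack code: the per-emit user
 * data type, how an address is represented, and how relocations are emitted
 * and addresses unpacked.
 */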
#define __gen_user_data struct v3d_cl
#define __gen_address_type struct v3d_cl_reloc
#define __gen_address_offset(reloc) (((reloc)->bo ? (reloc)->bo->offset : 0) + \
                                     (reloc)->offset)
#define __gen_emit_reloc cl_pack_emit_reloc
#define __gen_unpack_address(cl, s, e) __unpack_address(cl, s, e)

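/* Unpacks an address field: bits s..e of the packet hold only the high bits
 * of the address (the low bits are implied zero by alignment), so shift the
 * value back up to a byte address.  The BO is not known at unpack time.
 */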
static inline struct v3d_cl_reloc
__unpack_address(const uint8_t *cl, uint32_t s, uint32_t e)
{
        struct v3d_cl_reloc reloc =
                { NULL, __gen_unpack_uint(cl, s, e) << (31 - (e - s)) };
        return reloc;
}

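/* State for one growing command list (CL): the CPU mapping of the backing BO
 * (base), the current write pointer into that mapping (next), the BO itself,
 * and the allocated size in bytes.
 */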
struct v3d_cl {
        void *base;
        struct v3d_job *job;
        struct v3d_cl_out *next;
        struct v3d_bo *bo;
        uint32_t size;
};

void v3d_init_cl(struct v3d_job *job, struct v3d_cl *cl);
void v3d_destroy_cl(struct v3d_cl *cl);
void v3d_dump_cl(void *cl, uint32_t size, bool is_render);
uint32_t v3d_gem_hindex(struct v3d_job *job, struct v3d_bo *bo);

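/* Wrapper structs with the packed attribute, so the compiler generates safe
 * code for unaligned stores into the byte-packed CL.
 */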
struct PACKED unaligned_16 { uint16_t x; };
struct PACKED unaligned_32 { uint32_t x; };

static inline uint32_t cl_offset(struct v3d_cl *cl)
{
        return (char *)cl->next - (char *)cl->base;
}

static inline struct v3d_cl_reloc cl_get_address(struct v3d_cl *cl)
{
        return (struct v3d_cl_reloc){ .bo = cl->bo, .offset = cl_offset(cl) };
}

static inline void
cl_advance(struct v3d_cl_out **cl, uint32_t n)
{
        (*cl) = (struct v3d_cl_out *)((char *)(*cl) + n);
}

static inline struct v3d_cl_out *
cl_start(struct v3d_cl *cl)
{
        return cl->next;
}

static inline void
cl_end(struct v3d_cl *cl, struct v3d_cl_out *next)
{
        cl->next = next;
        assert(cl_offset(cl) <= cl->size);
}


static inline void
put_unaligned_32(struct v3d_cl_out *ptr, uint32_t val)
{
        struct unaligned_32 *p = (void *)ptr;
        p->x = val;
}

static inline void
put_unaligned_16(struct v3d_cl_out *ptr, uint16_t val)
{
        struct unaligned_16 *p = (void *)ptr;
        p->x = val;
}

static inline void
cl_u8(struct v3d_cl_out **cl, uint8_t n)
{
        *(uint8_t *)(*cl) = n;
        cl_advance(cl, 1);
}

static inline void
cl_u16(struct v3d_cl_out **cl, uint16_t n)
{
        put_unaligned_16(*cl, n);
        cl_advance(cl, 2);
}

static inline void
cl_u32(struct v3d_cl_out **cl, uint32_t n)
{
        put_unaligned_32(*cl, n);
        cl_advance(cl, 4);
}

static inline void
cl_aligned_u32(struct v3d_cl_out **cl, uint32_t n)
{
        *(uint32_t *)(*cl) = n;
        cl_advance(cl, 4);
}

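/* Writes the BO's GPU address plus a byte offset into the CL and records the
 * BO as referenced by the job.
 */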
static inline void
cl_aligned_reloc(struct v3d_cl *cl,
                 struct v3d_cl_out **cl_out,
                 struct v3d_bo *bo, uint32_t offset)
{
        cl_aligned_u32(cl_out, bo->offset + offset);
        v3d_job_add_bo(cl->job, bo);
}

static inline void
cl_ptr(struct v3d_cl_out **cl, void *ptr)
{
        *(struct v3d_cl_out **)(*cl) = ptr;
        cl_advance(cl, sizeof(void *));
}

static inline void
cl_f(struct v3d_cl_out **cl, float f)
{
        cl_u32(cl, fui(f));
}

static inline void
cl_aligned_f(struct v3d_cl_out **cl, float f)
{
        cl_aligned_u32(cl, fui(f));
}

/**
 * Reference to a BO with its associated offset, used in the pack process.
 */
static inline struct v3d_cl_reloc
cl_address(struct v3d_bo *bo, uint32_t offset)
{
        struct v3d_cl_reloc reloc = {
                .bo = bo,
                .offset = offset,
        };
        return reloc;
}
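
/* Illustrative use of cl_address() for an address field in a packed struct.
 * SOME_PACKET, some_bo and byte_offset are placeholders, not real names:
 *
 *     cl_emit(bcl, SOME_PACKET, packet) {
 *             packet.address = cl_address(some_bo, byte_offset);
 *     }
 */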

uint32_t v3d_cl_ensure_space(struct v3d_cl *cl, uint32_t size, uint32_t align);
void v3d_cl_ensure_space_with_branch(struct v3d_cl *cl, uint32_t size);

#define cl_packet_header(packet) V3DX(packet ## _header)
#define cl_packet_length(packet) V3DX(packet ## _length)
#define cl_packet_pack(packet) V3DX(packet ## _pack)
#define cl_packet_struct(packet) V3DX(packet)

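/* Returns the current write pointer and advances it by size bytes, for
 * callers that fill the reserved space themselves.
 */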
static inline void *
cl_get_emit_space(struct v3d_cl_out **cl, size_t size)
{
        void *addr = *cl;
        cl_advance(cl, size);
        return addr;
}

/* Macro for setting up an emit of a CL struct.  A temporary unpacked struct
 * is created, whose fields you get to set in a body of the form:
 *
 * cl_emit(bcl, FLAT_SHADE_FLAGS, flags) {
 *         flags.flat_shade_flags = 1 << 2;
 * }
 *
 * or, to emit only the default values:
 *
 * cl_emit(bcl, FLAT_SHADE_FLAGS, flags);
 *
 * The trick here is that we make a for loop that will execute the body
 * (either the block or the ';' after the macro invocation) exactly once.
 */
#define cl_emit(cl, packet, name)                                      \
        for (struct cl_packet_struct(packet) name = {                  \
                cl_packet_header(packet)                               \
        },                                                             \
        *_loop_terminate = &name;                                      \
        __builtin_expect(_loop_terminate != NULL, 1);                  \
        ({                                                             \
                struct v3d_cl_out *cl_out = cl_start(cl);              \
                cl_packet_pack(packet)(cl, (uint8_t *)cl_out, &name);  \
                cl_advance(&cl_out, cl_packet_length(packet));         \
                cl_end(cl, cl_out);                                    \
                _loop_terminate = NULL;                                \
        }))                                                            \

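/* Like cl_emit(), but ORs the freshly packed bytes over an already-packed
 * template (prepacked), so fields baked into the template earlier are
 * preserved.
 */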
#define cl_emit_with_prepacked(cl, packet, prepacked, name)            \
        for (struct cl_packet_struct(packet) name = {                  \
                cl_packet_header(packet)                               \
        },                                                             \
        *_loop_terminate = &name;                                      \
        __builtin_expect(_loop_terminate != NULL, 1);                  \
        ({                                                             \
                struct v3d_cl_out *cl_out = cl_start(cl);              \
                uint8_t packed[cl_packet_length(packet)];              \
                cl_packet_pack(packet)(cl, packed, &name);             \
                for (int _i = 0; _i < cl_packet_length(packet); _i++)  \
                        ((uint8_t *)cl_out)[_i] = packed[_i] | (prepacked)[_i]; \
                cl_advance(&cl_out, cl_packet_length(packet));         \
                cl_end(cl, cl_out);                                    \
                _loop_terminate = NULL;                                \
        }))                                                            \

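/* Copies size bytes of already-packed data straight into the CL. */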
#define cl_emit_prepacked_sized(cl, packet, size) do {                 \
        memcpy((cl)->next, packet, size);                              \
        cl_advance(&(cl)->next, size);                                 \
} while (0)

#define cl_emit_prepacked(cl, packet) \
        cl_emit_prepacked_sized(cl, packet, sizeof(*(packet)))

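/* Packs a struct into a caller-provided buffer instead of a CL.  Illustrative
 * usage, with SOME_PACKET and some_field as placeholders:
 *
 *     uint8_t packed[cl_packet_length(SOME_PACKET)];
 *     v3dx_pack(packed, SOME_PACKET, data) {
 *             data.some_field = value;
 *     }
 */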
#define v3dx_pack(packed, packet, name)                                \
        for (struct cl_packet_struct(packet) name = {                  \
                cl_packet_header(packet)                               \
        },                                                             \
        *_loop_terminate = &name;                                      \
        __builtin_expect(_loop_terminate != NULL, 1);                  \
        ({                                                             \
                cl_packet_pack(packet)(NULL, (uint8_t *)packed, &name); \
                VG(VALGRIND_CHECK_MEM_IS_DEFINED((uint8_t *)packed,    \
                                                 cl_packet_length(packet))); \
                _loop_terminate = NULL;                                \
        }))                                                            \

/**
 * Helper function called by the XML-generated pack functions for filling in
 * an address field in shader records.
 *
 * Since we have a private address space as of VC5, our BOs can have lifelong
 * offsets, and all the kernel needs to know is which BOs need to be paged in
 * for this exec.
 */
static inline void
cl_pack_emit_reloc(struct v3d_cl *cl, const struct v3d_cl_reloc *reloc)
{
        if (reloc->bo)
                v3d_job_add_bo(cl->job, reloc->bo);
}

#endif /* VC5_CL_H */