• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2014, 2015 Red Hat.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26 
27 #include "util/format/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33 
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virtio-gpu/virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39 #include "virgl_video.h"
40 
41 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
42 
/* Build a designated initializer mapping PIPE_FORMAT_x -> VIRGL_FORMAT_x.
 * Only works for formats that share the same name on both sides. */
#define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,

/* pipe -> virgl format conversion table, indexed by enum pipe_format.
 * Entries not listed here are zero-initialized, which pipe_to_virgl_format()
 * treats as "not in the table" (index 0, PIPE_FORMAT_NONE, legitimately
 * maps to 0). */
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(NONE)
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(I8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(UYVY)
   CONV_FORMAT(YUYV)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(X8B8G8R8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R32_FIXED)
   CONV_FORMAT(R32G32_FIXED)
   CONV_FORMAT(R32G32B32_FIXED)
   CONV_FORMAT(R32G32B32A32_FIXED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(R8G8_B8G8_UNORM)
   CONV_FORMAT(G8R8_G8B8_UNORM)
   CONV_FORMAT(R8SG8SB8UX8U_NORM)
   CONV_FORMAT(R5SG5SB6U_NORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(R1_UNORM)
   CONV_FORMAT(R10G10B10X2_USCALED)
   CONV_FORMAT(R10G10B10X2_SNORM)
   CONV_FORMAT(L4A4_UNORM)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R10SG10SB10SA2U_NORM)
   CONV_FORMAT(R8G8Bx_SNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(I8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(I16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(I16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(I32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(A4R4_UNORM)
   CONV_FORMAT(R4A4_UNORM)
   CONV_FORMAT(R8A8_UNORM)
   CONV_FORMAT(A8R8_UNORM)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_USCALED)
   CONV_FORMAT(B10G10R10A2_SSCALED)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(I8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(I8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(I16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(I16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(I32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(I32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(ETC1_RGB8)
   CONV_FORMAT(R8G8_R8B8_UNORM)
   CONV_FORMAT(G8R8_B8R8_UNORM)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R8A8_SNORM)
   CONV_FORMAT(R16A16_UNORM)
   CONV_FORMAT(R16A16_SNORM)
   CONV_FORMAT(R16A16_FLOAT)
   CONV_FORMAT(R32A32_FLOAT)
   CONV_FORMAT(R8A8_UINT)
   CONV_FORMAT(R8A8_SINT)
   CONV_FORMAT(R16A16_UINT)
   CONV_FORMAT(R16A16_SINT)
   CONV_FORMAT(R32A32_UINT)
   CONV_FORMAT(R32A32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(B5G6R5_SRGB)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(G8R8_UNORM)
   CONV_FORMAT(G8R8_SNORM)
   CONV_FORMAT(G16R16_UNORM)
   CONV_FORMAT(G16R16_SNORM)
   CONV_FORMAT(A8B8G8R8_SNORM)
   CONV_FORMAT(X8B8G8R8_SNORM)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
   CONV_FORMAT(ASTC_4x4)
   CONV_FORMAT(ASTC_5x4)
   CONV_FORMAT(ASTC_5x5)
   CONV_FORMAT(ASTC_6x5)
   CONV_FORMAT(ASTC_6x6)
   CONV_FORMAT(ASTC_8x5)
   CONV_FORMAT(ASTC_8x6)
   CONV_FORMAT(ASTC_8x8)
   CONV_FORMAT(ASTC_10x5)
   CONV_FORMAT(ASTC_10x6)
   CONV_FORMAT(ASTC_10x8)
   CONV_FORMAT(ASTC_10x10)
   CONV_FORMAT(ASTC_12x10)
   CONV_FORMAT(ASTC_12x12)
   CONV_FORMAT(ASTC_4x4_SRGB)
   CONV_FORMAT(ASTC_5x4_SRGB)
   CONV_FORMAT(ASTC_5x5_SRGB)
   CONV_FORMAT(ASTC_6x5_SRGB)
   CONV_FORMAT(ASTC_6x6_SRGB)
   CONV_FORMAT(ASTC_8x5_SRGB)
   CONV_FORMAT(ASTC_8x6_SRGB)
   CONV_FORMAT(ASTC_8x8_SRGB)
   CONV_FORMAT(ASTC_10x5_SRGB)
   CONV_FORMAT(ASTC_10x6_SRGB)
   CONV_FORMAT(ASTC_10x8_SRGB)
   CONV_FORMAT(ASTC_10x10_SRGB)
   CONV_FORMAT(ASTC_12x10_SRGB)
   CONV_FORMAT(ASTC_12x12_SRGB)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(R8G8_SRGB)
   CONV_FORMAT(P010)
   CONV_FORMAT(P012)
   CONV_FORMAT(P016)
   CONV_FORMAT(B8G8R8_UNORM)
   CONV_FORMAT(R3G3B2_UNORM)
   CONV_FORMAT(R4G4B4A4_UNORM)
   CONV_FORMAT(R5G5B5A1_UNORM)
   CONV_FORMAT(R5G6B5_UNORM)
   CONV_FORMAT(Y8_400_UNORM)
   CONV_FORMAT(Y8_U8_V8_444_UNORM)
   CONV_FORMAT(Y8_U8_V8_422_UNORM)
   CONV_FORMAT(NV16)
   CONV_FORMAT(Y8_UNORM)
   CONV_FORMAT(YVYU)
   CONV_FORMAT(Z16_UNORM_S8_UINT)
   CONV_FORMAT(Z24_UNORM_S8_UINT_AS_R8G8B8A8)
   CONV_FORMAT(A1B5G5R5_UINT)
   CONV_FORMAT(A1B5G5R5_UNORM)
   CONV_FORMAT(A1R5G5B5_UINT)
   CONV_FORMAT(A1R5G5B5_UNORM)
   CONV_FORMAT(A2B10G10R10_UINT)
   CONV_FORMAT(A2B10G10R10_UNORM)
   CONV_FORMAT(A2R10G10B10_UINT)
   CONV_FORMAT(A2R10G10B10_UNORM)
   CONV_FORMAT(A4B4G4R4_UINT)
   CONV_FORMAT(A4R4G4B4_UINT)
   CONV_FORMAT(A4R4G4B4_UNORM)
   CONV_FORMAT(A8B8G8R8_SINT)
   CONV_FORMAT(A8B8G8R8_SSCALED)
   CONV_FORMAT(A8B8G8R8_UINT)
   CONV_FORMAT(A8B8G8R8_USCALED)
   CONV_FORMAT(A8R8G8B8_SINT)
   CONV_FORMAT(A8R8G8B8_SNORM)
   CONV_FORMAT(A8R8G8B8_UINT)
   CONV_FORMAT(ASTC_3x3x3)
   CONV_FORMAT(ASTC_3x3x3_SRGB)
   CONV_FORMAT(ASTC_4x3x3)
   CONV_FORMAT(ASTC_4x3x3_SRGB)
   CONV_FORMAT(ASTC_4x4x3)
   CONV_FORMAT(ASTC_4x4x3_SRGB)
   CONV_FORMAT(ASTC_4x4x4)
   CONV_FORMAT(ASTC_4x4x4_SRGB)
   CONV_FORMAT(ASTC_5x4x4)
   CONV_FORMAT(ASTC_5x4x4_SRGB)
   CONV_FORMAT(ASTC_5x5x4)
   CONV_FORMAT(ASTC_5x5x4_SRGB)
   CONV_FORMAT(ASTC_5x5x5)
   CONV_FORMAT(ASTC_5x5x5_SRGB)
   CONV_FORMAT(ASTC_6x5x5)
   CONV_FORMAT(ASTC_6x5x5_SRGB)
   CONV_FORMAT(ASTC_6x6x5)
   CONV_FORMAT(ASTC_6x6x5_SRGB)
   CONV_FORMAT(ASTC_6x6x6)
   CONV_FORMAT(ASTC_6x6x6_SRGB)
   CONV_FORMAT(ATC_RGB)
   CONV_FORMAT(ATC_RGBA_EXPLICIT)
   CONV_FORMAT(ATC_RGBA_INTERPOLATED)
   CONV_FORMAT(AYUV)
   CONV_FORMAT(B10G10R10A2_SINT)
   CONV_FORMAT(B10G10R10X2_SINT)
   CONV_FORMAT(B10G10R10X2_SNORM)
   CONV_FORMAT(B2G3R3_UINT)
   CONV_FORMAT(B4G4R4A4_UINT)
   CONV_FORMAT(B5G5R5A1_UINT)
   CONV_FORMAT(B5G6R5_UINT)
   CONV_FORMAT(B8G8R8A8_SINT)
   CONV_FORMAT(B8G8R8A8_SNORM)
   CONV_FORMAT(B8G8R8A8_SSCALED)
   CONV_FORMAT(B8G8R8A8_UINT)
   CONV_FORMAT(B8G8R8A8_USCALED)
   CONV_FORMAT(B8G8_R8G8_UNORM)
   CONV_FORMAT(B8G8R8_SINT)
   CONV_FORMAT(B8G8R8_SNORM)
   CONV_FORMAT(B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8_SSCALED)
   CONV_FORMAT(B8G8R8_UINT)
   CONV_FORMAT(B8G8R8_USCALED)
   CONV_FORMAT(B8G8R8X8_SINT)
   CONV_FORMAT(B8G8R8X8_SNORM)
   CONV_FORMAT(B8G8R8X8_UINT)
   CONV_FORMAT(B8R8_G8R8_UNORM)
   CONV_FORMAT(FXT1_RGB)
   CONV_FORMAT(FXT1_RGBA)
   CONV_FORMAT(G16R16_SINT)
   CONV_FORMAT(G8B8_G8R8_UNORM)
   CONV_FORMAT(G8_B8_R8_420_UNORM)
   CONV_FORMAT(G8_B8R8_420_UNORM)
   CONV_FORMAT(G8R8_SINT)
   CONV_FORMAT(P030)
   CONV_FORMAT(R10G10B10A2_SINT)
   CONV_FORMAT(R10G10B10X2_SINT)
   CONV_FORMAT(R3G3B2_UINT)
   CONV_FORMAT(R4G4B4A4_UINT)
   CONV_FORMAT(R4G4B4X4_UNORM)
   CONV_FORMAT(R5G5B5A1_UINT)
   CONV_FORMAT(R5G5B5X1_UNORM)
   CONV_FORMAT(R5G6B5_SRGB)
   CONV_FORMAT(R5G6B5_UINT)
   CONV_FORMAT(R64G64B64A64_SINT)
   CONV_FORMAT(R64G64B64A64_UINT)
   CONV_FORMAT(R64G64B64_SINT)
   CONV_FORMAT(R64G64B64_UINT)
   CONV_FORMAT(R64G64_SINT)
   CONV_FORMAT(R64G64_UINT)
   CONV_FORMAT(R64_SINT)
   CONV_FORMAT(R64_UINT)
   CONV_FORMAT(R8_B8_G8_420_UNORM)
   CONV_FORMAT(R8_B8G8_420_UNORM)
   CONV_FORMAT(R8B8_R8G8_UNORM)
   CONV_FORMAT(R8_G8_B8_420_UNORM)
   CONV_FORMAT(R8_G8B8_420_UNORM)
   CONV_FORMAT(R8_G8_B8_UNORM)
   CONV_FORMAT(VYUY)
   CONV_FORMAT(X1B5G5R5_UNORM)
   CONV_FORMAT(X1R5G5B5_UNORM)
   CONV_FORMAT(XYUV)
   CONV_FORMAT(X8B8G8R8_SINT)
   CONV_FORMAT(X8R8G8B8_SINT)
   CONV_FORMAT(X8R8G8B8_SNORM)
   CONV_FORMAT(Y16_U16_V16_420_UNORM)
   CONV_FORMAT(Y16_U16_V16_422_UNORM)
   CONV_FORMAT(Y16_U16V16_422_UNORM)
   CONV_FORMAT(Y16_U16_V16_444_UNORM)
   CONV_FORMAT(Y210)
   CONV_FORMAT(Y212)
   CONV_FORMAT(Y216)
   CONV_FORMAT(Y410)
   CONV_FORMAT(Y412)
   CONV_FORMAT(Y416)
   CONV_FORMAT(NV15)
   CONV_FORMAT(NV20)
   CONV_FORMAT(Y8_U8_V8_440_UNORM)
   CONV_FORMAT(R10_G10B10_420_UNORM)
   CONV_FORMAT(R10_G10B10_422_UNORM)
   CONV_FORMAT(X6G10_X6B10X6R10_420_UNORM)
   CONV_FORMAT(X4G12_X4B12X4R12_420_UNORM)
   CONV_FORMAT(X6R10_UNORM)
   CONV_FORMAT(X6R10X6G10_UNORM)
   CONV_FORMAT(X4R12_UNORM)
   CONV_FORMAT(X4R12X4G12_UNORM)
   CONV_FORMAT(R8_G8B8_422_UNORM)
};
#undef CONV_FORMAT
490 
pipe_to_virgl_format(enum pipe_format format)491 enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
492 {
493    enum virgl_formats vformat = virgl_formats_conv_table[format];
494    if (format != PIPE_FORMAT_NONE && !vformat)
495       debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
496    return vformat;
497 }
498 
virgl_to_pipe_format(enum virgl_formats format)499 enum pipe_format virgl_to_pipe_format(enum virgl_formats format)
500 {
501    enum pipe_format pformat;
502 
503    for (pformat = PIPE_FORMAT_NONE; pformat < PIPE_FORMAT_COUNT; pformat++)
504       if (virgl_formats_conv_table[pformat] == format)
505           return pformat;
506 
507    debug_printf("VIRGL: virgl format %u not in the format table\n", format);
508    return PIPE_FORMAT_NONE;
509 }
510 
virgl_encoder_write_cmd_dword(struct virgl_context * ctx,uint32_t dword)511 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
512                                         uint32_t dword)
513 {
514    int len = (dword >> 16);
515 
516    if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
517       ctx->base.flush(&ctx->base, NULL, 0);
518 
519    virgl_encoder_write_dword(ctx->cbuf, dword);
520    return 0;
521 }
522 
virgl_encoder_emit_resource(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_resource * res)523 static void virgl_encoder_emit_resource(struct virgl_screen *vs,
524                                         struct virgl_cmd_buf *buf,
525                                         struct virgl_resource *res)
526 {
527    struct virgl_winsys *vws = vs->vws;
528    if (res && res->hw_res)
529       vws->emit_res(vws, buf, res->hw_res, true);
530    else {
531       virgl_encoder_write_dword(buf, 0);
532    }
533 }
534 
virgl_encoder_write_res(struct virgl_context * ctx,struct virgl_resource * res)535 static void virgl_encoder_write_res(struct virgl_context *ctx,
536                                     struct virgl_resource *res)
537 {
538    struct virgl_screen *vs = virgl_screen(ctx->base.screen);
539    virgl_encoder_emit_resource(vs, ctx->cbuf, res);
540 }
541 
virgl_encode_bind_object(struct virgl_context * ctx,uint32_t handle,uint32_t object)542 int virgl_encode_bind_object(struct virgl_context *ctx,
543                             uint32_t handle, uint32_t object)
544 {
545    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
546    virgl_encoder_write_dword(ctx->cbuf, handle);
547    return 0;
548 }
549 
virgl_encode_delete_object(struct virgl_context * ctx,uint32_t handle,uint32_t object)550 int virgl_encode_delete_object(struct virgl_context *ctx,
551                               uint32_t handle, uint32_t object)
552 {
553    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
554    virgl_encoder_write_dword(ctx->cbuf, handle);
555    return 0;
556 }
557 
/* Encode a CREATE_OBJECT(BLEND) command for @blend_state under @handle.
 * Wire layout: handle, S0 (global enables), S1 (logicop func), then one
 * S2 dword per color buffer.  Always returns 0. */
int virgl_encode_blend_state(struct virgl_context *ctx,
                            uint32_t handle,
                            const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: context-wide blend enables. */
   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1: logic op function. */
   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S2[i]: per-render-target blend state. */
   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      /* We use alpha src factor to pass the advanced blend equation value
       * to the host. By doing so, we don't have to change the protocol.
       */
      uint32_t alpha = (i == 0 && blend_state->advanced_blend_func)
                        ? blend_state->advanced_blend_func
                        : blend_state->rt[i].alpha_src_factor;
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}
600 
/* Encode a CREATE_OBJECT(DSA) command for @dsa_state under @handle.
 * Wire layout: handle, S0 (depth/alpha state), S1 for each of the two
 * stencil faces, then the alpha reference value as raw float bits.
 * Always returns 0. */
int virgl_encode_dsa_state(struct virgl_context *ctx,
                          uint32_t handle,
                          const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: depth test/write and alpha test state. */
   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth_enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth_writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth_func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha_enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1[i]: per-face (front/back) stencil state. */
   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   /* Alpha reference value travels as its raw IEEE-754 bit pattern. */
   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha_ref_value));
   return 0;
}
/* Encode a CREATE_OBJECT(RASTERIZER) command for @state under @handle.
 * Wire layout: handle, then S0..S7 exactly as annotated below; float
 * fields travel as raw IEEE-754 bit patterns (fui).  Always returns 0. */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: all single-bit/small rasterizer flags packed into one dword. */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   /* S3: line stipple pattern/factor plus user clip plane mask. */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
683 
virgl_emit_shader_header(struct virgl_context * ctx,uint32_t handle,uint32_t len,uint32_t type,uint32_t offlen,uint32_t num_tokens)684 static void virgl_emit_shader_header(struct virgl_context *ctx,
685                                      uint32_t handle, uint32_t len,
686                                      uint32_t type, uint32_t offlen,
687                                      uint32_t num_tokens)
688 {
689    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
690    virgl_encoder_write_dword(ctx->cbuf, handle);
691    virgl_encoder_write_dword(ctx->cbuf, type);
692    virgl_encoder_write_dword(ctx->cbuf, offlen);
693    virgl_encoder_write_dword(ctx->cbuf, num_tokens);
694 }
695 
virgl_emit_shader_streamout(struct virgl_context * ctx,const struct pipe_stream_output_info * so_info)696 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
697                                         const struct pipe_stream_output_info *so_info)
698 {
699    int num_outputs = 0;
700    int i;
701    uint32_t tmp;
702 
703    if (so_info)
704       num_outputs = so_info->num_outputs;
705 
706    virgl_encoder_write_dword(ctx->cbuf, num_outputs);
707    if (num_outputs) {
708       for (i = 0; i < 4; i++)
709          virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
710 
711       for (i = 0; i < so_info->num_outputs; i++) {
712          tmp =
713            VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
714            VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
715            VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
716            VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
717            VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
718          virgl_encoder_write_dword(ctx->cbuf, tmp);
719          virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
720       }
721    }
722 }
723 
virgl_encode_shader_state(struct virgl_context * ctx,uint32_t handle,enum pipe_shader_type type,const struct pipe_stream_output_info * so_info,uint32_t cs_req_local_mem,const struct tgsi_token * tokens)724 int virgl_encode_shader_state(struct virgl_context *ctx,
725                               uint32_t handle,
726                               enum pipe_shader_type type,
727                               const struct pipe_stream_output_info *so_info,
728                               uint32_t cs_req_local_mem,
729                               const struct tgsi_token *tokens)
730 {
731    char *str, *sptr;
732    uint32_t shader_len, len;
733    bool bret;
734    int num_tokens = tgsi_num_tokens(tokens);
735    int str_total_size = 65536;
736    int retry_size = 1;
737    uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
738    bool first_pass;
739    str = CALLOC(1, str_total_size);
740    if (!str)
741       return -1;
742 
743    do {
744       int old_size;
745 
746       bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
747       if (bret == false) {
748          if (virgl_debug & VIRGL_DEBUG_VERBOSE)
749             debug_printf("Failed to translate shader in available space - trying again\n");
750          old_size = str_total_size;
751          str_total_size = 65536 * retry_size;
752          retry_size *= 2;
753          str = REALLOC(str, old_size, str_total_size);
754          if (!str)
755             return -1;
756       }
757    } while (bret == false && retry_size < 1024);
758 
759    if (bret == false)
760       return -1;
761 
762    if (virgl_debug & VIRGL_DEBUG_TGSI)
763       debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
764 
765    /* virglrenderer before addbd9c5058dcc9d561b20ab747aed58c53499da mis-counts
766     * the tokens needed for a BARRIER, so ask it to allocate some more space.
767     */
768    const char *barrier = str;
769    while ((barrier = strstr(barrier + 1, "BARRIER")))
770       num_tokens++;
771 
772    shader_len = strlen(str) + 1;
773 
774    left_bytes = shader_len;
775 
776    base_hdr_size = 5;
777    strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
778    first_pass = true;
779    sptr = str;
780    while (left_bytes) {
781       uint32_t length, offlen;
782       int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
783       if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
784          ctx->base.flush(&ctx->base, NULL, 0);
785 
786       thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
787 
788       length = MIN2(thispass, left_bytes);
789       len = ((length + 3) / 4) + hdr_len;
790 
791       if (first_pass)
792          offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
793       else
794          offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
795 
796       virgl_emit_shader_header(ctx, handle, len, virgl_shader_stage_convert(type), offlen, num_tokens);
797 
798       if (type == PIPE_SHADER_COMPUTE)
799          virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
800       else
801          virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
802 
803       virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
804 
805       sptr += length;
806       first_pass = false;
807       left_bytes -= length;
808    }
809 
810    FREE(str);
811    return 0;
812 }
813 
814 
virgl_encode_clear(struct virgl_context * ctx,unsigned buffers,const union pipe_color_union * color,double depth,unsigned stencil)815 int virgl_encode_clear(struct virgl_context *ctx,
816                       unsigned buffers,
817                       const union pipe_color_union *color,
818                       double depth, unsigned stencil)
819 {
820    int i;
821    uint64_t qword;
822 
823    STATIC_ASSERT(sizeof(qword) == sizeof(depth));
824    memcpy(&qword, &depth, sizeof(qword));
825 
826    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
827    virgl_encoder_write_dword(ctx->cbuf, buffers);
828    for (i = 0; i < 4; i++)
829       virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
830    virgl_encoder_write_qword(ctx->cbuf, qword);
831    virgl_encoder_write_dword(ctx->cbuf, stencil);
832    return 0;
833 }
834 
virgl_encode_clear_texture(struct virgl_context * ctx,struct virgl_resource * res,unsigned int level,const struct pipe_box * box,const void * data)835 int virgl_encode_clear_texture(struct virgl_context *ctx,
836                                struct virgl_resource *res,
837                                unsigned int level,
838                                const struct pipe_box *box,
839                                const void *data)
840 {
841    const struct util_format_description *desc = util_format_description(res->b.format);
842    unsigned block_bits = desc->block.bits;
843    uint32_t arr[4] = {0};
844    /* The spec describe <data> as a pointer to an array of between one
845     * and four components of texel data that will be used as the source
846     * for the constant fill value.
847     * Here, we are just copying the memory into <arr>. We do not try to
848     * re-create the data array. The host part will take care of interpreting
849     * the memory and applying the correct format to the clear call.
850     */
851    memcpy(&arr, data, block_bits / 8);
852 
853    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
854    virgl_encoder_write_res(ctx, res);
855    virgl_encoder_write_dword(ctx->cbuf, level);
856    virgl_encoder_write_dword(ctx->cbuf, box->x);
857    virgl_encoder_write_dword(ctx->cbuf, box->y);
858    virgl_encoder_write_dword(ctx->cbuf, box->z);
859    virgl_encoder_write_dword(ctx->cbuf, box->width);
860    virgl_encoder_write_dword(ctx->cbuf, box->height);
861    virgl_encoder_write_dword(ctx->cbuf, box->depth);
862    for (unsigned i = 0; i < 4; i++)
863       virgl_encoder_write_dword(ctx->cbuf, arr[i]);
864    return 0;
865 }
866 
virgl_encoder_set_framebuffer_state(struct virgl_context * ctx,const struct pipe_framebuffer_state * state)867 int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
868                                        const struct pipe_framebuffer_state *state)
869 {
870    struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
871    int i;
872 
873    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
874    virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
875    virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
876    for (i = 0; i < state->nr_cbufs; i++) {
877       struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
878       virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
879    }
880 
881    struct virgl_screen *rs = virgl_screen(ctx->base.screen);
882    if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
883       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
884       virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
885       virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
886    }
887    return 0;
888 }
889 
virgl_encoder_set_viewport_states(struct virgl_context * ctx,int start_slot,int num_viewports,const struct pipe_viewport_state * states)890 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
891                                       int start_slot,
892                                       int num_viewports,
893                                       const struct pipe_viewport_state *states)
894 {
895    int i,v;
896    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
897    virgl_encoder_write_dword(ctx->cbuf, start_slot);
898    for (v = 0; v < num_viewports; v++) {
899       for (i = 0; i < 3; i++)
900          virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
901       for (i = 0; i < 3; i++)
902          virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
903    }
904    return 0;
905 }
906 
virgl_encoder_create_vertex_elements(struct virgl_context * ctx,uint32_t handle,unsigned num_elements,const struct pipe_vertex_element * element)907 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
908                                         uint32_t handle,
909                                         unsigned num_elements,
910                                         const struct pipe_vertex_element *element)
911 {
912    int i;
913    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
914    virgl_encoder_write_dword(ctx->cbuf, handle);
915    for (i = 0; i < num_elements; i++) {
916       virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
917       virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
918       virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
919       virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
920    }
921    return 0;
922 }
923 
virgl_encoder_set_vertex_buffers(struct virgl_context * ctx,unsigned num_buffers,const struct pipe_vertex_buffer * buffers)924 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
925                                     unsigned num_buffers,
926                                     const struct pipe_vertex_buffer *buffers)
927 {
928    int i;
929    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
930    for (i = 0; i < num_buffers; i++) {
931       struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
932       virgl_encoder_write_dword(ctx->cbuf, ctx->vertex_elements ? ctx->vertex_elements->strides[i] : 0);
933       virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
934       virgl_encoder_write_res(ctx, res);
935    }
936    return 0;
937 }
938 
virgl_encoder_set_index_buffer(struct virgl_context * ctx,const struct virgl_indexbuf * ib)939 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
940                                   const struct virgl_indexbuf *ib)
941 {
942    int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
943    struct virgl_resource *res = NULL;
944    if (ib)
945       res = virgl_resource(ib->buffer);
946 
947    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
948    virgl_encoder_write_res(ctx, res);
949    if (ib) {
950       virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
951       virgl_encoder_write_dword(ctx->cbuf, ib->offset);
952    }
953    return 0;
954 }
955 
/* Encode a VIRGL_CCMD_DRAW_VBO command.  The command length selects which
 * optional trailing fields are present: the base form; the tess/drawid form
 * (adds vertices-per-patch and draw id); and the indirect form, which
 * additionally appends the indirect buffer parameters.  The dword order
 * below is the wire format and must not change.
 */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draw)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   /* Patch primitives need the per-patch vertex count; a non-zero draw id
    * also requires the extended form. */
   if (info->mode == MESA_PRIM_PATCHES || drawid_offset > 0)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   /* Indirect draws use the longest form. */
   if (indirect && indirect->buffer)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, draw->start);
   virgl_encoder_write_dword(ctx->cbuf, draw->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   /* index_bias only applies to indexed draws. */
   virgl_encoder_write_dword(ctx->cbuf, info->index_size ? draw->index_bias : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart ? info->restart_index : 0);
   /* Without valid bounds, send the widest possible index range. */
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->min_index : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->max_index : ~0);
   if (indirect && indirect->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, indirect->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, ctx->patch_vertices); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, drawid_offset); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
1000 
virgl_encoder_create_surface_common(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_surface * templat)1001 static int virgl_encoder_create_surface_common(struct virgl_context *ctx,
1002                                                uint32_t handle,
1003                                                struct virgl_resource *res,
1004                                                const struct pipe_surface *templat)
1005 {
1006    virgl_encoder_write_dword(ctx->cbuf, handle);
1007    virgl_encoder_write_res(ctx, res);
1008    virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(templat->format));
1009 
1010    assert(templat->texture->target != PIPE_BUFFER);
1011    virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
1012    virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
1013 
1014    return 0;
1015 }
1016 
virgl_encoder_create_surface(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_surface * templat)1017 int virgl_encoder_create_surface(struct virgl_context *ctx,
1018                                  uint32_t handle,
1019                                  struct virgl_resource *res,
1020                                  const struct pipe_surface *templat)
1021 {
1022    if (templat->nr_samples > 0) {
1023       ASSERTED struct virgl_screen *rs = virgl_screen(ctx->base.screen);
1024       assert(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_IMPLICIT_MSAA);
1025 
1026       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_MSAA_SURFACE, VIRGL_OBJ_MSAA_SURFACE_SIZE));
1027       virgl_encoder_create_surface_common(ctx, handle, res, templat);
1028       virgl_encoder_write_dword(ctx->cbuf, templat->nr_samples);
1029    } else {
1030       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
1031       virgl_encoder_create_surface_common(ctx, handle, res, templat);
1032    }
1033 
1034    return 0;
1035 }
1036 
virgl_encoder_create_so_target(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,unsigned buffer_offset,unsigned buffer_size)1037 int virgl_encoder_create_so_target(struct virgl_context *ctx,
1038                                   uint32_t handle,
1039                                   struct virgl_resource *res,
1040                                   unsigned buffer_offset,
1041                                   unsigned buffer_size)
1042 {
1043    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
1044    virgl_encoder_write_dword(ctx->cbuf, handle);
1045    virgl_encoder_write_res(ctx, res);
1046    virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
1047    virgl_encoder_write_dword(ctx->cbuf, buffer_size);
1048    return 0;
1049 }
1050 
/* Selects how the stride fields of a transfer3d command are filled in. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
1059 
virgl_encoder_transfer3d_common(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * xfer,enum virgl_transfer3d_encode_stride encode_stride)1060 static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
1061                                             struct virgl_cmd_buf *buf,
1062                                             struct virgl_transfer *xfer,
1063                                             enum virgl_transfer3d_encode_stride encode_stride)
1064 
1065 {
1066    struct pipe_transfer *transfer = &xfer->base;
1067    unsigned stride;
1068    uintptr_t layer_stride;
1069 
1070    if (encode_stride == virgl_transfer3d_explicit_stride) {
1071       stride = transfer->stride;
1072       layer_stride = transfer->layer_stride;
1073    } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
1074       stride = 0;
1075       layer_stride = 0;
1076    } else {
1077       assert(!"Invalid virgl_transfer3d_encode_stride value");
1078    }
1079 
1080    /* We cannot use virgl_encoder_emit_resource with transfer->resource here
1081     * because transfer->resource might have a different virgl_hw_res than what
1082     * this transfer targets, which is saved in xfer->hw_res.
1083     */
1084    vs->vws->emit_res(vs->vws, buf, xfer->hw_res, true);
1085    virgl_encoder_write_dword(buf, transfer->level);
1086    virgl_encoder_write_dword(buf, transfer->usage);
1087    virgl_encoder_write_dword(buf, stride);
1088    virgl_encoder_write_dword(buf, layer_stride);
1089    virgl_encoder_write_dword(buf, transfer->box.x);
1090    virgl_encoder_write_dword(buf, transfer->box.y);
1091    virgl_encoder_write_dword(buf, transfer->box.z);
1092    virgl_encoder_write_dword(buf, transfer->box.width);
1093    virgl_encoder_write_dword(buf, transfer->box.height);
1094    virgl_encoder_write_dword(buf, transfer->box.depth);
1095 }
1096 
/* Stub: no flush-frontbuffer command is currently sent over the wire; the
 * commented-out lines show the intended encoding.  Kept so the encoder
 * interface stays complete.
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                   struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
1104 
virgl_encode_sampler_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_sampler_state * state)1105 int virgl_encode_sampler_state(struct virgl_context *ctx,
1106                               uint32_t handle,
1107                               const struct pipe_sampler_state *state)
1108 {
1109    uint32_t tmp;
1110    int i;
1111    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
1112    virgl_encoder_write_dword(ctx->cbuf, handle);
1113 
1114    tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
1115       VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
1116       VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
1117       VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
1118       VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
1119       VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
1120       VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
1121       VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
1122       VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map) |
1123       VIRGL_OBJ_SAMPLE_STATE_S0_MAX_ANISOTROPY(state->max_anisotropy);
1124 
1125    virgl_encoder_write_dword(ctx->cbuf, tmp);
1126    virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
1127    virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
1128    virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
1129    for (i = 0; i <  4; i++)
1130       virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
1131    return 0;
1132 }
1133 
1134 
virgl_encode_sampler_view(struct virgl_context * ctx,uint32_t handle,struct virgl_resource * res,const struct pipe_sampler_view * state)1135 int virgl_encode_sampler_view(struct virgl_context *ctx,
1136                              uint32_t handle,
1137                              struct virgl_resource *res,
1138                              const struct pipe_sampler_view *state)
1139 {
1140    unsigned elem_size = util_format_get_blocksize(state->format);
1141    struct virgl_screen *rs = virgl_screen(ctx->base.screen);
1142    uint32_t tmp;
1143    uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
1144    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
1145    virgl_encoder_write_dword(ctx->cbuf, handle);
1146    virgl_encoder_write_res(ctx, res);
1147    if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
1148      dword_fmt_target |= (state->target << 24);
1149    virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
1150    if (res->b.target == PIPE_BUFFER) {
1151       virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
1152       virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
1153    } else {
1154       if (res->metadata.plane) {
1155          assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
1156          virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
1157       } else {
1158          virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
1159       }
1160       virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
1161    }
1162    tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
1163       VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
1164       VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
1165       VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
1166    virgl_encoder_write_dword(ctx->cbuf, tmp);
1167    return 0;
1168 }
1169 
virgl_encode_set_sampler_views(struct virgl_context * ctx,enum pipe_shader_type shader_type,uint32_t start_slot,uint32_t num_views,struct virgl_sampler_view ** views)1170 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
1171                                   enum pipe_shader_type shader_type,
1172                                   uint32_t start_slot,
1173                                   uint32_t num_views,
1174                                   struct virgl_sampler_view **views)
1175 {
1176    int i;
1177    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
1178    virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader_type));
1179    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1180    for (i = 0; i < num_views; i++) {
1181       uint32_t handle = views[i] ? views[i]->handle : 0;
1182       virgl_encoder_write_dword(ctx->cbuf, handle);
1183    }
1184    return 0;
1185 }
1186 
virgl_encode_bind_sampler_states(struct virgl_context * ctx,enum pipe_shader_type shader_type,uint32_t start_slot,uint32_t num_handles,uint32_t * handles)1187 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
1188                                     enum pipe_shader_type shader_type,
1189                                     uint32_t start_slot,
1190                                     uint32_t num_handles,
1191                                     uint32_t *handles)
1192 {
1193    int i;
1194    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
1195    virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader_type));
1196    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1197    for (i = 0; i < num_handles; i++)
1198       virgl_encoder_write_dword(ctx->cbuf, handles[i]);
1199    return 0;
1200 }
1201 
virgl_encoder_write_constant_buffer(struct virgl_context * ctx,enum pipe_shader_type shader,uint32_t index,uint32_t size,const void * data)1202 int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
1203                                        enum pipe_shader_type shader,
1204                                        uint32_t index,
1205                                        uint32_t size,
1206                                        const void *data)
1207 {
1208    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
1209    virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
1210    virgl_encoder_write_dword(ctx->cbuf, index);
1211    if (data)
1212       virgl_encoder_write_block(ctx->cbuf, data, size * 4);
1213    return 0;
1214 }
1215 
virgl_encoder_set_uniform_buffer(struct virgl_context * ctx,enum pipe_shader_type shader,uint32_t index,uint32_t offset,uint32_t length,struct virgl_resource * res)1216 int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
1217                                      enum pipe_shader_type shader,
1218                                      uint32_t index,
1219                                      uint32_t offset,
1220                                      uint32_t length,
1221                                      struct virgl_resource *res)
1222 {
1223    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
1224    virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
1225    virgl_encoder_write_dword(ctx->cbuf, index);
1226    virgl_encoder_write_dword(ctx->cbuf, offset);
1227    virgl_encoder_write_dword(ctx->cbuf, length);
1228    virgl_encoder_write_res(ctx, res);
1229    return 0;
1230 }
1231 
1232 
virgl_encoder_set_stencil_ref(struct virgl_context * ctx,const struct pipe_stencil_ref * ref)1233 int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
1234                                  const struct pipe_stencil_ref *ref)
1235 {
1236    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
1237    virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
1238    return 0;
1239 }
1240 
virgl_encoder_set_blend_color(struct virgl_context * ctx,const struct pipe_blend_color * color)1241 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
1242                                  const struct pipe_blend_color *color)
1243 {
1244    int i;
1245    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
1246    for (i = 0; i < 4; i++)
1247       virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
1248    return 0;
1249 }
1250 
virgl_encoder_set_scissor_state(struct virgl_context * ctx,unsigned start_slot,int num_scissors,const struct pipe_scissor_state * ss)1251 int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
1252                                     unsigned start_slot,
1253                                     int num_scissors,
1254                                     const struct pipe_scissor_state *ss)
1255 {
1256    int i;
1257    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
1258    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1259    for (i = 0; i < num_scissors; i++) {
1260       virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
1261       virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
1262    }
1263    return 0;
1264 }
1265 
virgl_encoder_set_polygon_stipple(struct virgl_context * ctx,const struct pipe_poly_stipple * ps)1266 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
1267                                       const struct pipe_poly_stipple *ps)
1268 {
1269    int i;
1270    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
1271    for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
1272       virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
1273    }
1274 }
1275 
virgl_encoder_set_sample_mask(struct virgl_context * ctx,unsigned sample_mask)1276 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
1277                                   unsigned sample_mask)
1278 {
1279    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
1280    virgl_encoder_write_dword(ctx->cbuf, sample_mask);
1281 }
1282 
virgl_encoder_set_min_samples(struct virgl_context * ctx,unsigned min_samples)1283 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
1284                                   unsigned min_samples)
1285 {
1286    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
1287    virgl_encoder_write_dword(ctx->cbuf, min_samples);
1288 }
1289 
virgl_encoder_set_clip_state(struct virgl_context * ctx,const struct pipe_clip_state * clip)1290 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
1291                                  const struct pipe_clip_state *clip)
1292 {
1293    int i, j;
1294    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
1295    for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
1296       for (j = 0; j < 4; j++) {
1297          virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
1298       }
1299    }
1300 }
1301 
virgl_encode_resource_copy_region(struct virgl_context * ctx,struct virgl_resource * dst_res,unsigned dst_level,unsigned dstx,unsigned dsty,unsigned dstz,struct virgl_resource * src_res,unsigned src_level,const struct pipe_box * src_box)1302 int virgl_encode_resource_copy_region(struct virgl_context *ctx,
1303                                      struct virgl_resource *dst_res,
1304                                      unsigned dst_level,
1305                                      unsigned dstx, unsigned dsty, unsigned dstz,
1306                                      struct virgl_resource *src_res,
1307                                      unsigned src_level,
1308                                      const struct pipe_box *src_box)
1309 {
1310    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
1311    virgl_encoder_write_res(ctx, dst_res);
1312    virgl_encoder_write_dword(ctx->cbuf, dst_level);
1313    virgl_encoder_write_dword(ctx->cbuf, dstx);
1314    virgl_encoder_write_dword(ctx->cbuf, dsty);
1315    virgl_encoder_write_dword(ctx->cbuf, dstz);
1316    virgl_encoder_write_res(ctx, src_res);
1317    virgl_encoder_write_dword(ctx->cbuf, src_level);
1318    virgl_encoder_write_dword(ctx->cbuf, src_box->x);
1319    virgl_encoder_write_dword(ctx->cbuf, src_box->y);
1320    virgl_encoder_write_dword(ctx->cbuf, src_box->z);
1321    virgl_encoder_write_dword(ctx->cbuf, src_box->width);
1322    virgl_encoder_write_dword(ctx->cbuf, src_box->height);
1323    virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
1324    return 0;
1325 }
1326 
virgl_encode_blit(struct virgl_context * ctx,struct virgl_resource * dst_res,struct virgl_resource * src_res,const struct pipe_blit_info * blit)1327 int virgl_encode_blit(struct virgl_context *ctx,
1328                      struct virgl_resource *dst_res,
1329                      struct virgl_resource *src_res,
1330                      const struct pipe_blit_info *blit)
1331 {
1332    uint32_t tmp;
1333    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
1334    tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
1335       VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
1336       VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
1337       VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
1338       VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
1339    virgl_encoder_write_dword(ctx->cbuf, tmp);
1340    virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
1341    virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));
1342 
1343    virgl_encoder_write_res(ctx, dst_res);
1344    virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
1345    virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
1346    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
1347    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
1348    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
1349    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
1350    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
1351    virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);
1352 
1353    virgl_encoder_write_res(ctx, src_res);
1354    virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
1355    virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
1356    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
1357    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
1358    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
1359    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
1360    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
1361    virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
1362    return 0;
1363 }
1364 
/* Create a query object; type and index share one packed dword, followed
 * by the result-buffer offset and the backing resource.
 */
int virgl_encoder_create_query(struct virgl_context *ctx,
                              uint32_t handle,
                              uint query_type,
                              uint query_index,
                              struct virgl_resource *res,
                              uint32_t offset)
{
   const uint32_t type_index = (query_type & 0xffff) | (query_index << 16);

   virgl_encoder_write_cmd_dword(ctx,
      VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type_index);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}
1379 
/* Encode the start of the query identified by 'handle'. */
int virgl_encoder_begin_query(struct virgl_context *ctx,
                             uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}
1387 
/* Encode the end of the query identified by 'handle'. */
int virgl_encoder_end_query(struct virgl_context *ctx,
                           uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}
1395 
/* Ask the host for the result of query 'handle'.
 *
 * 'wait' is encoded as 0/1 and tells the host whether to block until
 * the result is available.
 */
int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                  uint32_t handle, bool wait)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   return 0;
}
1404 
/* Encode a conditional-rendering predicate: subsequent rendering is
 * gated on query 'handle' according to 'mode'.  A handle of 0 is used
 * by callers to disable the condition.
 */
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                  uint32_t handle, bool condition,
                                  enum pipe_render_cond_flag mode)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, condition);
   virgl_encoder_write_dword(ctx->cbuf, mode);
   return 0;
}
1415 
virgl_encoder_set_so_targets(struct virgl_context * ctx,unsigned num_targets,struct pipe_stream_output_target ** targets,unsigned append_bitmask)1416 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
1417                                 unsigned num_targets,
1418                                 struct pipe_stream_output_target **targets,
1419                                 unsigned append_bitmask)
1420 {
1421    int i;
1422 
1423    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
1424    virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
1425    for (i = 0; i < num_targets; i++) {
1426       struct virgl_so_target *tg = virgl_so_target(targets[i]);
1427       virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
1428    }
1429    return 0;
1430 }
1431 
1432 
/* Switch the host to sub-context 'sub_ctx_id'. */
int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}
1439 
/* Create host sub-context 'sub_ctx_id'. */
int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}
1446 
/* Destroy host sub-context 'sub_ctx_id'. */
int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}
1453 
/* Encode a link-shader command carrying one handle per pipeline stage.
 *
 * 'handles' is indexed by enum pipe_shader_type; the wire format
 * mandates the order VS, FS, GS, TCS, TES, CS.  Always returns 0.
 */
int virgl_encode_link_shader(struct virgl_context *ctx, uint32_t *handles)
{
   /* Protocol-mandated stage emission order. */
   static const enum pipe_shader_type wire_order[] = {
      PIPE_SHADER_VERTEX,
      PIPE_SHADER_FRAGMENT,
      PIPE_SHADER_GEOMETRY,
      PIPE_SHADER_TESS_CTRL,
      PIPE_SHADER_TESS_EVAL,
      PIPE_SHADER_COMPUTE,
   };
   unsigned n;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LINK_SHADER, 0, VIRGL_LINK_SHADER_SIZE));
   for (n = 0; n < sizeof(wire_order) / sizeof(wire_order[0]); n++)
      virgl_encoder_write_dword(ctx->cbuf, handles[wire_order[n]]);
   return 0;
}
1465 
/* Bind shader object 'handle' to pipeline stage 'type'.
 *
 * The gallium stage enum is converted to the virgl wire value before
 * encoding.
 */
int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle,
                             enum pipe_shader_type type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(type));
   return 0;
}
1475 
virgl_encode_set_tess_state(struct virgl_context * ctx,const float outer[4],const float inner[2])1476 int virgl_encode_set_tess_state(struct virgl_context *ctx,
1477                                 const float outer[4],
1478                                 const float inner[2])
1479 {
1480    int i;
1481    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
1482    for (i = 0; i < 4; i++)
1483       virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
1484    for (i = 0; i < 2; i++)
1485       virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
1486    return 0;
1487 }
1488 
virgl_encode_set_shader_buffers(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1489 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
1490                                     enum pipe_shader_type shader,
1491                                     unsigned start_slot, unsigned count,
1492                                     const struct pipe_shader_buffer *buffers)
1493 {
1494    int i;
1495    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
1496 
1497    virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
1498    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1499    for (i = 0; i < count; i++) {
1500       if (buffers && buffers[i].buffer) {
1501          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1502          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1503          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1504          virgl_encoder_write_res(ctx, res);
1505 
1506          util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
1507                buffers[i].buffer_offset + buffers[i].buffer_size);
1508          virgl_resource_dirty(res, 0);
1509       } else {
1510          virgl_encoder_write_dword(ctx->cbuf, 0);
1511          virgl_encoder_write_dword(ctx->cbuf, 0);
1512          virgl_encoder_write_dword(ctx->cbuf, 0);
1513       }
1514    }
1515    return 0;
1516 }
1517 
virgl_encode_set_hw_atomic_buffers(struct virgl_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1518 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
1519                                        unsigned start_slot, unsigned count,
1520                                        const struct pipe_shader_buffer *buffers)
1521 {
1522    int i;
1523    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
1524 
1525    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1526    for (i = 0; i < count; i++) {
1527       if (buffers && buffers[i].buffer) {
1528          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1529          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1530          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1531          virgl_encoder_write_res(ctx, res);
1532 
1533          util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
1534                buffers[i].buffer_offset + buffers[i].buffer_size);
1535          virgl_resource_dirty(res, 0);
1536       } else {
1537          virgl_encoder_write_dword(ctx->cbuf, 0);
1538          virgl_encoder_write_dword(ctx->cbuf, 0);
1539          virgl_encoder_write_dword(ctx->cbuf, 0);
1540       }
1541    }
1542    return 0;
1543 }
1544 
virgl_encode_set_shader_images(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_image_view * images)1545 int virgl_encode_set_shader_images(struct virgl_context *ctx,
1546                                    enum pipe_shader_type shader,
1547                                    unsigned start_slot, unsigned count,
1548                                    const struct pipe_image_view *images)
1549 {
1550    int i;
1551    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
1552 
1553    virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
1554    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1555    for (i = 0; i < count; i++) {
1556       if (images && images[i].resource) {
1557          struct virgl_resource *res = virgl_resource(images[i].resource);
1558          virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
1559          virgl_encoder_write_dword(ctx->cbuf, images[i].access);
1560          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1561          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1562          virgl_encoder_write_res(ctx, res);
1563 
1564          if (res->b.target == PIPE_BUFFER) {
1565             util_range_add(&res->b, &res->valid_buffer_range, images[i].u.buf.offset,
1566                   images[i].u.buf.offset + images[i].u.buf.size);
1567          }
1568          virgl_resource_dirty(res, images[i].u.tex.level);
1569       } else {
1570          virgl_encoder_write_dword(ctx->cbuf, 0);
1571          virgl_encoder_write_dword(ctx->cbuf, 0);
1572          virgl_encoder_write_dword(ctx->cbuf, 0);
1573          virgl_encoder_write_dword(ctx->cbuf, 0);
1574          virgl_encoder_write_dword(ctx->cbuf, 0);
1575       }
1576    }
1577    return 0;
1578 }
1579 
/* Encode a memory barrier with the given PIPE_BARRIER_* flags. */
int virgl_encode_memory_barrier(struct virgl_context *ctx,
                                unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}
1587 
virgl_encode_launch_grid(struct virgl_context * ctx,const struct pipe_grid_info * grid_info)1588 int virgl_encode_launch_grid(struct virgl_context *ctx,
1589                              const struct pipe_grid_info *grid_info)
1590 {
1591    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
1592    virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
1593    virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
1594    virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
1595    virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
1596    virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
1597    virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
1598    if (grid_info->indirect) {
1599       struct virgl_resource *res = virgl_resource(grid_info->indirect);
1600       virgl_encoder_write_res(ctx, res);
1601    } else
1602       virgl_encoder_write_dword(ctx->cbuf, 0);
1603    virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
1604    return 0;
1605 }
1606 
/* Encode a texture barrier with the given PIPE_TEXTURE_BARRIER_* flags. */
int virgl_encode_texture_barrier(struct virgl_context *ctx,
                                 unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}
1614 
virgl_encode_host_debug_flagstring(struct virgl_context * ctx,const char * flagstring)1615 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1616                                        const char *flagstring)
1617 {
1618    unsigned long slen = strlen(flagstring) + 1;
1619    uint32_t sslen;
1620    uint32_t string_length;
1621 
1622    if (!slen)
1623       return 0;
1624 
1625    if (slen > 4 * 0xffff) {
1626       debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1627       slen = 4 * 0xffff;
1628    }
1629 
1630    sslen = (uint32_t )(slen + 3) / 4;
1631    string_length = (uint32_t)MIN2(sslen * 4, slen);
1632 
1633    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1634    virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1635    return 0;
1636 }
1637 
/* Encode a renderer tweak: a (tweak id, value) pair the host applies
 * to adjust its behavior. */
int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, tweak);
   virgl_encoder_write_dword(ctx->cbuf, value);
   return 0;
}
1645 
1646 
/* Ask the host to write the result of query 'handle' into query buffer
 * object 'res' at 'offset'.
 *
 * 'result_type' selects the result encoding and 'index' the component;
 * 'wait' (encoded 0/1) tells the host whether to block until the
 * result is ready.
 */
int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
                                      uint32_t handle,
                                      struct virgl_resource *res, bool wait,
                                      uint32_t result_type,
                                      uint32_t offset,
                                      uint32_t index)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   virgl_encoder_write_dword(ctx->cbuf, result_type);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, index);
   return 0;
}
1663 
/* Encode a TRANSFER3D command into 'buf'.
 *
 * 'direction' selects to-host vs from-host.  For level-0, depth-1 2D
 * textures backed by guest blob memory the guest stride is sent
 * explicitly; otherwise the host infers the stride from the resource
 * layout.
 */
void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
                           struct virgl_transfer *trans, uint32_t direction)
{
   uint32_t command;
   struct virgl_resource *vres = virgl_resource(trans->base.resource);
   enum virgl_transfer3d_encode_stride stride_type =
        virgl_transfer3d_host_inferred_stride;

   /* Guest-blob 2D textures need the explicit guest stride. */
   if (trans->base.box.depth == 1 && trans->base.level == 0 &&
       trans->base.resource->target == PIPE_TEXTURE_2D &&
       vres->blob_mem == VIRGL_BLOB_MEM_HOST3D_GUEST)
      stride_type = virgl_transfer3d_explicit_stride;

   command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
   virgl_encoder_write_dword(buf, command);
   virgl_encoder_transfer3d_common(vs, buf, trans, stride_type);
   virgl_encoder_write_dword(buf, trans->offset);
   virgl_encoder_write_dword(buf, direction);
}
1683 
virgl_encode_copy_transfer(struct virgl_context * ctx,struct virgl_transfer * trans)1684 void virgl_encode_copy_transfer(struct virgl_context *ctx,
1685                                 struct virgl_transfer *trans)
1686 {
1687    uint32_t command;
1688    struct virgl_screen *vs = virgl_screen(ctx->base.screen);
1689    // set always synchronized to 1, second bit is used for direction
1690    uint32_t direction_and_synchronized = VIRGL_COPY_TRANSFER3D_FLAGS_SYNCHRONIZED;
1691 
1692    if (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) {
1693       if (trans->direction == VIRGL_TRANSFER_TO_HOST) {
1694          // do nothing, as 0 means transfer to host
1695       } else if (trans->direction == VIRGL_TRANSFER_FROM_HOST) {
1696          direction_and_synchronized |= VIRGL_COPY_TRANSFER3D_FLAGS_READ_FROM_HOST;
1697       } else {
1698          // something wrong happened here
1699          assert(0);
1700       }
1701    }
1702    assert(trans->copy_src_hw_res);
1703    command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
1704 
1705    virgl_encoder_write_cmd_dword(ctx, command);
1706    /* Copy transfers need to explicitly specify the stride, since it may differ
1707     * from the image stride.
1708     */
1709    virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
1710    vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, true);
1711    virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
1712    virgl_encoder_write_dword(ctx->cbuf, direction_and_synchronized);
1713 }
1714 
/* Pad the transfer command buffer up to VIRGL_MAX_TBUF_DWORDS with a
 * single END_TRANSFERS command whose length field covers the remaining
 * space (diff - 1 payload dwords after the header dword).
 */
void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
{
   uint32_t command, diff;
   diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
   if (diff) {
      command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
      virgl_encoder_write_dword(buf, command);
   }
}
1724 
/* Ask the host to write its memory info into resource 'res'. */
void virgl_encode_get_memory_info(struct virgl_context *ctx, struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_MEMORY_INFO, 0, 1));
   virgl_encoder_write_res(ctx, res);
}
1730 
virgl_encode_emit_string_marker(struct virgl_context * ctx,const char * message,int len)1731 void virgl_encode_emit_string_marker(struct virgl_context *ctx,
1732                                      const char *message, int len)
1733 {
1734    /* len is guaranteed to be non-negative but be defensive */
1735    assert(len >= 0);
1736    if (len <= 0)
1737       return;
1738 
1739    if (len > 4 * 0xffff) {
1740       debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1741       len = 4 * 0xffff;
1742    }
1743 
1744    uint32_t buf_len = (uint32_t )(len + 3) / 4 + 1;
1745    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_EMIT_STRING_MARKER, 0, buf_len));
1746    virgl_encoder_write_dword(ctx->cbuf, len);
1747    virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)message, len);
1748 }
1749 
virgl_encode_create_video_codec(struct virgl_context * ctx,struct virgl_video_codec * cdc)1750 void virgl_encode_create_video_codec(struct virgl_context *ctx,
1751                                      struct virgl_video_codec *cdc)
1752 {
1753    struct virgl_screen *rs = virgl_screen(ctx->base.screen);
1754    uint32_t len = rs->caps.caps.v2.host_feature_check_version >= 14 ? 8 : 7;
1755 
1756    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_VIDEO_CODEC, 0, len));
1757    virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
1758    virgl_encoder_write_dword(ctx->cbuf, cdc->base.profile);
1759    virgl_encoder_write_dword(ctx->cbuf, cdc->base.entrypoint);
1760    virgl_encoder_write_dword(ctx->cbuf, cdc->base.chroma_format);
1761    virgl_encoder_write_dword(ctx->cbuf, cdc->base.level);
1762    virgl_encoder_write_dword(ctx->cbuf, cdc->base.width);
1763    virgl_encoder_write_dword(ctx->cbuf, cdc->base.height);
1764    if (rs->caps.caps.v2.host_feature_check_version >= 14)
1765        virgl_encoder_write_dword(ctx->cbuf, cdc->base.max_references);
1766 }
1767 
/* Destroy the host video codec identified by cdc->handle. */
void virgl_encode_destroy_video_codec(struct virgl_context *ctx,
                                      struct virgl_video_codec *cdc)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_VIDEO_CODEC, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
}
1774 
/* Encode creation of a host video buffer.
 *
 * Payload is handle, format, width, height, then one resource handle
 * per plane — hence the command length 4 + num_planes.
 */
void virgl_encode_create_video_buffer(struct virgl_context *ctx,
                                      struct virgl_video_buffer *vbuf)
{
   unsigned i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_VIDEO_BUFFER, 0,
                                                 4 + vbuf->num_planes));
   virgl_encoder_write_dword(ctx->cbuf, vbuf->handle);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(vbuf->buf->buffer_format));
   virgl_encoder_write_dword(ctx->cbuf, vbuf->buf->width);
   virgl_encoder_write_dword(ctx->cbuf, vbuf->buf->height);
   for (i = 0; i < vbuf->num_planes; i++)
       virgl_encoder_write_res(ctx, virgl_resource(vbuf->plane_views[i]->texture));
}
1789 
/* Destroy the host video buffer identified by buf->handle. */
void virgl_encode_destroy_video_buffer(struct virgl_context *ctx,
                                       struct virgl_video_buffer *buf)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_VIDEO_BUFFER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
}
1796 
/* Begin a video frame on codec 'cdc' targeting video buffer 'buf'. */
void virgl_encode_begin_frame(struct virgl_context *ctx,
                              struct virgl_video_codec *cdc,
                              struct virgl_video_buffer *buf)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_FRAME, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
}
1805 
/* Encode a decode-bitstream command for codec 'cdc' into buffer 'buf'.
 *
 * The descriptor and bitstream come from the codec's per-slot staging
 * buffers at cdc->cur_buffer.  NOTE(review): the 'desc' and
 * 'desc_size' parameters are unused here — presumably the caller has
 * already copied the descriptor into desc_buffers[cur_buffer]; confirm
 * against the caller before relying on them.
 */
void virgl_encode_decode_bitstream(struct virgl_context *ctx,
                                   struct virgl_video_codec *cdc,
                                   struct virgl_video_buffer *buf,
                                   void *desc, uint32_t desc_size)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DECODE_BITSTREAM, 0, 5));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
   virgl_encoder_write_res(ctx, virgl_resource(cdc->desc_buffers[cdc->cur_buffer]));
   virgl_encoder_write_res(ctx, virgl_resource(cdc->bs_buffers[cdc->cur_buffer]));
   virgl_encoder_write_dword(ctx->cbuf, cdc->bs_size);
}
1818 
/* Encode an encode-bitstream command: codec 'cdc' encodes video buffer
 * 'buf' into target resource 'tgt', using the codec's per-slot
 * descriptor and feedback buffers at cdc->cur_buffer.
 */
void virgl_encode_encode_bitstream(struct virgl_context *ctx,
                                   struct virgl_video_codec *cdc,
                                   struct virgl_video_buffer *buf,
                                   struct virgl_resource *tgt)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_ENCODE_BITSTREAM, 0, 5));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
   virgl_encoder_write_res(ctx, tgt);
   virgl_encoder_write_res(ctx, virgl_resource(cdc->desc_buffers[cdc->cur_buffer]));
   virgl_encoder_write_res(ctx, virgl_resource(cdc->feed_buffers[cdc->cur_buffer]));
}
1831 
/* End the current video frame on codec 'cdc' for video buffer 'buf'. */
void virgl_encode_end_frame(struct virgl_context *ctx,
                            struct virgl_video_codec *cdc,
                            struct virgl_video_buffer *buf)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_FRAME, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
}
1840 
/* Encode a scissored clear of 'surf'.
 *
 * The first payload dword packs the render-condition flag and the
 * buffer mask, followed by the surface handle, four clear-color
 * channels, and the clear rectangle.  Always returns 0.
 */
int virgl_encode_clear_surface(struct virgl_context *ctx,
                               struct pipe_surface *surf,
                               unsigned buffers,
                               const union pipe_color_union *color,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled)
{
   unsigned chan;
   uint32_t s0 = VIRGL_CLEAR_SURFACE_S0_RENDER_CONDITION(render_condition_enabled) |
                 VIRGL_CLEAR_SURFACE_S0_BUFFERS(buffers);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_SURFACE, 0, VIRGL_CLEAR_SURFACE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, s0);
   virgl_encoder_write_dword(ctx->cbuf, virgl_surface(surf)->handle);

   /* Clear color, one dword per channel. */
   for (chan = 0; chan < 4; chan++)
      virgl_encoder_write_dword(ctx->cbuf, color->ui[chan]);

   /* Clear rectangle. */
   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, width);
   virgl_encoder_write_dword(ctx->cbuf, height);

   return 0;
}
1869