/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);

struct r600_cs_track {
        /* configuration we mirror so that we use the same code between kms/ums */
        u32 group_size;
        u32 nbanks;
        u32 npipes;
        /* value we track */
        u32 sq_config;
        u32 nsamples;
        u32 cb_color_base_last[8];
        struct radeon_bo *cb_color_bo[8];
        u64 cb_color_bo_mc[8];
        u32 cb_color_bo_offset[8];
        struct radeon_bo *cb_color_frag_bo[8]; /* unused */
        struct radeon_bo *cb_color_tile_bo[8]; /* unused */
        u32 cb_color_info[8];
        u32 cb_color_view[8];
        u32 cb_color_size_idx[8]; /* unused */
        u32 cb_target_mask;
        u32 cb_shader_mask;  /* unused */
        u32 cb_color_size[8];
        u32 vgt_strmout_en;
        u32 vgt_strmout_buffer_en;
        struct radeon_bo *vgt_strmout_bo[4];
        u64 vgt_strmout_bo_mc[4]; /* unused */
        u32 vgt_strmout_bo_offset[4];
        u32 vgt_strmout_size[4];
        u32 db_depth_control;
        u32 db_depth_info;
        u32 db_depth_size_idx;
        u32 db_depth_view;
        u32 db_depth_size;
        u32 db_offset;
        struct radeon_bo *db_bo;
        u64 db_bo_mc;
        bool sx_misc_kill_all_prims;
        bool cb_dirty;
        bool db_dirty;
        bool streamout_dirty;
        struct radeon_bo *htile_bo;
        u64 htile_offset;
        u32 htile_surface;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
        unsigned blockwidth;
        unsigned blockheight;
        unsigned blocksize;
        unsigned valid_color;
        enum radeon_family min_family;
};
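
/*
 * Each table entry below describes the tiling unit of a format: a block of
 * blockwidth x blockheight texels occupying blocksize bytes. Plain formats
 * use 1x1 blocks (e.g. 8_8_8_8 is 1x1 texel in 4 bytes), while the block
 * compressed BC formats use 4x4 blocks (e.g. BC1 is 4x4 texels in 8 bytes).
 * valid_color marks formats that may be used as a color buffer, and
 * min_family gates formats that only newer ASICs support.
 */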

static const struct gpu_formats color_formats_table[] = {
        /* 8 bit */
        FMT_8_BIT(V_038004_COLOR_8, 1),
        FMT_8_BIT(V_038004_COLOR_4_4, 1),
        FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
        FMT_8_BIT(V_038004_FMT_1, 0),

        /* 16-bit */
        FMT_16_BIT(V_038004_COLOR_16, 1),
        FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
        FMT_16_BIT(V_038004_COLOR_8_8, 1),
        FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
        FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
        FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
        FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
        FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

        /* 24-bit */
        FMT_24_BIT(V_038004_FMT_8_8_8),

        /* 32-bit */
        FMT_32_BIT(V_038004_COLOR_32, 1),
        FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
        FMT_32_BIT(V_038004_COLOR_16_16, 1),
        FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
        FMT_32_BIT(V_038004_COLOR_8_24, 1),
        FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
        FMT_32_BIT(V_038004_COLOR_24_8, 1),
        FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
        FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
        FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
        FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
        FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
        FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
        FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
        FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
        FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
        FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
        FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

        /* 48-bit */
        FMT_48_BIT(V_038004_FMT_16_16_16),
        FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

        /* 64-bit */
        FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
        FMT_64_BIT(V_038004_COLOR_32_32, 1),
        FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
        FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
        FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

        /* 96-bit */
        FMT_96_BIT(V_038004_FMT_32_32_32),
        FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

        /* 128-bit */
        FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
        FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

        [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
        [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

        /* block compressed formats */
        [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
        [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
        [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
        [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
        [V_038004_FMT_BC5] = { 4, 4, 16, 0 },
        [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
        [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */

        /* The other Evergreen formats */
        [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
};

bool r600_fmt_is_valid_color(u32 format)
{
        if (format >= ARRAY_SIZE(color_formats_table))
                return false;

        if (color_formats_table[format].valid_color)
                return true;

        return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
        if (format >= ARRAY_SIZE(color_formats_table))
                return false;

        if (family < color_formats_table[format].min_family)
                return false;

        if (color_formats_table[format].blockwidth > 0)
                return true;

        return false;
}

int r600_fmt_get_blocksize(u32 format)
{
        if (format >= ARRAY_SIZE(color_formats_table))
                return 0;

        return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
        unsigned bw;

        if (format >= ARRAY_SIZE(color_formats_table))
                return 0;

        bw = color_formats_table[format].blockwidth;
        if (bw == 0)
                return 0;

        return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
        unsigned bh;

        if (format >= ARRAY_SIZE(color_formats_table))
                return 0;

        bh = color_formats_table[format].blockheight;
        if (bh == 0)
                return 0;

        return (h + bh - 1) / bh;
}
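
/*
 * Example of the ceil-divide helpers above: a 70x50 BC1 texture uses 4x4
 * blocks, so r600_fmt_get_nblocksx() = (70 + 3) / 4 = 18 and
 * r600_fmt_get_nblocksy() = (50 + 3) / 4 = 13, i.e. the surface covers
 * 18 * 13 blocks of 8 bytes once multiplied by the blocksize.
 */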

struct array_mode_checker {
        int array_mode;
        u32 group_size;
        u32 nbanks;
        u32 npipes;
        u32 nsamples;
        u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
                                         u32 *pitch_align,
                                         u32 *height_align,
                                         u32 *depth_align,
                                         u64 *base_align)
{
        u32 tile_width = 8;
        u32 tile_height = 8;
        u32 macro_tile_width = values->nbanks;
        u32 macro_tile_height = values->npipes;
        u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
        u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

        switch (values->array_mode) {
        case ARRAY_LINEAR_GENERAL:
                /* technically tile_width/_height for pitch/height */
                *pitch_align = 1; /* tile_width */
                *height_align = 1; /* tile_height */
                *depth_align = 1;
                *base_align = 1;
                break;
        case ARRAY_LINEAR_ALIGNED:
                *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
                *height_align = 1;
                *depth_align = 1;
                *base_align = values->group_size;
                break;
        case ARRAY_1D_TILED_THIN1:
                *pitch_align = max((u32)tile_width,
                                   (u32)(values->group_size /
                                         (tile_height * values->blocksize * values->nsamples)));
                *height_align = tile_height;
                *depth_align = 1;
                *base_align = values->group_size;
                break;
        case ARRAY_2D_TILED_THIN1:
                *pitch_align = max((u32)macro_tile_width * tile_width,
                                   (u32)((values->group_size * values->nbanks) /
                                         (values->blocksize * values->nsamples * tile_width)));
                *height_align = macro_tile_height * tile_height;
                *depth_align = 1;
                *base_align = max(macro_tile_bytes,
                                  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
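
/*
 * Worked example for the ARRAY_2D_TILED_THIN1 case above, with a purely
 * hypothetical configuration of group_size = 256, nbanks = 4, npipes = 2,
 * blocksize = 4 and nsamples = 1:
 *   tile_bytes       = 8 * 8 * 4 * 1 = 256
 *   macro_tile_bytes = 4 * 2 * 256 = 2048
 *   pitch_align      = max(4 * 8, (256 * 4) / (4 * 1 * 8)) = 32 pixels
 *   height_align     = 2 * 8 = 16 pixels
 *   base_align       = max(2048, 32 * 4 * 16 * 1) = 2048 bytes
 */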

static void r600_cs_track_init(struct r600_cs_track *track)
{
        int i;

        /* assume DX9 mode */
        track->sq_config = DX9_CONSTS;
        for (i = 0; i < 8; i++) {
                track->cb_color_base_last[i] = 0;
                track->cb_color_size[i] = 0;
                track->cb_color_size_idx[i] = 0;
                track->cb_color_info[i] = 0;
                track->cb_color_view[i] = 0xFFFFFFFF;
                track->cb_color_bo[i] = NULL;
                track->cb_color_bo_offset[i] = 0xFFFFFFFF;
                track->cb_color_bo_mc[i] = 0xFFFFFFFF;
        }
        track->cb_target_mask = 0xFFFFFFFF;
        track->cb_shader_mask = 0xFFFFFFFF;
        track->cb_dirty = true;
        track->db_bo = NULL;
        track->db_bo_mc = 0xFFFFFFFF;
        /* assume the biggest format and that htile is enabled */
        track->db_depth_info = 7 | (1 << 25);
        track->db_depth_view = 0xFFFFC000;
        track->db_depth_size = 0xFFFFFFFF;
        track->db_depth_size_idx = 0;
        track->db_depth_control = 0xFFFFFFFF;
        track->db_dirty = true;
        track->htile_bo = NULL;
        track->htile_offset = 0xFFFFFFFF;
        track->htile_surface = 0;

        for (i = 0; i < 4; i++) {
                track->vgt_strmout_size[i] = 0;
                track->vgt_strmout_bo[i] = NULL;
                track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
                track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
        }
        track->streamout_dirty = true;
        track->sx_misc_kill_all_prims = false;
}
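
/*
 * Note the 0xFFFFFFFF defaults above act as poison values: any state the
 * command stream actually relies on must be programmed explicitly, otherwise
 * the validators below run against worst-case settings and reject the stream
 * (this reading is inferred from how the validators use these fields, not
 * from documentation).
 */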

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
        struct r600_cs_track *track = p->track;
        u32 slice_tile_max, size, tmp;
        u32 height, height_align, pitch, pitch_align, depth_align;
        u64 base_offset, base_align;
        struct array_mode_checker array_check;
        volatile u32 *ib = p->ib->ptr;
        unsigned array_mode;
        u32 format;

        if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
                dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
                return -EINVAL;
        }
        size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
        format = G_0280A0_FORMAT(track->cb_color_info[i]);
        if (!r600_fmt_is_valid_color(format)) {
                dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
                         __func__, __LINE__, format,
                         i, track->cb_color_info[i]);
                return -EINVAL;
        }
        /* pitch in pixels */
        pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
        slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
        slice_tile_max *= 64;
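        /* SLICE_TILE_MAX counts 8x8 tiles, so the multiply by 64 above
         * converts the tile count to pixels; dividing by the pitch on the
         * next line then recovers the surface height.
         */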
        height = slice_tile_max / pitch;
        if (height > 8192)
                height = 8192;
        array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

        base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
        array_check.array_mode = array_mode;
        array_check.group_size = track->group_size;
        array_check.nbanks = track->nbanks;
        array_check.npipes = track->npipes;
        array_check.nsamples = track->nsamples;
        array_check.blocksize = r600_fmt_get_blocksize(format);
        if (r600_get_array_mode_alignment(&array_check,
                                          &pitch_align, &height_align, &depth_align, &base_align)) {
                dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
                         G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
                         track->cb_color_info[i]);
                return -EINVAL;
        }
        switch (array_mode) {
        case V_0280A0_ARRAY_LINEAR_GENERAL:
                break;
        case V_0280A0_ARRAY_LINEAR_ALIGNED:
                break;
        case V_0280A0_ARRAY_1D_TILED_THIN1:
                /* avoid breaking userspace */
                if (height > 7)
                        height &= ~0x7;
                break;
        case V_0280A0_ARRAY_2D_TILED_THIN1:
                break;
        default:
                dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
                         G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
                         track->cb_color_info[i]);
                return -EINVAL;
        }

        if (!IS_ALIGNED(pitch, pitch_align)) {
                dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
                         __func__, __LINE__, pitch, pitch_align, array_mode);
                return -EINVAL;
        }
        if (!IS_ALIGNED(height, height_align)) {
                dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
                         __func__, __LINE__, height, height_align, array_mode);
                return -EINVAL;
        }
        if (!IS_ALIGNED(base_offset, base_align)) {
                dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
                         base_offset, base_align, array_mode);
                return -EINVAL;
        }

        /* check offset */
        tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
        switch (array_mode) {
        default:
        case V_0280A0_ARRAY_LINEAR_GENERAL:
        case V_0280A0_ARRAY_LINEAR_ALIGNED:
                tmp += track->cb_color_view[i] & 0xFF;
                break;
        case V_0280A0_ARRAY_1D_TILED_THIN1:
        case V_0280A0_ARRAY_2D_TILED_THIN1:
                tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
                break;
        }
        if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
                if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
                        /* the initial DDX does bad things with the CB size occasionally:
                         * it rounds up height too far for slice tile max but the BO is
                         * smaller. r600c,g also seem to flush at bad times in some apps
                         * resulting in bogus values here. So for linear just allow
                         * anything to avoid breaking broken userspace.
                         */
                } else {
                        dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
                                 __func__, i, array_mode,
                                 track->cb_color_bo_offset[i], tmp,
                                 radeon_bo_size(track->cb_color_bo[i]),
                                 pitch, height, r600_fmt_get_nblocksx(format, pitch),
                                 r600_fmt_get_nblocksy(format, height),
                                 r600_fmt_get_blocksize(format));
                        return -EINVAL;
                }
        }
        /* limit max tile */
        tmp = (height * pitch) >> 6;
        if (tmp < slice_tile_max)
                slice_tile_max = tmp;
        tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
              S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
        ib[track->cb_color_size_idx[i]] = tmp;
        return 0;
}

static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
        struct r600_cs_track *track = p->track;
        u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
        u32 height_align, pitch_align, depth_align;
        u32 pitch = 8192;
        u32 height = 8192;
        u64 base_offset, base_align;
        struct array_mode_checker array_check;
        int array_mode;
        volatile u32 *ib = p->ib->ptr;

        if (track->db_bo == NULL) {
                dev_warn(p->dev, "z/stencil with no depth buffer\n");
                return -EINVAL;
        }
        switch (G_028010_FORMAT(track->db_depth_info)) {
        case V_028010_DEPTH_16:
                bpe = 2;
                break;
        case V_028010_DEPTH_X8_24:
        case V_028010_DEPTH_8_24:
        case V_028010_DEPTH_X8_24_FLOAT:
        case V_028010_DEPTH_8_24_FLOAT:
        case V_028010_DEPTH_32_FLOAT:
                bpe = 4;
                break;
        case V_028010_DEPTH_X24_8_32_FLOAT:
                bpe = 8;
                break;
        default:
                dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
                return -EINVAL;
        }
        if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
                if (!track->db_depth_size_idx) {
                        dev_warn(p->dev, "z/stencil buffer size not set\n");
                        return -EINVAL;
                }
                tmp = radeon_bo_size(track->db_bo) - track->db_offset;
                tmp = (tmp / bpe) >> 6;
                if (!tmp) {
                        dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
                                 track->db_depth_size, bpe, track->db_offset,
                                 radeon_bo_size(track->db_bo));
                        return -EINVAL;
                }
                ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
        } else {
                size = radeon_bo_size(track->db_bo);
                /* pitch in pixels */
                pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
                slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
                slice_tile_max *= 64;
                height = slice_tile_max / pitch;
                if (height > 8192)
                        height = 8192;
                base_offset = track->db_bo_mc + track->db_offset;
                array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
                array_check.array_mode = array_mode;
                array_check.group_size = track->group_size;
                array_check.nbanks = track->nbanks;
                array_check.npipes = track->npipes;
                array_check.nsamples = track->nsamples;
                array_check.blocksize = bpe;
                if (r600_get_array_mode_alignment(&array_check,
                                                  &pitch_align, &height_align, &depth_align, &base_align)) {
                        dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
                                 G_028010_ARRAY_MODE(track->db_depth_info),
                                 track->db_depth_info);
                        return -EINVAL;
                }
                switch (array_mode) {
                case V_028010_ARRAY_1D_TILED_THIN1:
                        /* don't break userspace */
                        height &= ~0x7;
                        break;
                case V_028010_ARRAY_2D_TILED_THIN1:
                        break;
                default:
                        dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
                                 G_028010_ARRAY_MODE(track->db_depth_info),
                                 track->db_depth_info);
                        return -EINVAL;
                }

                if (!IS_ALIGNED(pitch, pitch_align)) {
                        dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
                                 __func__, __LINE__, pitch, pitch_align, array_mode);
                        return -EINVAL;
                }
                if (!IS_ALIGNED(height, height_align)) {
                        dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
                                 __func__, __LINE__, height, height_align, array_mode);
                        return -EINVAL;
                }
                if (!IS_ALIGNED(base_offset, base_align)) {
                        dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
                                 base_offset, base_align, array_mode);
                        return -EINVAL;
                }

                ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
                nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
                tmp = ntiles * bpe * 64 * nviews;
                if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
                        dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
                                 array_mode,
                                 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
                                 radeon_bo_size(track->db_bo));
                        return -EINVAL;
                }
        }

        /* hyperz */
        if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
                unsigned long size;
                unsigned nbx, nby;

                if (track->htile_bo == NULL) {
                        dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
                                 __func__, __LINE__, track->db_depth_info);
                        return -EINVAL;
                }
                if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
                        dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
                                 __func__, __LINE__, track->db_depth_size);
                        return -EINVAL;
                }

                nbx = pitch;
                nby = height;
                if (G_028D24_LINEAR(track->htile_surface)) {
                        /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
                        nbx = round_up(nbx, 16 * 8);
                        /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
                        nby = round_up(nby, track->npipes * 8);
                } else {
                        /* HTILE_WIDTH & HTILE_HEIGHT (8 or 4) form a 2-bit number */
                        tmp = track->htile_surface & 3;
                        /* the alignment is htile align * 8; htile align varies
                         * according to the number of pipes and the htile width
                         * and height
                         */
                        switch (track->npipes) {
                        case 8:
                                switch (tmp) {
                                case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
                                        nbx = round_up(nbx, 64 * 8);
                                        nby = round_up(nby, 64 * 8);
                                        break;
                                case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
                                case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 64 * 8);
                                        nby = round_up(nby, 32 * 8);
                                        break;
                                case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 32 * 8);
                                        nby = round_up(nby, 32 * 8);
                                        break;
                                default:
                                        return -EINVAL;
                                }
                                break;
                        case 4:
                                switch (tmp) {
                                case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
                                        nbx = round_up(nbx, 64 * 8);
                                        nby = round_up(nby, 32 * 8);
                                        break;
                                case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
                                case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 32 * 8);
                                        nby = round_up(nby, 32 * 8);
                                        break;
                                case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 32 * 8);
                                        nby = round_up(nby, 16 * 8);
                                        break;
                                default:
                                        return -EINVAL;
                                }
                                break;
                        case 2:
                                switch (tmp) {
                                case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
                                        nbx = round_up(nbx, 32 * 8);
                                        nby = round_up(nby, 32 * 8);
                                        break;
                                case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
                                case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 32 * 8);
                                        nby = round_up(nby, 16 * 8);
                                        break;
                                case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 16 * 8);
                                        nby = round_up(nby, 16 * 8);
                                        break;
                                default:
                                        return -EINVAL;
                                }
                                break;
                        case 1:
                                switch (tmp) {
                                case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
                                        nbx = round_up(nbx, 32 * 8);
                                        nby = round_up(nby, 16 * 8);
                                        break;
                                case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
                                case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 16 * 8);
                                        nby = round_up(nby, 16 * 8);
                                        break;
                                case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
                                        nbx = round_up(nbx, 16 * 8);
                                        nby = round_up(nby, 8 * 8);
                                        break;
                                default:
                                        return -EINVAL;
                                }
                                break;
                        default:
                                dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
                                         __func__, __LINE__, track->npipes);
                                return -EINVAL;
                        }
                }
                /* compute the number of htiles */
                nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
                nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
                size = nbx * nby * 4;
                size += track->htile_offset;

                if (size > radeon_bo_size(track->htile_bo)) {
                        dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
                                 __func__, __LINE__, radeon_bo_size(track->htile_bo),
                                 size, nbx, nby);
                        return -EINVAL;
                }
        }

        track->db_dirty = false;
        return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
        struct r600_cs_track *track = p->track;
        u32 tmp;
        int r, i;

        /* on legacy kernel we don't perform advanced check */
        if (p->rdev == NULL)
                return 0;

        /* check streamout */
        if (track->streamout_dirty && track->vgt_strmout_en) {
                for (i = 0; i < 4; i++) {
                        if (track->vgt_strmout_buffer_en & (1 << i)) {
                                if (track->vgt_strmout_bo[i]) {
                                        u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
                                                     (u64)track->vgt_strmout_size[i];
                                        if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
                                                DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
                                                          i, offset,
                                                          radeon_bo_size(track->vgt_strmout_bo[i]));
                                                return -EINVAL;
                                        }
                                } else {
                                        dev_warn(p->dev, "No buffer for streamout %d\n", i);
                                        return -EINVAL;
                                }
                        }
                }
                track->streamout_dirty = false;
        }

        if (track->sx_misc_kill_all_prims)
                return 0;

        /* check that we have a cb for each enabled target, we don't check
         * shader_mask because it seems mesa isn't always setting it :(
         */
        if (track->cb_dirty) {
                tmp = track->cb_target_mask;
                for (i = 0; i < 8; i++) {
                        if ((tmp >> (i * 4)) & 0xF) {
                                /* at least one component is enabled */
                                if (track->cb_color_bo[i] == NULL) {
                                        dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
                                                 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
                                        return -EINVAL;
                                }
                                /* perform rewrite of CB_COLOR[0-7]_SIZE */
                                r = r600_cs_track_validate_cb(p, i);
                                if (r)
                                        return r;
                        }
                }
                track->cb_dirty = false;
        }

        /* Check depth buffer */
        if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
                                G_028800_Z_ENABLE(track->db_depth_control))) {
                r = r600_cs_track_validate_db(p);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index in the IB chunk of the packet header
 *
 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining IB size or if the packet
 * is unknown.
 **/
int r600_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
        uint32_t header;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
        pkt->one_reg_wr = 0;
        switch (pkt->type) {
        case PACKET_TYPE0:
                pkt->reg = CP_PACKET0_GET_REG(header);
                break;
        case PACKET_TYPE3:
                pkt->opcode = CP_PACKET3_GET_OPCODE(header);
                break;
        case PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                return -EINVAL;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                return -EINVAL;
        }
        return 0;
}
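
/*
 * For reference, the CP_PACKET_GET_* macros used above (defined in r600d.h)
 * unpack the packet type and a dword count from the header, so e.g. a
 * PACKET3 NOP header with count 0 parses to pkt->type == PACKET_TYPE3 and
 * pkt->count == 0, with the payload starting at pkt->idx + 1. PACKET_TYPE2
 * is treated as a bare filler dword, hence count is forced to -1 so the
 * size check and the callers' "pkt->count + 2" stepping skip exactly one
 * dword.
 */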

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the relocation information
 *
 * Check that the next packet is a relocation packet3 and fetch the
 * relocation entry it refers to from the relocation chunk.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
                return r;
        }
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        *cs_reloc = p->relocs_ptr[(idx / 4)];
        return 0;
}

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the relocation information
 *
 * Check that the next packet is a relocation packet3 and compute the
 * GPU offset from the relocation chunk data.
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
                return r;
        }
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }
        *cs_reloc = p->relocs;
        (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
        (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
        return 0;
}

/**
 * r600_cs_packet_next_is_pkt3_nop() - test if the next packet is a packet3 nop for reloc
 * @p: parser structure holding parsing context.
 *
 * Check whether the next packet is a relocation packet3 (packet3 NOP);
 * returns 1 if it is, 0 otherwise. Does not advance the parser index.
 **/
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
                return 0;
        }
        if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
                return 0;
        }
        return 1;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
        struct radeon_cs_packet p3reloc, wait_reg_mem;
        int crtc_id;
        int r;
        uint32_t header, h_idx, reg, wait_reg_mem_info;
        volatile uint32_t *ib;

        ib = p->ib->ptr;

        /* parse the WAIT_REG_MEM */
        r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
        if (r)
                return r;

        /* check it's a WAIT_REG_MEM */
        if (wait_reg_mem.type != PACKET_TYPE3 ||
            wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
                DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
                return -EINVAL;
        }

        wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
        /* bit 4 is reg (0) or mem (1) */
        if (wait_reg_mem_info & 0x10) {
                DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
                return -EINVAL;
        }
        /* waiting for value to be equal */
        if ((wait_reg_mem_info & 0x7) != 0x3) {
                DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
                return -EINVAL;
        }
        if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
                DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
                return -EINVAL;
        }

        if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
                DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
                return -EINVAL;
        }

        /* jump over the NOP */
        r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
        if (r)
                return r;

        h_idx = p->idx - 2;
        p->idx += wait_reg_mem.count + 2;
        p->idx += p3reloc.count + 2;

        header = radeon_get_ib_value(p, h_idx);
        crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
        reg = CP_PACKET0_GET_REG(header);

        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                DRM_ERROR("cannot find crtc %d\n", crtc_id);
                return -EINVAL;
        }
        crtc = obj_to_crtc(obj);
        radeon_crtc = to_radeon_crtc(crtc);
        crtc_id = radeon_crtc->crtc_id;

        if (!crtc->enabled) {
                /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
                ib[h_idx + 2] = PACKET2(0);
                ib[h_idx + 3] = PACKET2(0);
                ib[h_idx + 4] = PACKET2(0);
                ib[h_idx + 5] = PACKET2(0);
                ib[h_idx + 6] = PACKET2(0);
                ib[h_idx + 7] = PACKET2(0);
                ib[h_idx + 8] = PACKET2(0);
        } else if (crtc_id == 1) {
                switch (reg) {
                case AVIVO_D1MODE_VLINE_START_END:
                        header &= ~R600_CP_PACKET0_REG_MASK;
                        header |= AVIVO_D2MODE_VLINE_START_END >> 2;
                        break;
                default:
                        DRM_ERROR("unknown crtc reloc\n");
                        return -EINVAL;
                }
                ib[h_idx] = header;
                ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
        }

        return 0;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        int r;

        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
                r = r600_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        return r;
                }
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
                                 struct radeon_cs_packet *pkt)
{
        unsigned reg, i;
        unsigned idx;
        int r;

        idx = pkt->idx + 1;
        reg = pkt->reg;
        for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
                r = r600_packet0_check(p, pkt, idx, reg);
                if (r) {
                        return r;
                }
        }
        return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
        struct r600_cs_track *track = (struct r600_cs_track *)p->track;
        struct radeon_cs_reloc *reloc;
        u32 m, i, tmp, *ib;
        int r;

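        /*
         * r600_reg_safe_bm has one 32-bit word per 128 bytes of register
         * space (32 registers * 4 bytes), so reg >> 7 selects the word and
         * (reg >> 2) & 31 selects the bit within it. A clear bit means the
         * register is always safe; a set bit routes it to the switch below
         * for special handling.
         */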
        i = (reg >> 7);
        if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
                dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
                return -EINVAL;
        }
        m = 1 << ((reg >> 2) & 31);
        if (!(r600_reg_safe_bm[i] & m))
                return 0;
        ib = p->ib->ptr;
        switch (reg) {
        /* force the following registers to 0 in an attempt to disable the
         * out buffer; we need to understand better how it works before we
         * can perform a security check on it (Jerome)
         */
        case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
        case R_008C44_SQ_ESGS_RING_SIZE:
        case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
        case R_008C54_SQ_ESTMP_RING_SIZE:
        case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
        case R_008C74_SQ_FBUF_RING_SIZE:
        case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
        case R_008C5C_SQ_GSTMP_RING_SIZE:
        case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
        case R_008C4C_SQ_GSVS_RING_SIZE:
        case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
        case R_008C6C_SQ_PSTMP_RING_SIZE:
        case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
        case R_008C7C_SQ_REDUC_RING_SIZE:
        case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
        case R_008C64_SQ_VSTMP_RING_SIZE:
        case R_0288C8_SQ_GS_VERT_ITEMSIZE:
                /* get value to populate the IB don't remove */
                tmp = radeon_get_ib_value(p, idx);
                ib[idx] = 0;
                break;
        case SQ_CONFIG:
                track->sq_config = radeon_get_ib_value(p, idx);
                break;
        case R_028800_DB_DEPTH_CONTROL:
                track->db_depth_control = radeon_get_ib_value(p, idx);
                track->db_dirty = true;
                break;
        case R_028010_DB_DEPTH_INFO:
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
                    r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                         "0x%04X\n", reg);
                                return -EINVAL;
                        }
                        track->db_depth_info = radeon_get_ib_value(p, idx);
                        ib[idx] &= C_028010_ARRAY_MODE;
                        track->db_depth_info &= C_028010_ARRAY_MODE;
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                                ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
                                track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
                        } else {
                                ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
                                track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
                        }
                } else {
                        track->db_depth_info = radeon_get_ib_value(p, idx);
                }
                track->db_dirty = true;
                break;
        case R_028004_DB_DEPTH_VIEW:
                track->db_depth_view = radeon_get_ib_value(p, idx);
                track->db_dirty = true;
                break;
        case R_028000_DB_DEPTH_SIZE:
                track->db_depth_size = radeon_get_ib_value(p, idx);
                track->db_depth_size_idx = idx;
                track->db_dirty = true;
                break;
        case R_028AB0_VGT_STRMOUT_EN:
                track->vgt_strmout_en = radeon_get_ib_value(p, idx);
                track->streamout_dirty = true;
                break;
        case R_028B20_VGT_STRMOUT_BUFFER_EN:
                track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
                track->streamout_dirty = true;
                break;
        case VGT_STRMOUT_BUFFER_BASE_0:
        case VGT_STRMOUT_BUFFER_BASE_1:
        case VGT_STRMOUT_BUFFER_BASE_2:
        case VGT_STRMOUT_BUFFER_BASE_3:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
                track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                track->vgt_strmout_bo[tmp] = reloc->robj;
                track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
                track->streamout_dirty = true;
                break;
        case VGT_STRMOUT_BUFFER_SIZE_0:
        case VGT_STRMOUT_BUFFER_SIZE_1:
        case VGT_STRMOUT_BUFFER_SIZE_2:
        case VGT_STRMOUT_BUFFER_SIZE_3:
                tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
                /* size in register is DWs, convert to bytes */
                track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
                track->streamout_dirty = true;
                break;
        case CP_COHER_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                break;
        case R_028238_CB_TARGET_MASK:
                track->cb_target_mask = radeon_get_ib_value(p, idx);
                track->cb_dirty = true;
                break;
        case R_02823C_CB_SHADER_MASK:
                track->cb_shader_mask = radeon_get_ib_value(p, idx);
                break;
        case R_028C04_PA_SC_AA_CONFIG:
                tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
                track->nsamples = 1 << tmp;
                track->cb_dirty = true;
                break;
        case R_0280A0_CB_COLOR0_INFO:
        case R_0280A4_CB_COLOR1_INFO:
        case R_0280A8_CB_COLOR2_INFO:
        case R_0280AC_CB_COLOR3_INFO:
        case R_0280B0_CB_COLOR4_INFO:
        case R_0280B4_CB_COLOR5_INFO:
        case R_0280B8_CB_COLOR6_INFO:
        case R_0280BC_CB_COLOR7_INFO:
                if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
                    r600_cs_packet_next_is_pkt3_nop(p)) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
                        tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
                        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                                ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
                                track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
                        } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
                                ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
                                track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
                        }
                } else {
                        tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
                        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
                }
                track->cb_dirty = true;
                break;
        case R_028080_CB_COLOR0_VIEW:
        case R_028084_CB_COLOR1_VIEW:
        case R_028088_CB_COLOR2_VIEW:
        case R_02808C_CB_COLOR3_VIEW:
        case R_028090_CB_COLOR4_VIEW:
        case R_028094_CB_COLOR5_VIEW:
        case R_028098_CB_COLOR6_VIEW:
        case R_02809C_CB_COLOR7_VIEW:
                tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
                track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
                track->cb_dirty = true;
                break;
        case R_028060_CB_COLOR0_SIZE:
        case R_028064_CB_COLOR1_SIZE:
        case R_028068_CB_COLOR2_SIZE:
        case R_02806C_CB_COLOR3_SIZE:
        case R_028070_CB_COLOR4_SIZE:
        case R_028074_CB_COLOR5_SIZE:
        case R_028078_CB_COLOR6_SIZE:
        case R_02807C_CB_COLOR7_SIZE:
                tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
                track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
                track->cb_color_size_idx[tmp] = idx;
                track->cb_dirty = true;
                break;
        /* These registers were added late; there is userspace
         * which does provide relocations for them but sets a
         * 0 offset. In order to avoid breaking old userspace
         * we detect this and set the address to point to the
         * last CB_COLOR0_BASE. Note that if userspace doesn't
         * set CB_COLOR0_BASE before these registers we will
         * report an error. Old userspace always set
         * CB_COLOR0_BASE before any of these.
         */
        case R_0280E0_CB_COLOR0_FRAG:
        case R_0280E4_CB_COLOR1_FRAG:
        case R_0280E8_CB_COLOR2_FRAG:
        case R_0280EC_CB_COLOR3_FRAG:
        case R_0280F0_CB_COLOR4_FRAG:
        case R_0280F4_CB_COLOR5_FRAG:
        case R_0280F8_CB_COLOR6_FRAG:
        case R_0280FC_CB_COLOR7_FRAG:
                tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
                if (!r600_cs_packet_next_is_pkt3_nop(p)) {
                        if (!track->cb_color_base_last[tmp]) {
                                dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] = track->cb_color_base_last[tmp];
                        track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
                } else {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                        track->cb_color_frag_bo[tmp] = reloc->robj;
                }
                break;
        case R_0280C0_CB_COLOR0_TILE:
        case R_0280C4_CB_COLOR1_TILE:
        case R_0280C8_CB_COLOR2_TILE:
        case R_0280CC_CB_COLOR3_TILE:
        case R_0280D0_CB_COLOR4_TILE:
        case R_0280D4_CB_COLOR5_TILE:
        case R_0280D8_CB_COLOR6_TILE:
        case R_0280DC_CB_COLOR7_TILE:
                tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
                if (!r600_cs_packet_next_is_pkt3_nop(p)) {
                        if (!track->cb_color_base_last[tmp]) {
                                dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] = track->cb_color_base_last[tmp];
                        track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
                } else {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                                return -EINVAL;
                        }
                        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                        track->cb_color_tile_bo[tmp] = reloc->robj;
                }
                break;
        case CB_COLOR0_BASE:
        case CB_COLOR1_BASE:
        case CB_COLOR2_BASE:
        case CB_COLOR3_BASE:
        case CB_COLOR4_BASE:
        case CB_COLOR5_BASE:
        case CB_COLOR6_BASE:
        case CB_COLOR7_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                tmp = (reg - CB_COLOR0_BASE) / 4;
                track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                track->cb_color_base_last[tmp] = ib[idx];
                track->cb_color_bo[tmp] = reloc->robj;
                track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
                track->cb_dirty = true;
                break;
        case DB_DEPTH_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                track->db_offset = radeon_get_ib_value(p, idx) << 8;
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                track->db_bo = reloc->robj;
                track->db_bo_mc = reloc->lobj.gpu_offset;
                track->db_dirty = true;
                break;
        case DB_HTILE_DATA_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                track->htile_offset = radeon_get_ib_value(p, idx) << 8;
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                track->htile_bo = reloc->robj;
                track->db_dirty = true;
                break;
        case DB_HTILE_SURFACE:
                track->htile_surface = radeon_get_ib_value(p, idx);
                track->db_dirty = true;
                break;
        case SQ_PGM_START_FS:
        case SQ_PGM_START_ES:
        case SQ_PGM_START_VS:
        case SQ_PGM_START_GS:
        case SQ_PGM_START_PS:
        case SQ_ALU_CONST_CACHE_GS_0:
        case SQ_ALU_CONST_CACHE_GS_1:
        case SQ_ALU_CONST_CACHE_GS_2:
        case SQ_ALU_CONST_CACHE_GS_3:
        case SQ_ALU_CONST_CACHE_GS_4:
        case SQ_ALU_CONST_CACHE_GS_5:
        case SQ_ALU_CONST_CACHE_GS_6:
        case SQ_ALU_CONST_CACHE_GS_7:
        case SQ_ALU_CONST_CACHE_GS_8:
        case SQ_ALU_CONST_CACHE_GS_9:
        case SQ_ALU_CONST_CACHE_GS_10:
        case SQ_ALU_CONST_CACHE_GS_11:
        case SQ_ALU_CONST_CACHE_GS_12:
        case SQ_ALU_CONST_CACHE_GS_13:
        case SQ_ALU_CONST_CACHE_GS_14:
        case SQ_ALU_CONST_CACHE_GS_15:
        case SQ_ALU_CONST_CACHE_PS_0:
        case SQ_ALU_CONST_CACHE_PS_1:
        case SQ_ALU_CONST_CACHE_PS_2:
        case SQ_ALU_CONST_CACHE_PS_3:
        case SQ_ALU_CONST_CACHE_PS_4:
        case SQ_ALU_CONST_CACHE_PS_5:
        case SQ_ALU_CONST_CACHE_PS_6:
        case SQ_ALU_CONST_CACHE_PS_7:
        case SQ_ALU_CONST_CACHE_PS_8:
        case SQ_ALU_CONST_CACHE_PS_9:
        case SQ_ALU_CONST_CACHE_PS_10:
        case SQ_ALU_CONST_CACHE_PS_11:
        case SQ_ALU_CONST_CACHE_PS_12:
        case SQ_ALU_CONST_CACHE_PS_13:
        case SQ_ALU_CONST_CACHE_PS_14:
        case SQ_ALU_CONST_CACHE_PS_15:
        case SQ_ALU_CONST_CACHE_VS_0:
        case SQ_ALU_CONST_CACHE_VS_1:
        case SQ_ALU_CONST_CACHE_VS_2:
        case SQ_ALU_CONST_CACHE_VS_3:
        case SQ_ALU_CONST_CACHE_VS_4:
        case SQ_ALU_CONST_CACHE_VS_5:
        case SQ_ALU_CONST_CACHE_VS_6:
        case SQ_ALU_CONST_CACHE_VS_7:
        case SQ_ALU_CONST_CACHE_VS_8:
        case SQ_ALU_CONST_CACHE_VS_9:
        case SQ_ALU_CONST_CACHE_VS_10:
        case SQ_ALU_CONST_CACHE_VS_11:
        case SQ_ALU_CONST_CACHE_VS_12:
        case SQ_ALU_CONST_CACHE_VS_13:
        case SQ_ALU_CONST_CACHE_VS_14:
        case SQ_ALU_CONST_CACHE_VS_15:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONTEXT_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                break;
        case SX_MEMORY_EXPORT_BASE:
                r = r600_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        dev_warn(p->dev, "bad SET_CONFIG_REG "
                                 "0x%04X\n", reg);
                        return -EINVAL;
                }
                ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                break;
        case SX_MISC:
                track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
                break;
        default:
                dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
                return -EINVAL;
        }
        return 0;
}

unsigned r600_mip_minify(unsigned size, unsigned level)
{
        unsigned val;

        val = max(1U, size >> level);
        if (level > 0)
                val = roundup_pow_of_two(val);
        return val;
}
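
/*
 * Example: r600_mip_minify(100, 0) = 100 (the base level is left as-is),
 * while r600_mip_minify(100, 3) = roundup_pow_of_two(100 >> 3) =
 * roundup_pow_of_two(12) = 16, i.e. non-base mip levels are rounded up
 * to a power of two.
 */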

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
                              unsigned w0, unsigned h0, unsigned d0, unsigned format,
                              unsigned block_align, unsigned height_align, unsigned base_align,
                              unsigned *l0_size, unsigned *mipmap_size)
{
        unsigned offset, i, level;
        unsigned width, height, depth, size;
        unsigned blocksize;
        unsigned nbx, nby;
        unsigned nlevels = llevel - blevel + 1;

        *l0_size = -1;
        blocksize = r600_fmt_get_blocksize(format);

        w0 = r600_mip_minify(w0, 0);
        h0 = r600_mip_minify(h0, 0);
        d0 = r600_mip_minify(d0, 0);
        for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
                width = r600_mip_minify(w0, i);
                nbx = r600_fmt_get_nblocksx(format, width);

                nbx = round_up(nbx, block_align);

                height = r600_mip_minify(h0, i);
                nby = r600_fmt_get_nblocksy(format, height);
                nby = round_up(nby, height_align);

                depth = r600_mip_minify(d0, i);

                size = nbx * nby * blocksize;
                if (nfaces)
                        size *= nfaces;
                else
                        size *= depth;

                if (i == 0)
                        *l0_size = size;

                if (i == 0 || i == 1)
                        offset = round_up(offset, base_align);

                offset += size;
        }
        *mipmap_size = offset;
        if (llevel == 0)
                *mipmap_size = *l0_size;
        if (!blevel)
                *mipmap_size -= *l0_size;
}
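
/*
 * The split between *l0_size and *mipmap_size mirrors how the caller uses
 * them: level 0 lives in the texture BO and is checked against l0_size,
 * while the remaining levels live in the mipmap BO, which is why l0_size
 * is subtracted again when blevel == 0 (inferred from how
 * r600_check_texture_resource() below consumes the two values).
 */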

/**
 * r600_check_texture_resource() - check if the texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
                                       struct radeon_bo *texture,
                                       struct radeon_bo *mipmap,
                                       u64 base_offset,
                                       u64 mip_offset,
                                       u32 tiling_flags)
{
1559 struct r600_cs_track *track = p->track;
1560 u32 nfaces, llevel, blevel, w0, h0, d0;
1561 u32 word0, word1, l0_size, mipmap_size, word2, word3;
1562 u32 height_align, pitch, pitch_align, depth_align;
1563 u32 array, barray, larray;
1564 u64 base_align;
1565 struct array_mode_checker array_check;
1566 u32 format;
1567
1568 /* on legacy kernel we don't perform advanced check */
1569 if (p->rdev == NULL)
1570 return 0;
1571
1572 /* convert to bytes */
1573 base_offset <<= 8;
1574 mip_offset <<= 8;
1575
1576 word0 = radeon_get_ib_value(p, idx + 0);
1577 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1578 if (tiling_flags & RADEON_TILING_MACRO)
1579 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1580 else if (tiling_flags & RADEON_TILING_MICRO)
1581 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1582 }
1583 word1 = radeon_get_ib_value(p, idx + 1);
1584 w0 = G_038000_TEX_WIDTH(word0) + 1;
1585 h0 = G_038004_TEX_HEIGHT(word1) + 1;
1586 d0 = G_038004_TEX_DEPTH(word1);
1587 nfaces = 1;
1588 array = 0;
1589 switch (G_038000_DIM(word0)) {
1590 case V_038000_SQ_TEX_DIM_1D:
1591 case V_038000_SQ_TEX_DIM_2D:
1592 case V_038000_SQ_TEX_DIM_3D:
1593 break;
1594 case V_038000_SQ_TEX_DIM_CUBEMAP:
1595 if (p->family >= CHIP_RV770)
1596 nfaces = 8;
1597 else
1598 nfaces = 6;
1599 break;
1600 case V_038000_SQ_TEX_DIM_1D_ARRAY:
1601 case V_038000_SQ_TEX_DIM_2D_ARRAY:
1602 array = 1;
1603 break;
1604 case V_038000_SQ_TEX_DIM_2D_MSAA:
1605 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1606 default:
1607 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1608 return -EINVAL;
1609 }
1610 format = G_038004_DATA_FORMAT(word1);
1611 if (!r600_fmt_is_valid_texture(format, p->family)) {
1612 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1613 __func__, __LINE__, format);
1614 return -EINVAL;
1615 }
1616
1617 /* pitch in texels */
1618 pitch = (G_038000_PITCH(word0) + 1) * 8;
1619 array_check.array_mode = G_038000_TILE_MODE(word0);
1620 array_check.group_size = track->group_size;
1621 array_check.nbanks = track->nbanks;
1622 array_check.npipes = track->npipes;
1623 array_check.nsamples = 1;
1624 array_check.blocksize = r600_fmt_get_blocksize(format);
1625 if (r600_get_array_mode_alignment(&array_check,
1626 &pitch_align, &height_align, &depth_align, &base_align)) {
1627 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1628 __func__, __LINE__, G_038000_TILE_MODE(word0));
1629 return -EINVAL;
1630 }
1631
1632 /* XXX check height as well... */
1633
1634 if (!IS_ALIGNED(pitch, pitch_align)) {
1635 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1636 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1637 return -EINVAL;
1638 }
1639 if (!IS_ALIGNED(base_offset, base_align)) {
1640 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1641 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1642 return -EINVAL;
1643 }
1644 if (!IS_ALIGNED(mip_offset, base_align)) {
1645 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1646 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1647 return -EINVAL;
1648 }
1649
1650 word2 = radeon_get_ib_value(p, idx + 2) << 8;
1651 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1652
1653 word0 = radeon_get_ib_value(p, idx + 4);
1654 word1 = radeon_get_ib_value(p, idx + 5);
1655 blevel = G_038010_BASE_LEVEL(word0);
1656 llevel = G_038014_LAST_LEVEL(word1);
1657 if (blevel > llevel) {
1658 dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1659 blevel, llevel);
1660 }
1661 if (array == 1) {
1662 barray = G_038014_BASE_ARRAY(word1);
1663 larray = G_038014_LAST_ARRAY(word1);
1664
1665 nfaces = larray - barray + 1;
1666 }
1667 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
1668 pitch_align, height_align, base_align,
1669 &l0_size, &mipmap_size);
1670 /* using get ib will give us the offset into the texture bo */
1671 if ((l0_size + word2) > radeon_bo_size(texture)) {
1672 dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
1673 w0, h0, pitch_align, height_align,
1674 array_check.array_mode, format, word2,
1675 l0_size, radeon_bo_size(texture));
1676 dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
1677 return -EINVAL;
1678 }
1679 /* using get ib will give us the offset into the mipmap bo */
1680 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1681 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
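/* informational only: the warning below was left disabled, presumably
 * to avoid breaking existing user space that under-allocates mipmaps */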
1682 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1683 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1684 }
1685 return 0;
1686 }
1687
1688 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1689 {
1690 u32 m, i;
1691
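/*
 * r600_reg_safe_bm keeps one bit per 32-bit register: the word index is
 * reg >> 7 (32 registers of 4 bytes per word) and the bit index is
 * (reg >> 2) & 31. A clear bit marks the register as safe for user space
 * to write. Illustrative example: reg = 0x8040 lands in word
 * 0x8040 >> 7 = 0x100 at bit (0x8040 >> 2) & 31 = 16.
 */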
1692 i = (reg >> 7);
1693 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1694 dev_warn(p->dev, "forbidden register 0x%08x (out of bitmap range) at %d\n", reg, idx);
1695 return false;
1696 }
1697 m = 1 << ((reg >> 2) & 31);
1698 if (!(r600_reg_safe_bm[i] & m))
1699 return true;
1700 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1701 return false;
1702 }
1703
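/**
 * r600_packet3_check() - validate one type-3 packet
 * @p: parser structure holding parsing context
 * @pkt: packet to check
 *
 * Checks packet counts and register ranges, resolves relocations and
 * patches the resulting GPU addresses straight into the indirect buffer.
 */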
1704 static int r600_packet3_check(struct radeon_cs_parser *p,
1705 struct radeon_cs_packet *pkt)
1706 {
1707 struct radeon_cs_reloc *reloc;
1708 struct r600_cs_track *track;
1709 volatile u32 *ib;
1710 unsigned idx;
1711 unsigned i;
1712 unsigned start_reg, end_reg, reg;
1713 int r;
1714 u32 idx_value;
1715
1716 track = (struct r600_cs_track *)p->track;
1717 ib = p->ib->ptr;
1718 idx = pkt->idx + 1;
1719 idx_value = radeon_get_ib_value(p, idx);
1720
1721 switch (pkt->opcode) {
1722 case PACKET3_SET_PREDICATION:
1723 {
1724 int pred_op;
1725 int tmp;
1726 uint64_t offset;
1727
1728 if (pkt->count != 1) {
1729 DRM_ERROR("bad SET PREDICATION\n");
1730 return -EINVAL;
1731 }
1732
1733 tmp = radeon_get_ib_value(p, idx + 1);
1734 pred_op = (tmp >> 16) & 0x7;
1735
1736 /* for the clear predicate operation */
1737 if (pred_op == 0)
1738 return 0;
1739
1740 if (pred_op > 2) {
1741 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1742 return -EINVAL;
1743 }
1744
1745 r = r600_cs_packet_next_reloc(p, &reloc);
1746 if (r) {
1747 DRM_ERROR("bad SET PREDICATION\n");
1748 return -EINVAL;
1749 }
1750
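/* the GPU address is 40 bits: the low dword is 16-byte aligned and the
 * high 8 bits share the second dword with the predicate operation bits */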
1751 offset = reloc->lobj.gpu_offset +
1752 (idx_value & 0xfffffff0) +
1753 ((u64)(tmp & 0xff) << 32);
1754
1755 ib[idx + 0] = offset;
1756 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1757 }
1758 break;
1759
1760 case PACKET3_START_3D_CMDBUF:
1761 if (p->family >= CHIP_RV770 || pkt->count) {
1762 DRM_ERROR("bad START_3D\n");
1763 return -EINVAL;
1764 }
1765 break;
1766 case PACKET3_CONTEXT_CONTROL:
1767 if (pkt->count != 1) {
1768 DRM_ERROR("bad CONTEXT_CONTROL\n");
1769 return -EINVAL;
1770 }
1771 break;
1772 case PACKET3_INDEX_TYPE:
1773 case PACKET3_NUM_INSTANCES:
1774 if (pkt->count) {
1775 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1776 return -EINVAL;
1777 }
1778 break;
1779 case PACKET3_DRAW_INDEX:
1780 {
1781 uint64_t offset;
1782 if (pkt->count != 3) {
1783 DRM_ERROR("bad DRAW_INDEX\n");
1784 return -EINVAL;
1785 }
1786 r = r600_cs_packet_next_reloc(p, &reloc);
1787 if (r) {
1788 DRM_ERROR("bad DRAW_INDEX\n");
1789 return -EINVAL;
1790 }
1791
1792 offset = reloc->lobj.gpu_offset +
1793 idx_value +
1794 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1795
1796 ib[idx+0] = offset;
1797 ib[idx+1] = upper_32_bits(offset) & 0xff;
1798
1799 r = r600_cs_track_check(p);
1800 if (r) {
1801 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1802 return r;
1803 }
1804 break;
1805 }
1806 case PACKET3_DRAW_INDEX_AUTO:
1807 if (pkt->count != 1) {
1808 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1809 return -EINVAL;
1810 }
1811 r = r600_cs_track_check(p);
1812 if (r) {
1813 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1814 return r;
1815 }
1816 break;
1817 case PACKET3_DRAW_INDEX_IMMD_BE:
1818 case PACKET3_DRAW_INDEX_IMMD:
1819 if (pkt->count < 2) {
1820 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1821 return -EINVAL;
1822 }
1823 r = r600_cs_track_check(p);
1824 if (r) {
1825 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1826 return r;
1827 }
1828 break;
1829 case PACKET3_WAIT_REG_MEM:
1830 if (pkt->count != 5) {
1831 DRM_ERROR("bad WAIT_REG_MEM\n");
1832 return -EINVAL;
1833 }
1834 /* bit 4 is reg (0) or mem (1) */
1835 if (idx_value & 0x10) {
1836 uint64_t offset;
1837
1838 r = r600_cs_packet_next_reloc(p, &reloc);
1839 if (r) {
1840 DRM_ERROR("bad WAIT_REG_MEM\n");
1841 return -EINVAL;
1842 }
1843
1844 offset = reloc->lobj.gpu_offset +
1845 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
1846 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1847
1848 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1849 ib[idx+2] = upper_32_bits(offset) & 0xff;
1850 }
1851 break;
1852 case PACKET3_SURFACE_SYNC:
1853 if (pkt->count != 3) {
1854 DRM_ERROR("bad SURFACE_SYNC\n");
1855 return -EINVAL;
1856 }
1857 /* 0xffffffff/0x0 is flush all cache flag */
1858 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1859 radeon_get_ib_value(p, idx + 2) != 0) {
1860 r = r600_cs_packet_next_reloc(p, &reloc);
1861 if (r) {
1862 DRM_ERROR("bad SURFACE_SYNC\n");
1863 return -EINVAL;
1864 }
1865 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1866 }
1867 break;
1868 case PACKET3_EVENT_WRITE:
1869 if (pkt->count != 2 && pkt->count != 0) {
1870 DRM_ERROR("bad EVENT_WRITE\n");
1871 return -EINVAL;
1872 }
1873 if (pkt->count) {
1874 uint64_t offset;
1875
1876 r = r600_cs_packet_next_reloc(p, &reloc);
1877 if (r) {
1878 DRM_ERROR("bad EVENT_WRITE\n");
1879 return -EINVAL;
1880 }
1881 offset = reloc->lobj.gpu_offset +
1882 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
1883 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1884
1885 ib[idx+1] = offset & 0xfffffff8;
1886 ib[idx+2] = upper_32_bits(offset) & 0xff;
1887 }
1888 break;
1889 case PACKET3_EVENT_WRITE_EOP:
1890 {
1891 uint64_t offset;
1892
1893 if (pkt->count != 4) {
1894 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1895 return -EINVAL;
1896 }
1897 r = r600_cs_packet_next_reloc(p, &reloc);
1898 if (r) {
1899 DRM_ERROR("bad EVENT_WRITE\n");
1900 return -EINVAL;
1901 }
1902
1903 offset = reloc->lobj.gpu_offset +
1904 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
1905 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1906
1907 ib[idx+1] = offset & 0xfffffffc;
1908 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1909 break;
1910 }
1911 case PACKET3_SET_CONFIG_REG:
1912 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1913 end_reg = 4 * pkt->count + start_reg - 4;
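/* payload dword i programs register start_reg + 4*i, so end_reg is the
 * last register the packet touches; e.g. idx_value = 0x10 with count = 2
 * writes 0x8040 and 0x8044 given the 0x8000 config-reg base */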
1914 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1915 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1916 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1917 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1918 return -EINVAL;
1919 }
1920 for (i = 0; i < pkt->count; i++) {
1921 reg = start_reg + (4 * i);
1922 r = r600_cs_check_reg(p, reg, idx+1+i);
1923 if (r)
1924 return r;
1925 }
1926 break;
1927 case PACKET3_SET_CONTEXT_REG:
1928 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1929 end_reg = 4 * pkt->count + start_reg - 4;
1930 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1931 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1932 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1933 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1934 return -EINVAL;
1935 }
1936 for (i = 0; i < pkt->count; i++) {
1937 reg = start_reg + (4 * i);
1938 r = r600_cs_check_reg(p, reg, idx+1+i);
1939 if (r)
1940 return r;
1941 }
1942 break;
1943 case PACKET3_SET_RESOURCE:
1944 if (pkt->count % 7) {
1945 DRM_ERROR("bad SET_RESOURCE\n");
1946 return -EINVAL;
1947 }
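/* each SET_RESOURCE descriptor is 7 dwords on r600, hence the count % 7
 * check above and the i*7 strides used below */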
1948 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1949 end_reg = 4 * pkt->count + start_reg - 4;
1950 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1951 (start_reg >= PACKET3_SET_RESOURCE_END) ||
1952 (end_reg >= PACKET3_SET_RESOURCE_END)) {
1953 DRM_ERROR("bad SET_RESOURCE\n");
1954 return -EINVAL;
1955 }
1956 for (i = 0; i < (pkt->count / 7); i++) {
1957 struct radeon_bo *texture, *mipmap;
1958 u32 size, offset, base_offset, mip_offset;
1959
1960 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1961 case SQ_TEX_VTX_VALID_TEXTURE:
1962 /* tex base */
1963 r = r600_cs_packet_next_reloc(p, &reloc);
1964 if (r) {
1965 DRM_ERROR("bad SET_RESOURCE\n");
1966 return -EINVAL;
1967 }
1968 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1969 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1970 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1971 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1972 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1973 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1974 }
1975 texture = reloc->robj;
1976 /* tex mip base */
1977 r = r600_cs_packet_next_reloc(p, &reloc);
1978 if (r) {
1979 DRM_ERROR("bad SET_RESOURCE\n");
1980 return -EINVAL;
1981 }
1982 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1983 mipmap = reloc->robj;
1984 r = r600_check_texture_resource(p, idx+(i*7)+1,
1985 texture, mipmap,
1986 base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1987 mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1988 reloc->lobj.tiling_flags);
1989 if (r)
1990 return r;
1991 ib[idx+1+(i*7)+2] += base_offset;
1992 ib[idx+1+(i*7)+3] += mip_offset;
1993 break;
1994 case SQ_TEX_VTX_VALID_BUFFER:
1995 {
1996 uint64_t offset64;
1997 /* vtx base */
1998 r = r600_cs_packet_next_reloc(p, &reloc);
1999 if (r) {
2000 DRM_ERROR("bad SET_RESOURCE\n");
2001 return -EINVAL;
2002 }
2003 offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
2004 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
2005 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2006 /* force size to size of the buffer */
2007 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
2008 size + offset, radeon_bo_size(reloc->robj));
2009 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
2010 }
2011
2012 offset64 = reloc->lobj.gpu_offset + offset;
/* write back to the same dwords that were read above: the descriptor
 * stride is 7 dwords on r600, not 8 */
2013 ib[idx+1+(i*7)+0] = offset64;
2014 ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
2015 (upper_32_bits(offset64) & 0xff);
2016 break;
2017 }
2018 case SQ_TEX_VTX_INVALID_TEXTURE:
2019 case SQ_TEX_VTX_INVALID_BUFFER:
2020 default:
2021 DRM_ERROR("bad SET_RESOURCE\n");
2022 return -EINVAL;
2023 }
2024 }
2025 break;
2026 case PACKET3_SET_ALU_CONST:
2027 if (track->sq_config & DX9_CONSTS) {
2028 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
2029 end_reg = 4 * pkt->count + start_reg - 4;
2030 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
2031 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
2032 (end_reg >= PACKET3_SET_ALU_CONST_END)) {
2033 DRM_ERROR("bad SET_ALU_CONST\n");
2034 return -EINVAL;
2035 }
2036 }
2037 break;
2038 case PACKET3_SET_BOOL_CONST:
2039 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
2040 end_reg = 4 * pkt->count + start_reg - 4;
2041 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
2042 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2043 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2044 DRM_ERROR("bad SET_BOOL_CONST\n");
2045 return -EINVAL;
2046 }
2047 break;
2048 case PACKET3_SET_LOOP_CONST:
2049 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
2050 end_reg = 4 * pkt->count + start_reg - 4;
2051 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
2052 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2053 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2054 DRM_ERROR("bad SET_LOOP_CONST\n");
2055 return -EINVAL;
2056 }
2057 break;
2058 case PACKET3_SET_CTL_CONST:
2059 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
2060 end_reg = 4 * pkt->count + start_reg - 4;
2061 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
2062 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2063 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2064 DRM_ERROR("bad SET_CTL_CONST\n");
2065 return -EINVAL;
2066 }
2067 break;
2068 case PACKET3_SET_SAMPLER:
2069 if (pkt->count % 3) {
2070 DRM_ERROR("bad SET_SAMPLER\n");
2071 return -EINVAL;
2072 }
2073 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
2074 end_reg = 4 * pkt->count + start_reg - 4;
2075 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
2076 (start_reg >= PACKET3_SET_SAMPLER_END) ||
2077 (end_reg >= PACKET3_SET_SAMPLER_END)) {
2078 DRM_ERROR("bad SET_SAMPLER\n");
2079 return -EINVAL;
2080 }
2081 break;
2082 case PACKET3_SURFACE_BASE_UPDATE:
2083 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2084 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2085 return -EINVAL;
2086 }
2087 if (pkt->count) {
2088 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2089 return -EINVAL;
2090 }
2091 break;
2092 case PACKET3_STRMOUT_BUFFER_UPDATE:
2093 if (pkt->count != 4) {
2094 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2095 return -EINVAL;
2096 }
2097 /* Updating memory at DST_ADDRESS. */
2098 if (idx_value & 0x1) {
2099 u64 offset;
2100 r = r600_cs_packet_next_reloc(p, &reloc);
2101 if (r) {
2102 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2103 return -EINVAL;
2104 }
2105 offset = radeon_get_ib_value(p, idx+1);
2106 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2107 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2108 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2109 offset + 4, radeon_bo_size(reloc->robj));
2110 return -EINVAL;
2111 }
2112 offset += reloc->lobj.gpu_offset;
2113 ib[idx+1] = offset;
2114 ib[idx+2] = upper_32_bits(offset) & 0xff;
2115 }
2116 /* Reading data from SRC_ADDRESS. */
2117 if (((idx_value >> 1) & 0x3) == 2) {
2118 u64 offset;
2119 r = r600_cs_packet_next_reloc(p, &reloc);
2120 if (r) {
2121 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2122 return -EINVAL;
2123 }
2124 offset = radeon_get_ib_value(p, idx+3);
2125 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2126 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2127 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2128 offset + 4, radeon_bo_size(reloc->robj));
2129 return -EINVAL;
2130 }
2131 offset += reloc->lobj.gpu_offset;
2132 ib[idx+3] = offset;
2133 ib[idx+4] = upper_32_bits(offset) & 0xff;
2134 }
2135 break;
2136 case PACKET3_COPY_DW:
2137 if (pkt->count != 4) {
2138 DRM_ERROR("bad COPY_DW (invalid count)\n");
2139 return -EINVAL;
2140 }
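/* bit 0 of the header dword selects a memory SRC, bit 1 a memory DST;
 * register operands must instead pass the safe-register bitmap check */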
2141 if (idx_value & 0x1) {
2142 u64 offset;
2143 /* SRC is memory. */
2144 r = r600_cs_packet_next_reloc(p, &reloc);
2145 if (r) {
2146 DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2147 return -EINVAL;
2148 }
2149 offset = radeon_get_ib_value(p, idx+1);
2150 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2151 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2152 DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2153 offset + 4, radeon_bo_size(reloc->robj));
2154 return -EINVAL;
2155 }
2156 offset += reloc->lobj.gpu_offset;
2157 ib[idx+1] = offset;
2158 ib[idx+2] = upper_32_bits(offset) & 0xff;
2159 } else {
2160 /* SRC is a reg. */
2161 reg = radeon_get_ib_value(p, idx+1) << 2;
2162 if (!r600_is_safe_reg(p, reg, idx+1))
2163 return -EINVAL;
2164 }
2165 if (idx_value & 0x2) {
2166 u64 offset;
2167 /* DST is memory. */
2168 r = r600_cs_packet_next_reloc(p, &reloc);
2169 if (r) {
2170 DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2171 return -EINVAL;
2172 }
2173 offset = radeon_get_ib_value(p, idx+3);
2174 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2175 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2176 DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2177 offset + 4, radeon_bo_size(reloc->robj));
2178 return -EINVAL;
2179 }
2180 offset += reloc->lobj.gpu_offset;
2181 ib[idx+3] = offset;
2182 ib[idx+4] = upper_32_bits(offset) & 0xff;
2183 } else {
2184 /* DST is a reg. */
2185 reg = radeon_get_ib_value(p, idx+3) << 2;
2186 if (!r600_is_safe_reg(p, reg, idx+3))
2187 return -EINVAL;
2188 }
2189 break;
2190 case PACKET3_NOP:
2191 break;
2192 default:
2193 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2194 return -EINVAL;
2195 }
2196 return 0;
2197 }
2198
2199 int r600_cs_parse(struct radeon_cs_parser *p)
2200 {
2201 struct radeon_cs_packet pkt;
2202 struct r600_cs_track *track;
2203 int r;
2204
2205 if (p->track == NULL) {
2206 /* initialize tracker, we are in kms */
2207 track = kzalloc(sizeof(*track), GFP_KERNEL);
2208 if (track == NULL)
2209 return -ENOMEM;
2210 r600_cs_track_init(track);
2211 if (p->rdev->family < CHIP_RV770) {
2212 track->npipes = p->rdev->config.r600.tiling_npipes;
2213 track->nbanks = p->rdev->config.r600.tiling_nbanks;
2214 track->group_size = p->rdev->config.r600.tiling_group_size;
2215 } else if (p->rdev->family <= CHIP_RV740) {
2216 track->npipes = p->rdev->config.rv770.tiling_npipes;
2217 track->nbanks = p->rdev->config.rv770.tiling_nbanks;
2218 track->group_size = p->rdev->config.rv770.tiling_group_size;
2219 }
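/* families newer than RV740 have their own CS parser and never reach
 * this code, so only the r600/rv770 tiling configs are mirrored here */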
2220 p->track = track;
2221 }
2222 do {
2223 r = r600_cs_packet_parse(p, &pkt, p->idx);
2224 if (r) {
2225 kfree(p->track);
2226 p->track = NULL;
2227 return r;
2228 }
2229 p->idx += pkt.count + 2;
2230 switch (pkt.type) {
2231 case PACKET_TYPE0:
2232 r = r600_cs_parse_packet0(p, &pkt);
2233 break;
2234 case PACKET_TYPE2:
2235 break;
2236 case PACKET_TYPE3:
2237 r = r600_packet3_check(p, &pkt);
2238 break;
2239 default:
2240 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2241 kfree(p->track);
2242 p->track = NULL;
2243 return -EINVAL;
2244 }
2245 if (r) {
2246 kfree(p->track);
2247 p->track = NULL;
2248 return r;
2249 }
2250 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2251 #if 0
2252 for (r = 0; r < p->ib->length_dw; r++) {
2253 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
2254 mdelay(1);
2255 }
2256 #endif
2257 kfree(p->track);
2258 p->track = NULL;
2259 return 0;
2260 }
2261
2262 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2263 {
2264 if (p->chunk_relocs_idx == -1) {
2265 return 0;
2266 }
2267 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2268 if (p->relocs == NULL) {
2269 return -ENOMEM;
2270 }
2271 return 0;
2272 }
2273
2274 /**
2275 * r600_cs_parser_fini() - clean parser states
2276 * @parser: parser structure holding parsing context.
2277 * @error: error number
2278 *
2279 * If error is set, then unvalidate the buffer; otherwise just free the
2280 * memory used by the parsing context.
2281 **/
2282 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2283 {
2284 unsigned i;
2285
2286 kfree(parser->relocs);
2287 for (i = 0; i < parser->nchunks; i++) {
2288 kfree(parser->chunks[i].kdata);
2289 kfree(parser->chunks[i].kpage[0]);
2290 kfree(parser->chunks[i].kpage[1]);
2291 }
2292 kfree(parser->chunks);
2293 kfree(parser->chunks_array);
2294 }
2295
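/*
 * Legacy (UMS) entry point: validates a command stream submitted through
 * the DRI1 ioctl path. No struct radeon_device exists here, so the parser
 * runs with rdev == NULL and a fake IB that aliases the caller's buffer.
 */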
2296 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2297 unsigned family, u32 *ib, int *l)
2298 {
2299 struct radeon_cs_parser parser;
2300 struct radeon_cs_chunk *ib_chunk;
2301 struct radeon_ib fake_ib;
2302 struct r600_cs_track *track;
2303 int r;
2304
2305 /* initialize tracker */
2306 track = kzalloc(sizeof(*track), GFP_KERNEL);
2307 if (track == NULL)
2308 return -ENOMEM;
2309 r600_cs_track_init(track);
2310 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
2311 /* initialize parser */
2312 memset(&parser, 0, sizeof(struct radeon_cs_parser));
2313 parser.filp = filp;
2314 parser.dev = &dev->pdev->dev;
2315 parser.rdev = NULL;
2316 parser.family = family;
2317 parser.ib = &fake_ib;
2318 parser.track = track;
2319 fake_ib.ptr = ib;
2320 r = radeon_cs_parser_init(&parser, data);
2321 if (r) {
2322 DRM_ERROR("Failed to initialize parser !\n");
2323 r600_cs_parser_fini(&parser, r);
2324 return r;
2325 }
2326 r = r600_cs_parser_relocs_legacy(&parser);
2327 if (r) {
2328 DRM_ERROR("Failed to parse relocation !\n");
2329 r600_cs_parser_fini(&parser, r);
2330 return r;
2331 }
2332 /* Copy the packet into the IB, the parser will read from the
2333 * input memory (cached) and write to the IB (which can be
2334 * uncached). */
2335 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2336 parser.ib->length_dw = ib_chunk->length_dw;
2337 *l = parser.ib->length_dw;
2338 r = r600_cs_parse(&parser);
2339 if (r) {
2340 DRM_ERROR("Invalid command stream !\n");
2341 r600_cs_parser_fini(&parser, r);
2342 return r;
2343 }
2344 r = radeon_cs_finish_pages(&parser);
2345 if (r) {
2346 DRM_ERROR("Invalid command stream !\n");
2347 r600_cs_parser_fini(&parser, r);
2348 return r;
2349 }
2350 r600_cs_parser_fini(&parser, r);
2351 return r;
2352 }
2353
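/* called once from the legacy UMS setup so that relocation lookups use
 * the variant that bypasses the memory manager (no BOs exist in UMS) */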
2354 void r600_cs_legacy_init(void)
2355 {
2356 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
2357 }
2358