1 /*
2 * Copyright © 2015-2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /** @file elk_eu_validate.c
25 *
26 * This file implements a pass that validates shader assembly.
27 *
28 * The restrictions implemented herein are intended to verify that instructions
29 * in shader assembly do not violate restrictions documented in the graphics
30 * programming reference manuals.
31 *
32 * The restrictions are difficult for humans to quickly verify due to their
33 * complexity and abundance.
34 *
35 * It is critical that this code is thoroughly unit tested because false
36 * results will lead developers astray, which is worse than having no validator
37 * at all. Functional changes to this file without corresponding unit tests (in
38 * test_eu_validate.cpp) will be rejected.
39 */
40
41 #include <stdlib.h>
42 #include "elk_eu.h"
43 #include "elk_disasm_info.h"
44
45 /* We're going to do lots of string concatenation, so this should help. */
46 struct string {
47 char *str;
48 size_t len;
49 };
50
51 static void
52 cat(struct string *dest, const struct string src)
53 {
54 dest->str = realloc(dest->str, dest->len + src.len + 1);
55 memcpy(dest->str + dest->len, src.str, src.len);
56 dest->str[dest->len + src.len] = '\0';
57 dest->len = dest->len + src.len;
58 }
59 #define CAT(dest, src) cat(&dest, (struct string){src, strlen(src)})
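/* Illustrative use of the helpers above: starting from
 * struct string error_msg = { NULL, 0 }, CAT(error_msg, "\tERROR: foo\n")
 * reallocates error_msg.str, appends the text, keeps it NUL-terminated and
 * updates error_msg.len.
 */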
60
61 static bool
62 contains(const struct string haystack, const struct string needle)
63 {
64 return haystack.str && memmem(haystack.str, haystack.len,
65 needle.str, needle.len) != NULL;
66 }
67 #define CONTAINS(haystack, needle) \
68 contains(haystack, (struct string){needle, strlen(needle)})
69
70 #define error(str) "\tERROR: " str "\n"
71 #define ERROR_INDENT "\t "
72
73 #define ERROR(msg) ERROR_IF(true, msg)
74 #define ERROR_IF(cond, msg) \
75 do { \
76 if ((cond) && !CONTAINS(error_msg, error(msg))) { \
77 CAT(error_msg, error(msg)); \
78 } \
79 } while(0)
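/* For example, ERROR_IF(exec_size > 8, "SIMD width too large") appends
 * "\tERROR: SIMD width too large\n" to the local error_msg, and the
 * CONTAINS() guard keeps the same message from being appended twice for
 * one instruction.
 */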
80
81 #define CHECK(func, args...) \
82 do { \
83 struct string __msg = func(isa, inst, ##args); \
84 if (__msg.str) { \
85 cat(&error_msg, __msg); \
86 free(__msg.str); \
87 } \
88 } while (0)
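/* CHECK() is meant to be expanded where 'isa', 'inst' and a local
 * 'error_msg' are in scope; e.g. CHECK(sources_not_null) runs that rule
 * and folds whatever message it returns into error_msg.
 */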
89
90 #define STRIDE(stride) ((stride) != 0 ? (1 << ((stride) - 1)) : 0)
91 #define WIDTH(width) (1 << (width))
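/* These decode the encoded region fields: a stride encoding of 0 means a
 * stride of 0 elements and an encoding of n > 0 means 2^(n-1) elements,
 * while a width encoding of n means 2^n channels (so 3 decodes to 8).
 */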
92
93 static bool
94 inst_is_send(const struct elk_isa_info *isa, const elk_inst *inst)
95 {
96 switch (elk_inst_opcode(isa, inst)) {
97 case ELK_OPCODE_SEND:
98 case ELK_OPCODE_SENDC:
99 case ELK_OPCODE_SENDS:
100 case ELK_OPCODE_SENDSC:
101 return true;
102 default:
103 return false;
104 }
105 }
106
107 static bool
108 inst_is_split_send(const struct elk_isa_info *isa, const elk_inst *inst)
109 {
110 const struct intel_device_info *devinfo = isa->devinfo;
111
112 if (devinfo->ver >= 12) {
113 return inst_is_send(isa, inst);
114 } else {
115 switch (elk_inst_opcode(isa, inst)) {
116 case ELK_OPCODE_SENDS:
117 case ELK_OPCODE_SENDSC:
118 return true;
119 default:
120 return false;
121 }
122 }
123 }
124
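/* Map an unsigned register type to its signed counterpart so that, e.g.,
 * a MOV between UD and D still counts as a raw move below.
 */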
125 static unsigned
126 signed_type(unsigned type)
127 {
128 switch (type) {
129 case ELK_REGISTER_TYPE_UD: return ELK_REGISTER_TYPE_D;
130 case ELK_REGISTER_TYPE_UW: return ELK_REGISTER_TYPE_W;
131 case ELK_REGISTER_TYPE_UB: return ELK_REGISTER_TYPE_B;
132 case ELK_REGISTER_TYPE_UQ: return ELK_REGISTER_TYPE_Q;
133 default: return type;
134 }
135 }
136
137 static enum elk_reg_type
138 inst_dst_type(const struct elk_isa_info *isa, const elk_inst *inst)
139 {
140 const struct intel_device_info *devinfo = isa->devinfo;
141
142 return (devinfo->ver < 12 || !inst_is_send(isa, inst)) ?
143 elk_inst_dst_type(devinfo, inst) : ELK_REGISTER_TYPE_D;
144 }
145
146 static bool
147 inst_is_raw_move(const struct elk_isa_info *isa, const elk_inst *inst)
148 {
149 const struct intel_device_info *devinfo = isa->devinfo;
150
151 unsigned dst_type = signed_type(inst_dst_type(isa, inst));
152 unsigned src_type = signed_type(elk_inst_src0_type(devinfo, inst));
153
154 if (elk_inst_src0_reg_file(devinfo, inst) == ELK_IMMEDIATE_VALUE) {
155 /* FIXME: not strictly true */
156 if (elk_inst_src0_type(devinfo, inst) == ELK_REGISTER_TYPE_VF ||
157 elk_inst_src0_type(devinfo, inst) == ELK_REGISTER_TYPE_UV ||
158 elk_inst_src0_type(devinfo, inst) == ELK_REGISTER_TYPE_V) {
159 return false;
160 }
161 } else if (elk_inst_src0_negate(devinfo, inst) ||
162 elk_inst_src0_abs(devinfo, inst)) {
163 return false;
164 }
165
166 return elk_inst_opcode(isa, inst) == ELK_OPCODE_MOV &&
167 elk_inst_saturate(devinfo, inst) == 0 &&
168 dst_type == src_type;
169 }
170
171 static bool
172 dst_is_null(const struct intel_device_info *devinfo, const elk_inst *inst)
173 {
174 return elk_inst_dst_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
175 elk_inst_dst_da_reg_nr(devinfo, inst) == ELK_ARF_NULL;
176 }
177
178 static bool
179 src0_is_null(const struct intel_device_info *devinfo, const elk_inst *inst)
180 {
181 return elk_inst_src0_address_mode(devinfo, inst) == ELK_ADDRESS_DIRECT &&
182 elk_inst_src0_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
183 elk_inst_src0_da_reg_nr(devinfo, inst) == ELK_ARF_NULL;
184 }
185
186 static bool
187 src1_is_null(const struct intel_device_info *devinfo, const elk_inst *inst)
188 {
189 return elk_inst_src1_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
190 elk_inst_src1_da_reg_nr(devinfo, inst) == ELK_ARF_NULL;
191 }
192
193 static bool
194 src0_is_acc(const struct intel_device_info *devinfo, const elk_inst *inst)
195 {
196 return elk_inst_src0_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
197 (elk_inst_src0_da_reg_nr(devinfo, inst) & 0xF0) == ELK_ARF_ACCUMULATOR;
198 }
199
200 static bool
201 src1_is_acc(const struct intel_device_info *devinfo, const elk_inst *inst)
202 {
203 return elk_inst_src1_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
204 (elk_inst_src1_da_reg_nr(devinfo, inst) & 0xF0) == ELK_ARF_ACCUMULATOR;
205 }
206
207 static bool
208 src0_has_scalar_region(const struct intel_device_info *devinfo,
209 const elk_inst *inst)
210 {
211 return elk_inst_src0_vstride(devinfo, inst) == ELK_VERTICAL_STRIDE_0 &&
212 elk_inst_src0_width(devinfo, inst) == ELK_WIDTH_1 &&
213 elk_inst_src0_hstride(devinfo, inst) == ELK_HORIZONTAL_STRIDE_0;
214 }
215
216 static bool
217 src1_has_scalar_region(const struct intel_device_info *devinfo,
218 const elk_inst *inst)
219 {
220 return elk_inst_src1_vstride(devinfo, inst) == ELK_VERTICAL_STRIDE_0 &&
221 elk_inst_src1_width(devinfo, inst) == ELK_WIDTH_1 &&
222 elk_inst_src1_hstride(devinfo, inst) == ELK_HORIZONTAL_STRIDE_0;
223 }
224
225 static struct string
226 invalid_values(const struct elk_isa_info *isa, const elk_inst *inst)
227 {
228 const struct intel_device_info *devinfo = isa->devinfo;
229
230 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
231 struct string error_msg = { .str = NULL, .len = 0 };
232
233 switch ((enum elk_execution_size) elk_inst_exec_size(devinfo, inst)) {
234 case ELK_EXECUTE_1:
235 case ELK_EXECUTE_2:
236 case ELK_EXECUTE_4:
237 case ELK_EXECUTE_8:
238 case ELK_EXECUTE_16:
239 case ELK_EXECUTE_32:
240 break;
241 default:
242 ERROR("invalid execution size");
243 break;
244 }
245
246 if (error_msg.str)
247 return error_msg;
248
249 if (devinfo->ver >= 12) {
250 unsigned group_size = 1 << elk_inst_exec_size(devinfo, inst);
251 unsigned qtr_ctrl = elk_inst_qtr_control(devinfo, inst);
252 unsigned nib_ctrl = elk_inst_nib_control(devinfo, inst);
253
254 unsigned chan_off = (qtr_ctrl * 2 + nib_ctrl) << 2;
255 ERROR_IF(chan_off % group_size != 0,
256 "The execution size must be a factor of the chosen offset");
257 }
258
259 if (inst_is_send(isa, inst))
260 return error_msg;
261
262 if (num_sources == 3) {
263 /* Nothing to test:
264 * No 3-src instructions on Gfx4-5
265 * No reg file bits on Gfx6-10 (align16)
266 * No invalid encodings on Gfx10-12 (align1)
267 */
268 } else {
269 if (devinfo->ver > 6) {
270 ERROR_IF(elk_inst_dst_reg_file(devinfo, inst) == MRF ||
271 (num_sources > 0 &&
272 elk_inst_src0_reg_file(devinfo, inst) == MRF) ||
273 (num_sources > 1 &&
274 elk_inst_src1_reg_file(devinfo, inst) == MRF),
275 "invalid register file encoding");
276 }
277 }
278
279 if (error_msg.str)
280 return error_msg;
281
282 if (num_sources == 3) {
283 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1) {
284 if (devinfo->ver >= 10) {
285 ERROR_IF(elk_inst_3src_a1_dst_type (devinfo, inst) == INVALID_REG_TYPE ||
286 elk_inst_3src_a1_src0_type(devinfo, inst) == INVALID_REG_TYPE ||
287 elk_inst_3src_a1_src1_type(devinfo, inst) == INVALID_REG_TYPE ||
288 elk_inst_3src_a1_src2_type(devinfo, inst) == INVALID_REG_TYPE,
289 "invalid register type encoding");
290 } else {
291 ERROR("Align1 mode not allowed on Gen < 10");
292 }
293 } else {
294 ERROR_IF(elk_inst_3src_a16_dst_type(devinfo, inst) == INVALID_REG_TYPE ||
295 elk_inst_3src_a16_src_type(devinfo, inst) == INVALID_REG_TYPE,
296 "invalid register type encoding");
297 }
298 } else {
299 ERROR_IF(elk_inst_dst_type (devinfo, inst) == INVALID_REG_TYPE ||
300 (num_sources > 0 &&
301 elk_inst_src0_type(devinfo, inst) == INVALID_REG_TYPE) ||
302 (num_sources > 1 &&
303 elk_inst_src1_type(devinfo, inst) == INVALID_REG_TYPE),
304 "invalid register type encoding");
305 }
306
307 return error_msg;
308 }
309
310 static struct string
311 sources_not_null(const struct elk_isa_info *isa,
312 const elk_inst *inst)
313 {
314 const struct intel_device_info *devinfo = isa->devinfo;
315 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
316 struct string error_msg = { .str = NULL, .len = 0 };
317
318 /* Nothing to test. 3-src instructions can only have GRF sources, and
319 * there's no bit to control the file.
320 */
321 if (num_sources == 3)
322 return (struct string){};
323
324 /* Nothing to test. Split sends can only encode a file in sources that are
325 * allowed to be NULL.
326 */
327 if (inst_is_split_send(isa, inst))
328 return (struct string){};
329
330 if (num_sources >= 1 && elk_inst_opcode(isa, inst) != ELK_OPCODE_SYNC)
331 ERROR_IF(src0_is_null(devinfo, inst), "src0 is null");
332
333 if (num_sources == 2)
334 ERROR_IF(src1_is_null(devinfo, inst), "src1 is null");
335
336 return error_msg;
337 }
338
339 static struct string
340 alignment_supported(const struct elk_isa_info *isa,
341 const elk_inst *inst)
342 {
343 const struct intel_device_info *devinfo = isa->devinfo;
344 struct string error_msg = { .str = NULL, .len = 0 };
345
346 ERROR_IF(devinfo->ver >= 11 && elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_16,
347 "Align16 not supported");
348
349 return error_msg;
350 }
351
352 static bool
353 inst_uses_src_acc(const struct elk_isa_info *isa,
354 const elk_inst *inst)
355 {
356 const struct intel_device_info *devinfo = isa->devinfo;
357
358 /* Check instructions that use implicit accumulator sources */
359 switch (elk_inst_opcode(isa, inst)) {
360 case ELK_OPCODE_MAC:
361 case ELK_OPCODE_MACH:
362 case ELK_OPCODE_SADA2:
363 return true;
364 default:
365 break;
366 }
367
368 /* FIXME: support 3-src instructions */
369 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
370 assert(num_sources < 3);
371
372 return src0_is_acc(devinfo, inst) || (num_sources > 1 && src1_is_acc(devinfo, inst));
373 }
374
375 static struct string
376 send_restrictions(const struct elk_isa_info *isa,
377 const elk_inst *inst)
378 {
379 const struct intel_device_info *devinfo = isa->devinfo;
380
381 struct string error_msg = { .str = NULL, .len = 0 };
382
383 if (inst_is_split_send(isa, inst)) {
384 ERROR_IF(elk_inst_send_src1_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
385 elk_inst_send_src1_reg_nr(devinfo, inst) != ELK_ARF_NULL,
386 "src1 of split send must be a GRF or NULL");
387
388 ERROR_IF(elk_inst_eot(devinfo, inst) &&
389 elk_inst_src0_da_reg_nr(devinfo, inst) < 112,
390 "send with EOT must use g112-g127");
391 ERROR_IF(elk_inst_eot(devinfo, inst) &&
392 elk_inst_send_src1_reg_file(devinfo, inst) == ELK_GENERAL_REGISTER_FILE &&
393 elk_inst_send_src1_reg_nr(devinfo, inst) < 112,
394 "send with EOT must use g112-g127");
395
396 if (elk_inst_send_src0_reg_file(devinfo, inst) == ELK_GENERAL_REGISTER_FILE &&
397 elk_inst_send_src1_reg_file(devinfo, inst) == ELK_GENERAL_REGISTER_FILE) {
398 /* Assume minimums if we don't know */
399 unsigned mlen = 1;
400 if (!elk_inst_send_sel_reg32_desc(devinfo, inst)) {
401 const uint32_t desc = elk_inst_send_desc(devinfo, inst);
402 mlen = elk_message_desc_mlen(devinfo, desc) / reg_unit(devinfo);
403 }
404
405 unsigned ex_mlen = 1;
406 if (!elk_inst_send_sel_reg32_ex_desc(devinfo, inst)) {
407 const uint32_t ex_desc = elk_inst_sends_ex_desc(devinfo, inst);
408 ex_mlen = elk_message_ex_desc_ex_mlen(devinfo, ex_desc) /
409 reg_unit(devinfo);
410 }
411 const unsigned src0_reg_nr = elk_inst_src0_da_reg_nr(devinfo, inst);
412 const unsigned src1_reg_nr = elk_inst_send_src1_reg_nr(devinfo, inst);
413 ERROR_IF((src0_reg_nr <= src1_reg_nr &&
414 src1_reg_nr < src0_reg_nr + mlen) ||
415 (src1_reg_nr <= src0_reg_nr &&
416 src0_reg_nr < src1_reg_nr + ex_mlen),
417 "split send payloads must not overlap");
418 }
419 } else if (inst_is_send(isa, inst)) {
420 ERROR_IF(elk_inst_src0_address_mode(devinfo, inst) != ELK_ADDRESS_DIRECT,
421 "send must use direct addressing");
422
423 if (devinfo->ver >= 7) {
424 ERROR_IF(elk_inst_send_src0_reg_file(devinfo, inst) != ELK_GENERAL_REGISTER_FILE,
425 "send from non-GRF");
426 ERROR_IF(elk_inst_eot(devinfo, inst) &&
427 elk_inst_src0_da_reg_nr(devinfo, inst) < 112,
428 "send with EOT must use g112-g127");
429 }
430
431 if (devinfo->ver >= 8) {
432 ERROR_IF(!dst_is_null(devinfo, inst) &&
433 (elk_inst_dst_da_reg_nr(devinfo, inst) +
434 elk_inst_rlen(devinfo, inst) > 127) &&
435 (elk_inst_src0_da_reg_nr(devinfo, inst) +
436 elk_inst_mlen(devinfo, inst) >
437 elk_inst_dst_da_reg_nr(devinfo, inst)),
438 "r127 must not be used for return address when there is "
439 "a src and dest overlap");
440 }
441 }
442
443 return error_msg;
444 }
445
446 static bool
447 is_unsupported_inst(const struct elk_isa_info *isa,
448 const elk_inst *inst)
449 {
450 return elk_inst_opcode(isa, inst) == ELK_OPCODE_ILLEGAL;
451 }
452
453 /**
454 * Returns whether a combination of two types would qualify as mixed float
455 * operation mode
456 */
457 static inline bool
458 types_are_mixed_float(enum elk_reg_type t0, enum elk_reg_type t1)
459 {
460 return (t0 == ELK_REGISTER_TYPE_F && t1 == ELK_REGISTER_TYPE_HF) ||
461 (t1 == ELK_REGISTER_TYPE_F && t0 == ELK_REGISTER_TYPE_HF);
462 }
463
464 static enum elk_reg_type
465 execution_type_for_type(enum elk_reg_type type)
466 {
467 switch (type) {
468 case ELK_REGISTER_TYPE_NF:
469 case ELK_REGISTER_TYPE_DF:
470 case ELK_REGISTER_TYPE_F:
471 case ELK_REGISTER_TYPE_HF:
472 return type;
473
474 case ELK_REGISTER_TYPE_VF:
475 return ELK_REGISTER_TYPE_F;
476
477 case ELK_REGISTER_TYPE_Q:
478 case ELK_REGISTER_TYPE_UQ:
479 return ELK_REGISTER_TYPE_Q;
480
481 case ELK_REGISTER_TYPE_D:
482 case ELK_REGISTER_TYPE_UD:
483 return ELK_REGISTER_TYPE_D;
484
485 case ELK_REGISTER_TYPE_W:
486 case ELK_REGISTER_TYPE_UW:
487 case ELK_REGISTER_TYPE_B:
488 case ELK_REGISTER_TYPE_UB:
489 case ELK_REGISTER_TYPE_V:
490 case ELK_REGISTER_TYPE_UV:
491 return ELK_REGISTER_TYPE_W;
492 }
493 unreachable("not reached");
494 }
495
496 /**
497 * Returns the execution type of an instruction \p inst
498 */
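/* For example, an ADD with one W source and one D source executes as D,
 * and an instruction mixing F and HF operands executes as F.
 */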
499 static enum elk_reg_type
500 execution_type(const struct elk_isa_info *isa, const elk_inst *inst)
501 {
502 const struct intel_device_info *devinfo = isa->devinfo;
503
504 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
505 enum elk_reg_type src0_exec_type, src1_exec_type;
506
507 /* Execution data type is independent of destination data type, except in
508 * mixed F/HF instructions.
509 */
510 enum elk_reg_type dst_exec_type = inst_dst_type(isa, inst);
511
512 src0_exec_type = execution_type_for_type(elk_inst_src0_type(devinfo, inst));
513 if (num_sources == 1) {
514 if (src0_exec_type == ELK_REGISTER_TYPE_HF)
515 return dst_exec_type;
516 return src0_exec_type;
517 }
518
519 src1_exec_type = execution_type_for_type(elk_inst_src1_type(devinfo, inst));
520 if (types_are_mixed_float(src0_exec_type, src1_exec_type) ||
521 types_are_mixed_float(src0_exec_type, dst_exec_type) ||
522 types_are_mixed_float(src1_exec_type, dst_exec_type)) {
523 return ELK_REGISTER_TYPE_F;
524 }
525
526 if (src0_exec_type == src1_exec_type)
527 return src0_exec_type;
528
529 if (src0_exec_type == ELK_REGISTER_TYPE_NF ||
530 src1_exec_type == ELK_REGISTER_TYPE_NF)
531 return ELK_REGISTER_TYPE_NF;
532
533 /* Mixed operand types where one operand is float execute as float on
534 * Gen < 6 (such mixing is not allowed on later platforms).
535 */
536 if (devinfo->ver < 6 &&
537 (src0_exec_type == ELK_REGISTER_TYPE_F ||
538 src1_exec_type == ELK_REGISTER_TYPE_F))
539 return ELK_REGISTER_TYPE_F;
540
541 if (src0_exec_type == ELK_REGISTER_TYPE_Q ||
542 src1_exec_type == ELK_REGISTER_TYPE_Q)
543 return ELK_REGISTER_TYPE_Q;
544
545 if (src0_exec_type == ELK_REGISTER_TYPE_D ||
546 src1_exec_type == ELK_REGISTER_TYPE_D)
547 return ELK_REGISTER_TYPE_D;
548
549 if (src0_exec_type == ELK_REGISTER_TYPE_W ||
550 src1_exec_type == ELK_REGISTER_TYPE_W)
551 return ELK_REGISTER_TYPE_W;
552
553 if (src0_exec_type == ELK_REGISTER_TYPE_DF ||
554 src1_exec_type == ELK_REGISTER_TYPE_DF)
555 return ELK_REGISTER_TYPE_DF;
556
557 unreachable("not reached");
558 }
559
560 /**
561 * Returns whether a region is packed
562 *
563 * A region is packed if its elements are adjacent in memory, with no
564 * intervening space, no overlap, and no replicated values.
565 */
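/* For example, a <4;4,1> region (vstride 4, width 4, hstride 1) is packed,
 * while <8;4,2> leaves gaps between elements and <0;1,0> replicates a
 * single element, so neither of those is packed.
 */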
566 static bool
567 is_packed(unsigned vstride, unsigned width, unsigned hstride)
568 {
569 if (vstride == width) {
570 if (vstride == 1) {
571 return hstride == 0;
572 } else {
573 return hstride == 1;
574 }
575 }
576
577 return false;
578 }
579
580 /**
581 * Returns whether a region is linear
582 *
583 * A region is linear if its elements do not overlap and are not replicated.
584 * Unlike a packed region, intervening space (i.e. strided values) is allowed.
585 */
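/* For example, <8;4,2> is linear (vstride 8 == width 4 * hstride 2) even
 * though it is not packed, while <4;4,0> replicates elements within each
 * row and is therefore not linear.
 */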
586 static bool
587 is_linear(unsigned vstride, unsigned width, unsigned hstride)
588 {
589 return vstride == width * hstride ||
590 (hstride == 0 && width == 1);
591 }
592
593 /**
594 * Returns whether an instruction is an explicit or implicit conversion
595 * to/from half-float.
596 */
597 static bool
598 is_half_float_conversion(const struct elk_isa_info *isa,
599 const elk_inst *inst)
600 {
601 const struct intel_device_info *devinfo = isa->devinfo;
602
603 enum elk_reg_type dst_type = elk_inst_dst_type(devinfo, inst);
604
605 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
606 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
607
608 if (dst_type != src0_type &&
609 (dst_type == ELK_REGISTER_TYPE_HF || src0_type == ELK_REGISTER_TYPE_HF)) {
610 return true;
611 } else if (num_sources > 1) {
612 enum elk_reg_type src1_type = elk_inst_src1_type(devinfo, inst);
613 return dst_type != src1_type &&
614 (dst_type == ELK_REGISTER_TYPE_HF ||
615 src1_type == ELK_REGISTER_TYPE_HF);
616 }
617
618 return false;
619 }
620
621 /*
622 * Returns whether an instruction is using mixed float operation mode
623 */
624 static bool
625 is_mixed_float(const struct elk_isa_info *isa, const elk_inst *inst)
626 {
627 const struct intel_device_info *devinfo = isa->devinfo;
628
629 if (devinfo->ver < 8)
630 return false;
631
632 if (inst_is_send(isa, inst))
633 return false;
634
635 unsigned opcode = elk_inst_opcode(isa, inst);
636 const struct elk_opcode_desc *desc = elk_opcode_desc(isa, opcode);
637 if (desc->ndst == 0)
638 return false;
639
640 /* FIXME: support 3-src instructions */
641 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
642 assert(num_sources < 3);
643
644 enum elk_reg_type dst_type = elk_inst_dst_type(devinfo, inst);
645 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
646
647 if (num_sources == 1)
648 return types_are_mixed_float(src0_type, dst_type);
649
650 enum elk_reg_type src1_type = elk_inst_src1_type(devinfo, inst);
651
652 return types_are_mixed_float(src0_type, src1_type) ||
653 types_are_mixed_float(src0_type, dst_type) ||
654 types_are_mixed_float(src1_type, dst_type);
655 }
656
657 /**
658 * Returns whether an instruction is an explicit or implicit conversion
659 * to/from byte.
660 */
661 static bool
662 is_byte_conversion(const struct elk_isa_info *isa,
663 const elk_inst *inst)
664 {
665 const struct intel_device_info *devinfo = isa->devinfo;
666
667 enum elk_reg_type dst_type = elk_inst_dst_type(devinfo, inst);
668
669 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
670 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
671
672 if (dst_type != src0_type &&
673 (type_sz(dst_type) == 1 || type_sz(src0_type) == 1)) {
674 return true;
675 } else if (num_sources > 1) {
676 enum elk_reg_type src1_type = elk_inst_src1_type(devinfo, inst);
677 return dst_type != src1_type &&
678 (type_sz(dst_type) == 1 || type_sz(src1_type) == 1);
679 }
680
681 return false;
682 }
683
684 /**
685 * Checks restrictions listed in "General Restrictions Based on Operand Types"
686 * in the "Register Region Restrictions" section.
687 */
688 static struct string
689 general_restrictions_based_on_operand_types(const struct elk_isa_info *isa,
690 const elk_inst *inst)
691 {
692 const struct intel_device_info *devinfo = isa->devinfo;
693
694 const struct elk_opcode_desc *desc =
695 elk_opcode_desc(isa, elk_inst_opcode(isa, inst));
696 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
697 unsigned exec_size = 1 << elk_inst_exec_size(devinfo, inst);
698 struct string error_msg = { .str = NULL, .len = 0 };
699
700 if (inst_is_send(isa, inst))
701 return error_msg;
702
703 if (devinfo->ver >= 11) {
704 /* A register type of B or UB for DPAS actually means 4 bytes packed into
705 * a D or UD, so it is allowed.
706 */
707 if (num_sources == 3 && elk_inst_opcode(isa, inst) != ELK_OPCODE_DPAS) {
708 ERROR_IF(elk_reg_type_to_size(elk_inst_3src_a1_src1_type(devinfo, inst)) == 1 ||
709 elk_reg_type_to_size(elk_inst_3src_a1_src2_type(devinfo, inst)) == 1,
710 "Byte data type is not supported for src1/2 register regioning. This includes "
711 "byte broadcast as well.");
712 }
713 if (num_sources == 2) {
714 ERROR_IF(elk_reg_type_to_size(elk_inst_src1_type(devinfo, inst)) == 1,
715 "Byte data type is not supported for src1 register regioning. This includes "
716 "byte broadcast as well.");
717 }
718 }
719
720 enum elk_reg_type dst_type;
721
722 if (num_sources == 3) {
723 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1)
724 dst_type = elk_inst_3src_a1_dst_type(devinfo, inst);
725 else
726 dst_type = elk_inst_3src_a16_dst_type(devinfo, inst);
727 } else {
728 dst_type = inst_dst_type(isa, inst);
729 }
730
731 ERROR_IF(dst_type == ELK_REGISTER_TYPE_DF &&
732 !devinfo->has_64bit_float,
733 "64-bit float destination, but platform does not support it");
734
735 ERROR_IF((dst_type == ELK_REGISTER_TYPE_Q ||
736 dst_type == ELK_REGISTER_TYPE_UQ) &&
737 !devinfo->has_64bit_int,
738 "64-bit int destination, but platform does not support it");
739
740 for (unsigned s = 0; s < num_sources; s++) {
741 enum elk_reg_type src_type;
742 if (num_sources == 3) {
743 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1) {
744 switch (s) {
745 case 0: src_type = elk_inst_3src_a1_src0_type(devinfo, inst); break;
746 case 1: src_type = elk_inst_3src_a1_src1_type(devinfo, inst); break;
747 case 2: src_type = elk_inst_3src_a1_src2_type(devinfo, inst); break;
748 default: unreachable("invalid src");
749 }
750 } else {
751 src_type = elk_inst_3src_a16_src_type(devinfo, inst);
752 }
753 } else {
754 switch (s) {
755 case 0: src_type = elk_inst_src0_type(devinfo, inst); break;
756 case 1: src_type = elk_inst_src1_type(devinfo, inst); break;
757 default: unreachable("invalid src");
758 }
759 }
760
761 ERROR_IF(src_type == ELK_REGISTER_TYPE_DF &&
762 !devinfo->has_64bit_float,
763 "64-bit float source, but platform does not support it");
764
765 ERROR_IF((src_type == ELK_REGISTER_TYPE_Q ||
766 src_type == ELK_REGISTER_TYPE_UQ) &&
767 !devinfo->has_64bit_int,
768 "64-bit int source, but platform does not support it");
769 }
770
771 if (num_sources == 3)
772 return error_msg;
773
774 if (exec_size == 1)
775 return error_msg;
776
777 if (desc->ndst == 0)
778 return error_msg;
779
780 /* The PRMs say:
781 *
782 * Where n is the largest element size in bytes for any source or
783 * destination operand type, ExecSize * n must be <= 64.
784 *
785 * But we do not attempt to enforce it, because it is implied by other
786 * rules:
787 *
788 * - that the destination stride must match the execution data type
789 * - sources may not span more than two adjacent GRF registers
790 * - destination may not span more than two adjacent GRF registers
791 *
792 * In fact, checking it would weaken testing of the other rules.
793 */
794
795 unsigned dst_stride = STRIDE(elk_inst_dst_hstride(devinfo, inst));
796 bool dst_type_is_byte =
797 inst_dst_type(isa, inst) == ELK_REGISTER_TYPE_B ||
798 inst_dst_type(isa, inst) == ELK_REGISTER_TYPE_UB;
799
800 if (dst_type_is_byte) {
801 if (is_packed(exec_size * dst_stride, exec_size, dst_stride)) {
802 if (!inst_is_raw_move(isa, inst))
803 ERROR("Only raw MOV supports a packed-byte destination");
804 return error_msg;
805 }
806 }
807
808 unsigned exec_type = execution_type(isa, inst);
809 unsigned exec_type_size = elk_reg_type_to_size(exec_type);
810 unsigned dst_type_size = elk_reg_type_to_size(dst_type);
811
812 /* On IVB/BYT, region parameters and execution size for DF are in terms of
813 * 32-bit elements, so they are doubled. For evaluating the validity of an
814 * instruction, we halve them.
815 */
816 if (devinfo->verx10 == 70 &&
817 exec_type_size == 8 && dst_type_size == 4)
818 dst_type_size = 8;
819
820 if (is_byte_conversion(isa, inst)) {
821 /* From the BDW+ PRM, Volume 2a, Command Reference, Instructions - MOV:
822 *
823 * "There is no direct conversion from B/UB to DF or DF to B/UB.
824 * There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB."
825 *
826 * Even if these restrictions are listed for the MOV instruction, we
827 * validate this more generally, since there is the possibility
828 * of implicit conversions from other instructions.
829 */
830 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
831 enum elk_reg_type src1_type = num_sources > 1 ?
832 elk_inst_src1_type(devinfo, inst) : 0;
833
834 ERROR_IF(type_sz(dst_type) == 1 &&
835 (type_sz(src0_type) == 8 ||
836 (num_sources > 1 && type_sz(src1_type) == 8)),
837 "There are no direct conversions between 64-bit types and B/UB");
838
839 ERROR_IF(type_sz(dst_type) == 8 &&
840 (type_sz(src0_type) == 1 ||
841 (num_sources > 1 && type_sz(src1_type) == 1)),
842 "There are no direct conversions between 64-bit types and B/UB");
843 }
844
845 if (is_half_float_conversion(isa, inst)) {
846 /* The following restriction is from the BDW+ PRM, Volume 2a,
847 * Command Reference, Instructions - MOV:
849 *
850 * "There is no direct conversion from HF to DF or DF to HF.
851 * There is no direct conversion from HF to Q/UQ or Q/UQ to HF."
852 *
853 * Even if these restrictions are listed for the MOV instruction, we
854 * validate this more generally, since there is the possibility
855 * of implicit conversions from other instructions, such as implicit
856 * conversion from integer to HF with the ADD instruction in SKL+.
857 */
858 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
859 enum elk_reg_type src1_type = num_sources > 1 ?
860 elk_inst_src1_type(devinfo, inst) : 0;
861 ERROR_IF(dst_type == ELK_REGISTER_TYPE_HF &&
862 (type_sz(src0_type) == 8 ||
863 (num_sources > 1 && type_sz(src1_type) == 8)),
864 "There are no direct conversions between 64-bit types and HF");
865
866 ERROR_IF(type_sz(dst_type) == 8 &&
867 (src0_type == ELK_REGISTER_TYPE_HF ||
868 (num_sources > 1 && src1_type == ELK_REGISTER_TYPE_HF)),
869 "There are no direct conversions between 64-bit types and HF");
870
871 /* From the BDW+ PRM:
872 *
873 * "Conversion between Integer and HF (Half Float) must be
874 * DWord-aligned and strided by a DWord on the destination."
875 *
876 * Also, the above restrictions seem to be expanded on CHV and SKL+ by:
877 *
878 * "There is a relaxed alignment rule for word destinations. When
879 * the destination type is word (UW, W, HF), destination data types
880 * can be aligned to either the lowest word or the second lowest
881 * word of the execution channel. This means the destination data
882 * words can be either all in the even word locations or all in the
883 * odd word locations."
884 *
885 * We do not implement the second rule as is though, since empirical
886 * testing shows inconsistencies:
887 * - It suggests that packed 16-bit is not allowed, which is not true.
888 * - It suggests that conversions from Q/DF to W (which need to be
889 * 64-bit aligned on the destination) are not possible, which is
890 * not true.
891 *
892 * So from this rule we only validate the implication that conversions
893 * from F to HF need to be DWord strided (except in Align1 mixed
894 * float mode where packed fp16 destination is allowed so long as the
895 * destination is oword-aligned).
896 *
897 * Finally, we only validate this for Align1 because Align16 always
898 * requires packed destinations, so these restrictions can't possibly
899 * apply to Align16 mode.
900 */
901 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1) {
902 if ((dst_type == ELK_REGISTER_TYPE_HF &&
903 (elk_reg_type_is_integer(src0_type) ||
904 (num_sources > 1 && elk_reg_type_is_integer(src1_type)))) ||
905 (elk_reg_type_is_integer(dst_type) &&
906 (src0_type == ELK_REGISTER_TYPE_HF ||
907 (num_sources > 1 && src1_type == ELK_REGISTER_TYPE_HF)))) {
908 ERROR_IF(dst_stride * dst_type_size != 4,
909 "Conversions between integer and half-float must be "
910 "strided by a DWord on the destination");
911
912 unsigned subreg = elk_inst_dst_da1_subreg_nr(devinfo, inst);
913 ERROR_IF(subreg % 4 != 0,
914 "Conversions between integer and half-float must be "
915 "aligned to a DWord on the destination");
916 } else if ((devinfo->platform == INTEL_PLATFORM_CHV ||
917 devinfo->ver >= 9) &&
918 dst_type == ELK_REGISTER_TYPE_HF) {
919 unsigned subreg = elk_inst_dst_da1_subreg_nr(devinfo, inst);
920 ERROR_IF(dst_stride != 2 &&
921 !(is_mixed_float(isa, inst) &&
922 dst_stride == 1 && subreg % 16 == 0),
923 "Conversions to HF must have either all words in even "
924 "word locations or all words in odd word locations or "
925 "be mixed-float with Oword-aligned packed destination");
926 }
927 }
928 }
929
930 /* There are special regioning rules for mixed-float mode in CHV and SKL that
931 * override the general rule for the ratio of sizes of the destination type
932 * and the execution type. We will add validation for those in a later patch.
933 */
934 bool validate_dst_size_and_exec_size_ratio =
935 !is_mixed_float(isa, inst) ||
936 !(devinfo->platform == INTEL_PLATFORM_CHV || devinfo->ver >= 9);
937
938 if (validate_dst_size_and_exec_size_ratio &&
939 exec_type_size > dst_type_size) {
940 if (!(dst_type_is_byte && inst_is_raw_move(isa, inst))) {
941 ERROR_IF(dst_stride * dst_type_size != exec_type_size,
942 "Destination stride must be equal to the ratio of the sizes "
943 "of the execution data type to the destination type");
944 }
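/* For example, an instruction with a D execution type and a W destination
 * needs a destination stride of 2, so that 2 bytes * 2 == 4 bytes of
 * execution type size.
 */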
945
946 unsigned subreg = elk_inst_dst_da1_subreg_nr(devinfo, inst);
947
948 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1 &&
949 elk_inst_dst_address_mode(devinfo, inst) == ELK_ADDRESS_DIRECT) {
950 /* The i965 PRM says:
951 *
952 * Implementation Restriction: The relaxed alignment rule for byte
953 * destination (#10.5) is not supported.
954 */
955 if (devinfo->verx10 >= 45 && dst_type_is_byte) {
956 ERROR_IF(subreg % exec_type_size != 0 &&
957 subreg % exec_type_size != 1,
958 "Destination subreg must be aligned to the size of the "
959 "execution data type (or to the next lowest byte for byte "
960 "destinations)");
961 } else {
962 ERROR_IF(subreg % exec_type_size != 0,
963 "Destination subreg must be aligned to the size of the "
964 "execution data type");
965 }
966 }
967 }
968
969 return error_msg;
970 }
971
972 /**
973 * Checks restrictions listed in "General Restrictions on Regioning Parameters"
974 * in the "Register Region Restrictions" section.
975 */
976 static struct string
977 general_restrictions_on_region_parameters(const struct elk_isa_info *isa,
978 const elk_inst *inst)
979 {
980 const struct intel_device_info *devinfo = isa->devinfo;
981
982 const struct elk_opcode_desc *desc =
983 elk_opcode_desc(isa, elk_inst_opcode(isa, inst));
984 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
985 unsigned exec_size = 1 << elk_inst_exec_size(devinfo, inst);
986 struct string error_msg = { .str = NULL, .len = 0 };
987
988 if (num_sources == 3)
989 return (struct string){};
990
991 /* Split sends don't have the bits in the instruction to encode regions so
992 * there's nothing to check.
993 */
994 if (inst_is_split_send(isa, inst))
995 return (struct string){};
996
997 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_16) {
998 if (desc->ndst != 0 && !dst_is_null(devinfo, inst))
999 ERROR_IF(elk_inst_dst_hstride(devinfo, inst) != ELK_HORIZONTAL_STRIDE_1,
1000 "Destination Horizontal Stride must be 1");
1001
1002 if (num_sources >= 1) {
1003 if (devinfo->verx10 >= 75) {
1004 ERROR_IF(elk_inst_src0_reg_file(devinfo, inst) != ELK_IMMEDIATE_VALUE &&
1005 elk_inst_src0_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_0 &&
1006 elk_inst_src0_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_2 &&
1007 elk_inst_src0_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_4,
1008 "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
1009 } else {
1010 ERROR_IF(elk_inst_src0_reg_file(devinfo, inst) != ELK_IMMEDIATE_VALUE &&
1011 elk_inst_src0_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_0 &&
1012 elk_inst_src0_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_4,
1013 "In Align16 mode, only VertStride of 0 or 4 is allowed");
1014 }
1015 }
1016
1017 if (num_sources == 2) {
1018 if (devinfo->verx10 >= 75) {
1019 ERROR_IF(elk_inst_src1_reg_file(devinfo, inst) != ELK_IMMEDIATE_VALUE &&
1020 elk_inst_src1_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_0 &&
1021 elk_inst_src1_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_2 &&
1022 elk_inst_src1_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_4,
1023 "In Align16 mode, only VertStride of 0, 2, or 4 is allowed");
1024 } else {
1025 ERROR_IF(elk_inst_src1_reg_file(devinfo, inst) != ELK_IMMEDIATE_VALUE &&
1026 elk_inst_src1_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_0 &&
1027 elk_inst_src1_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_4,
1028 "In Align16 mode, only VertStride of 0 or 4 is allowed");
1029 }
1030 }
1031
1032 return error_msg;
1033 }
1034
1035 for (unsigned i = 0; i < num_sources; i++) {
1036 unsigned vstride, width, hstride, element_size, subreg;
1037 enum elk_reg_type type;
1038
1039 #define DO_SRC(n) \
1040 if (elk_inst_src ## n ## _reg_file(devinfo, inst) == \
1041 ELK_IMMEDIATE_VALUE) \
1042 continue; \
1043 \
1044 vstride = STRIDE(elk_inst_src ## n ## _vstride(devinfo, inst)); \
1045 width = WIDTH(elk_inst_src ## n ## _width(devinfo, inst)); \
1046 hstride = STRIDE(elk_inst_src ## n ## _hstride(devinfo, inst)); \
1047 type = elk_inst_src ## n ## _type(devinfo, inst); \
1048 element_size = elk_reg_type_to_size(type); \
1049 subreg = elk_inst_src ## n ## _da1_subreg_nr(devinfo, inst)
1050
1051 if (i == 0) {
1052 DO_SRC(0);
1053 } else {
1054 DO_SRC(1);
1055 }
1056 #undef DO_SRC
1057
1058 /* On IVB/BYT, region parameters and execution size for DF are in terms of
1059 * 32-bit elements, so they are doubled. For evaluating the validity of an
1060 * instruction, we halve them.
1061 */
1062 if (devinfo->verx10 == 70 &&
1063 element_size == 8)
1064 element_size = 4;
1065
1066 /* ExecSize must be greater than or equal to Width. */
1067 ERROR_IF(exec_size < width, "ExecSize must be greater than or equal "
1068 "to Width");
1069
1070 /* If ExecSize = Width and HorzStride ≠ 0,
1071 * VertStride must be set to Width * HorzStride.
1072 */
1073 if (exec_size == width && hstride != 0) {
1074 ERROR_IF(vstride != width * hstride,
1075 "If ExecSize = Width and HorzStride ≠ 0, "
1076 "VertStride must be set to Width * HorzStride");
1077 }
1078
1079 /* If Width = 1, HorzStride must be 0 regardless of the values of
1080 * ExecSize and VertStride.
1081 */
1082 if (width == 1) {
1083 ERROR_IF(hstride != 0,
1084 "If Width = 1, HorzStride must be 0 regardless "
1085 "of the values of ExecSize and VertStride");
1086 }
1087
1088 /* If ExecSize = Width = 1, both VertStride and HorzStride must be 0. */
1089 if (exec_size == 1 && width == 1) {
1090 ERROR_IF(vstride != 0 || hstride != 0,
1091 "If ExecSize = Width = 1, both VertStride "
1092 "and HorzStride must be 0");
1093 }
1094
1095 /* If VertStride = HorzStride = 0, Width must be 1 regardless of the
1096 * value of ExecSize.
1097 */
1098 if (vstride == 0 && hstride == 0) {
1099 ERROR_IF(width != 1,
1100 "If VertStride = HorzStride = 0, Width must be "
1101 "1 regardless of the value of ExecSize");
1102 }
1103
1104 /* VertStride must be used to cross GRF register boundaries. This rule
1105 * implies that elements within a 'Width' cannot cross GRF boundaries.
1106 */
1107 const uint64_t mask = (1ULL << element_size) - 1;
1108 unsigned rowbase = subreg;
1109
1110 for (int y = 0; y < exec_size / width; y++) {
1111 uint64_t access_mask = 0;
1112 unsigned offset = rowbase;
1113
1114 for (int x = 0; x < width; x++) {
1115 access_mask |= mask << (offset % 64);
1116 offset += hstride * element_size;
1117 }
1118
1119 rowbase += vstride * element_size;
1120
1121 if ((uint32_t)access_mask != 0 && (access_mask >> 32) != 0) {
1122 ERROR("VertStride must be used to cross GRF register boundaries");
1123 break;
1124 }
1125 }
1126 }
1127
1128 /* Dst.HorzStride must not be 0. */
1129 if (desc->ndst != 0 && !dst_is_null(devinfo, inst)) {
1130 ERROR_IF(elk_inst_dst_hstride(devinfo, inst) == ELK_HORIZONTAL_STRIDE_0,
1131 "Destination Horizontal Stride must not be 0");
1132 }
1133
1134 return error_msg;
1135 }
1136
1137 static struct string
1138 special_restrictions_for_mixed_float_mode(const struct elk_isa_info *isa,
1139 const elk_inst *inst)
1140 {
1141 const struct intel_device_info *devinfo = isa->devinfo;
1142
1143 struct string error_msg = { .str = NULL, .len = 0 };
1144
1145 const unsigned opcode = elk_inst_opcode(isa, inst);
1146 const unsigned num_sources = elk_num_sources_from_inst(isa, inst);
1147 if (num_sources >= 3)
1148 return error_msg;
1149
1150 if (!is_mixed_float(isa, inst))
1151 return error_msg;
1152
1153 unsigned exec_size = 1 << elk_inst_exec_size(devinfo, inst);
1154 bool is_align16 = elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_16;
1155
1156 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
1157 enum elk_reg_type src1_type = num_sources > 1 ?
1158 elk_inst_src1_type(devinfo, inst) : 0;
1159 enum elk_reg_type dst_type = elk_inst_dst_type(devinfo, inst);
1160
1161 unsigned dst_stride = STRIDE(elk_inst_dst_hstride(devinfo, inst));
1162 bool dst_is_packed = is_packed(exec_size * dst_stride, exec_size, dst_stride);
1163
1164 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1165 * Float Operations:
1166 *
1167 * "Indirect addressing on source is not supported when source and
1168 * destination data types are mixed float."
1169 */
1170 ERROR_IF(elk_inst_src0_address_mode(devinfo, inst) != ELK_ADDRESS_DIRECT ||
1171 (num_sources > 1 &&
1172 elk_inst_src1_address_mode(devinfo, inst) != ELK_ADDRESS_DIRECT),
1173 "Indirect addressing on source is not supported when source and "
1174 "destination data types are mixed float");
1175
1176 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1177 * Float Operations:
1178 *
1179 * "No SIMD16 in mixed mode when destination is f32. Instruction
1180 * execution size must be no more than 8."
1181 */
1182 ERROR_IF(exec_size > 8 && dst_type == ELK_REGISTER_TYPE_F,
1183 "Mixed float mode with 32-bit float destination is limited "
1184 "to SIMD8");
1185
1186 if (is_align16) {
1187 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1188 * Float Operations:
1189 *
1190 * "In Align16 mode, when half float and float data types are mixed
1191 * between source operands OR between source and destination operands,
1192 * the register content are assumed to be packed."
1193 *
1194 * Since Align16 doesn't have a concept of horizontal stride (or width),
1195 * it means that vertical stride must always be 4, since 0 and 2 would
1196 * lead to replicated data, and any other value is disallowed in Align16.
1197 */
1198 ERROR_IF(elk_inst_src0_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_4,
1199 "Align16 mixed float mode assumes packed data (vstride must be 4");
1200
1201 ERROR_IF(num_sources >= 2 &&
1202 elk_inst_src1_vstride(devinfo, inst) != ELK_VERTICAL_STRIDE_4,
1203 "Align16 mixed float mode assumes packed data (vstride must be 4");
1204
1205 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1206 * Float Operations:
1207 *
1208 * "For Align16 mixed mode, both input and output packed f16 data
1209 * must be oword aligned, no oword crossing in packed f16."
1210 *
1211 * The previous rule requires that Align16 operands are always packed,
1212 * and since there is only one bit for Align16 subnr, which represents
1213 * offsets 0B and 16B, this rule is always enforced and we don't need to
1214 * validate it.
1215 */
1216
1217 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1218 * Float Operations:
1219 *
1220 * "No SIMD16 in mixed mode when destination is packed f16 for both
1221 * Align1 and Align16."
1222 *
1223 * And:
1224 *
1225 * "In Align16 mode, when half float and float data types are mixed
1226 * between source operands OR between source and destination operands,
1227 * the register content are assumed to be packed."
1228 *
1229 * Which implies that SIMD16 is not available in Align16. This is further
1230 * confirmed by:
1231 *
1232 * "For Align16 mixed mode, both input and output packed f16 data
1233 * must be oword aligned, no oword crossing in packed f16"
1234 *
1235 * Since oword-aligned packed f16 data would cross oword boundaries when
1236 * the execution size is larger than 8.
1237 */
1238 ERROR_IF(exec_size > 8, "Align16 mixed float mode is limited to SIMD8");
1239
1240 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1241 * Float Operations:
1242 *
1243 * "No accumulator read access for Align16 mixed float."
1244 */
1245 ERROR_IF(inst_uses_src_acc(isa, inst),
1246 "No accumulator read access for Align16 mixed float");
1247 } else {
1248 assert(!is_align16);
1249
1250 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1251 * Float Operations:
1252 *
1253 * "No SIMD16 in mixed mode when destination is packed f16 for both
1254 * Align1 and Align16."
1255 */
1256 ERROR_IF(exec_size > 8 && dst_is_packed &&
1257 dst_type == ELK_REGISTER_TYPE_HF,
1258 "Align1 mixed float mode is limited to SIMD8 when destination "
1259 "is packed half-float");
1260
1261 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1262 * Float Operations:
1263 *
1264 * "Math operations for mixed mode:
1265 * - In Align1, f16 inputs need to be strided"
1266 */
1267 if (opcode == ELK_OPCODE_MATH) {
1268 if (src0_type == ELK_REGISTER_TYPE_HF) {
1269 ERROR_IF(STRIDE(elk_inst_src0_hstride(devinfo, inst)) <= 1,
1270 "Align1 mixed mode math needs strided half-float inputs");
1271 }
1272
1273 if (num_sources >= 2 && src1_type == ELK_REGISTER_TYPE_HF) {
1274 ERROR_IF(STRIDE(elk_inst_src1_hstride(devinfo, inst)) <= 1,
1275 "Align1 mixed mode math needs strided half-float inputs");
1276 }
1277 }
1278
1279 if (dst_type == ELK_REGISTER_TYPE_HF && dst_stride == 1) {
1280 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1281 * Float Operations:
1282 *
1283 * "In Align1, destination stride can be smaller than execution
1284 * type. When destination is stride of 1, 16 bit packed data is
1285 * updated on the destination. However, output packed f16 data
1286 * must be oword aligned, no oword crossing in packed f16."
1287 *
1288 * The requirement of not crossing oword boundaries for 16-bit oword
1289 * aligned data means that execution size is limited to 8.
1290 */
1291 unsigned subreg;
1292 if (elk_inst_dst_address_mode(devinfo, inst) == ELK_ADDRESS_DIRECT)
1293 subreg = elk_inst_dst_da1_subreg_nr(devinfo, inst);
1294 else
1295 subreg = elk_inst_dst_ia_subreg_nr(devinfo, inst);
1296 ERROR_IF(subreg % 16 != 0,
1297 "Align1 mixed mode packed half-float output must be "
1298 "oword aligned");
1299 ERROR_IF(exec_size > 8,
1300 "Align1 mixed mode packed half-float output must not "
1301 "cross oword boundaries (max exec size is 8)");
1302
1303 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1304 * Float Operations:
1305 *
1306 * "When source is float or half float from accumulator register and
1307 * destination is half float with a stride of 1, the source must
1308 * register aligned. i.e., source must have offset zero."
1309 *
1310 * Align16 mixed float mode doesn't allow accumulator access on sources,
1311 * so we only need to check this for Align1.
1312 */
1313 if (src0_is_acc(devinfo, inst) &&
1314 (src0_type == ELK_REGISTER_TYPE_F ||
1315 src0_type == ELK_REGISTER_TYPE_HF)) {
1316 ERROR_IF(elk_inst_src0_da1_subreg_nr(devinfo, inst) != 0,
1317 "Mixed float mode requires register-aligned accumulator "
1318 "source reads when destination is packed half-float");
1319
1320 }
1321
1322 if (num_sources > 1 &&
1323 src1_is_acc(devinfo, inst) &&
1324 (src1_type == ELK_REGISTER_TYPE_F ||
1325 src1_type == ELK_REGISTER_TYPE_HF)) {
1326 ERROR_IF(elk_inst_src1_da1_subreg_nr(devinfo, inst) != 0,
1327 "Mixed float mode requires register-aligned accumulator "
1328 "source reads when destination is packed half-float");
1329 }
1330 }
1331
1332 /* From the SKL PRM, Special Restrictions for Handling Mixed Mode
1333 * Float Operations:
1334 *
1335 * "No swizzle is allowed when an accumulator is used as an implicit
1336 * source or an explicit source in an instruction. i.e. when
1337 * destination is half float with an implicit accumulator source,
1338 * destination stride needs to be 2."
1339 *
1340 * FIXME: it is not quite clear what the first sentence actually means
1341 * or its link to the implication described after it, so we only
1342 * validate the explicit implication, which is clearly described.
1343 */
1344 if (dst_type == ELK_REGISTER_TYPE_HF &&
1345 inst_uses_src_acc(isa, inst)) {
1346 ERROR_IF(dst_stride != 2,
1347 "Mixed float mode with implicit/explicit accumulator "
1348 "source and half-float destination requires a stride "
1349 "of 2 on the destination");
1350 }
1351 }
1352
1353 return error_msg;
1354 }
1355
1356 /**
1357 * Creates an \p access_mask for an \p exec_size, \p element_size, and a region
1358 *
1359 * An \p access_mask is a 32-element array of uint64_t, where each uint64_t is
1360 * a bitmask of bytes accessed by the region.
1361 *
1362 * For instance the access mask of the source gX.1<4,2,2>F in an exec_size = 4
1363 * instruction would be
1364 *
1365 * access_mask[0] = 0x00000000000000F0
1366 * access_mask[1] = 0x000000000000F000
1367 * access_mask[2] = 0x0000000000F00000
1368 * access_mask[3] = 0x00000000F0000000
1369 * access_mask[4-31] = 0
1370 *
1371 * because the first execution channel accesses bytes 7-4 and the second
1372 * execution channel accesses bytes 15-12, etc.
1373 */
1374 static void
1375 align1_access_mask(uint64_t access_mask[static 32],
1376 unsigned exec_size, unsigned element_size, unsigned subreg,
1377 unsigned vstride, unsigned width, unsigned hstride)
1378 {
1379 const uint64_t mask = (1ULL << element_size) - 1;
1380 unsigned rowbase = subreg;
1381 unsigned element = 0;
1382
1383 for (int y = 0; y < exec_size / width; y++) {
1384 unsigned offset = rowbase;
1385
1386 for (int x = 0; x < width; x++) {
1387 access_mask[element++] = mask << (offset % 64);
1388 offset += hstride * element_size;
1389 }
1390
1391 rowbase += vstride * element_size;
1392 }
1393
1394 assert(element == 0 || element == exec_size);
1395 }
1396
1397 /**
1398 * Returns the number of registers accessed according to the \p access_mask
1399 */
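/* With the gX.1<4,2,2>F example above, every per-channel byte mask stays
 * within the low 32 bits (i.e. the first register), so this returns 1;
 * any mask bit at or above bit 32 bumps the result to 2.
 */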
1400 static int
1401 registers_read(const uint64_t access_mask[static 32])
1402 {
1403 int regs_read = 0;
1404
1405 for (unsigned i = 0; i < 32; i++) {
1406 if (access_mask[i] > 0xFFFFFFFF) {
1407 return 2;
1408 } else if (access_mask[i]) {
1409 regs_read = 1;
1410 }
1411 }
1412
1413 return regs_read;
1414 }
1415
1416 /**
1417 * Checks restrictions listed in "Region Alignment Rules" in the "Register
1418 * Region Restrictions" section.
1419 */
1420 static struct string
1421 region_alignment_rules(const struct elk_isa_info *isa,
1422 const elk_inst *inst)
1423 {
1424 const struct intel_device_info *devinfo = isa->devinfo;
1425 const struct elk_opcode_desc *desc =
1426 elk_opcode_desc(isa, elk_inst_opcode(isa, inst));
1427 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
1428 unsigned exec_size = 1 << elk_inst_exec_size(devinfo, inst);
1429 uint64_t dst_access_mask[32], src0_access_mask[32], src1_access_mask[32];
1430 struct string error_msg = { .str = NULL, .len = 0 };
1431
1432 if (num_sources == 3)
1433 return (struct string){};
1434
1435 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_16)
1436 return (struct string){};
1437
1438 if (inst_is_send(isa, inst))
1439 return (struct string){};
1440
1441 memset(dst_access_mask, 0, sizeof(dst_access_mask));
1442 memset(src0_access_mask, 0, sizeof(src0_access_mask));
1443 memset(src1_access_mask, 0, sizeof(src1_access_mask));
1444
1445 for (unsigned i = 0; i < num_sources; i++) {
1446 unsigned vstride, width, hstride, element_size, subreg;
1447 enum elk_reg_type type;
1448
1449 /* In Direct Addressing mode, a source cannot span more than 2 adjacent
1450 * GRF registers.
1451 */
1452
1453 #define DO_SRC(n) \
1454 if (elk_inst_src ## n ## _address_mode(devinfo, inst) != \
1455 ELK_ADDRESS_DIRECT) \
1456 continue; \
1457 \
1458 if (elk_inst_src ## n ## _reg_file(devinfo, inst) == \
1459 ELK_IMMEDIATE_VALUE) \
1460 continue; \
1461 \
1462 vstride = STRIDE(elk_inst_src ## n ## _vstride(devinfo, inst)); \
1463 width = WIDTH(elk_inst_src ## n ## _width(devinfo, inst)); \
1464 hstride = STRIDE(elk_inst_src ## n ## _hstride(devinfo, inst)); \
1465 type = elk_inst_src ## n ## _type(devinfo, inst); \
1466 element_size = elk_reg_type_to_size(type); \
1467 subreg = elk_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1468 align1_access_mask(src ## n ## _access_mask, \
1469 exec_size, element_size, subreg, \
1470 vstride, width, hstride)
1471
1472 if (i == 0) {
1473 DO_SRC(0);
1474 } else {
1475 DO_SRC(1);
1476 }
1477 #undef DO_SRC
1478
1479 unsigned num_vstride = exec_size / width;
1480 unsigned num_hstride = width;
1481 unsigned vstride_elements = (num_vstride - 1) * vstride;
1482 unsigned hstride_elements = (num_hstride - 1) * hstride;
1483 unsigned offset = (vstride_elements + hstride_elements) * element_size +
1484 subreg;
1485 ERROR_IF(offset >= 64 * reg_unit(devinfo),
1486 "A source cannot span more than 2 adjacent GRF registers");
1487 }
1488
1489 if (desc->ndst == 0 || dst_is_null(devinfo, inst))
1490 return error_msg;
1491
1492 unsigned stride = STRIDE(elk_inst_dst_hstride(devinfo, inst));
1493 enum elk_reg_type dst_type = inst_dst_type(isa, inst);
1494 unsigned element_size = elk_reg_type_to_size(dst_type);
1495 unsigned subreg = elk_inst_dst_da1_subreg_nr(devinfo, inst);
1496 unsigned offset = ((exec_size - 1) * stride * element_size) + subreg;
1497 ERROR_IF(offset >= 64 * reg_unit(devinfo),
1498 "A destination cannot span more than 2 adjacent GRF registers");
1499
1500 if (error_msg.str)
1501 return error_msg;
1502
1503 /* On IVB/BYT, region parameters and execution size for DF are in terms of
1504 * 32-bit elements, so they are doubled. For evaluating the validity of an
1505 * instruction, we halve them.
1506 */
1507 if (devinfo->verx10 == 70 &&
1508 element_size == 8)
1509 element_size = 4;
1510
1511 align1_access_mask(dst_access_mask, exec_size, element_size, subreg,
1512 exec_size == 1 ? 0 : exec_size * stride,
1513 exec_size == 1 ? 1 : exec_size,
1514 exec_size == 1 ? 0 : stride);
1515
1516 unsigned dst_regs = registers_read(dst_access_mask);
1517 unsigned src0_regs = registers_read(src0_access_mask);
1518 unsigned src1_regs = registers_read(src1_access_mask);
1519
1520 /* The SNB, IVB, HSW, BDW, and CHV PRMs say:
1521 *
1522 * When an instruction has a source region spanning two registers and a
1523 * destination region contained in one register, the number of elements
1524 * must be the same between two sources and one of the following must be
1525 * true:
1526 *
1527 * 1. The destination region is entirely contained in the lower OWord
1528 * of a register.
1529 * 2. The destination region is entirely contained in the upper OWord
1530 * of a register.
1531 * 3. The destination elements are evenly split between the two OWords
1532 * of a register.
1533 */
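   /* Illustrative example for the check below (made-up registers, assuming
    * 32-byte GRFs): mov(8) g2.0<1>:w g3.16<8;8,1>:d reads bytes 16..47 of
    * g3/g4 (two registers) and writes bytes 0..15 of g2, entirely in the
    * lower OWord, satisfying case (1). Starting the destination at byte 12
    * instead would put 2 elements in the lower OWord and 6 in the upper,
    * which is neither a single OWord nor an even split and is flagged.
    */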
1534 if (devinfo->ver <= 8) {
1535 if (dst_regs == 1 && (src0_regs == 2 || src1_regs == 2)) {
1536 unsigned upper_oword_writes = 0, lower_oword_writes = 0;
1537
1538 for (unsigned i = 0; i < exec_size; i++) {
1539 if (dst_access_mask[i] > 0x0000FFFF) {
1540 upper_oword_writes++;
1541 } else {
1542 assert(dst_access_mask[i] != 0);
1543 lower_oword_writes++;
1544 }
1545 }
1546
1547 ERROR_IF(lower_oword_writes != 0 &&
1548 upper_oword_writes != 0 &&
1549 upper_oword_writes != lower_oword_writes,
1550 "Writes must be to only one OWord or "
1551 "evenly split between OWords");
1552 }
1553 }
1554
1555 /* The IVB and HSW PRMs say:
1556 *
1557 * When an instruction has a source region that spans two registers and
1558 * the destination spans two registers, the destination elements must be
1559 * evenly split between the two registers [...]
1560 *
1561 * The SNB PRM contains similar wording (but written in a much more
1562 * confusing manner).
1563 *
1564 * The BDW PRM says:
1565 *
1566 * When destination spans two registers, the source may be one or two
1567 * registers. The destination elements must be evenly split between the
1568 * two registers.
1569 *
1570 * The SKL PRM says:
1571 *
1572 * When destination of MATH instruction spans two registers, the
1573 * destination elements must be evenly split between the two registers.
1574 *
1575    * It is not known whether this restriction applies to KBL or other Gens after
1576 * SKL.
1577 */
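   /* Illustrative example (made-up registers, assuming 32-byte GRFs): on
    * Gfx8 a SIMD16 packed :d destination at subreg 0 puts 8 channels in
    * each of its two GRFs and passes, while a SIMD8 :w destination with
    * hstride 2 starting at byte 24 puts 2 channels in the first GRF and 6
    * in the second and is flagged as unevenly split.
    */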
1578 if (devinfo->ver <= 8 ||
1579 elk_inst_opcode(isa, inst) == ELK_OPCODE_MATH) {
1580
1581 /* Nothing explicitly states that on Gen < 8 elements must be evenly
1582 * split between two destination registers in the two exceptional
1583 * source-region-spans-one-register cases, but since Broadwell requires
1584 * evenly split writes regardless of source region, we assume that it was
1585 * an oversight and require it.
1586 */
1587 if (dst_regs == 2) {
1588 unsigned upper_reg_writes = 0, lower_reg_writes = 0;
1589
1590 for (unsigned i = 0; i < exec_size; i++) {
1591 if (dst_access_mask[i] > 0xFFFFFFFF) {
1592 upper_reg_writes++;
1593 } else {
1594 assert(dst_access_mask[i] != 0);
1595 lower_reg_writes++;
1596 }
1597 }
1598
1599 ERROR_IF(upper_reg_writes != lower_reg_writes,
1600 "Writes must be evenly split between the two "
1601 "destination registers");
1602 }
1603 }
1604
1605 /* The IVB and HSW PRMs say:
1606 *
1607 * When an instruction has a source region that spans two registers and
1608 * the destination spans two registers, the destination elements must be
1609 * evenly split between the two registers and each destination register
1610 * must be entirely derived from one source register.
1611 *
1612 * Note: In such cases, the regioning parameters must ensure that the
1613 * offset from the two source registers is the same.
1614 *
1615 * The SNB PRM contains similar wording (but written in a much more
1616 * confusing manner).
1617 *
1618 * There are effectively three rules stated here:
1619 *
1620 * For an instruction with a source and a destination spanning two
1621 * registers,
1622 *
1623 * (1) destination elements must be evenly split between the two
1624 * registers
1625 * (2) all destination elements in a register must be derived
1626 * from one source register
1627 * (3) the offset (i.e. the starting location in each of the two
1628 * registers spanned by a region) must be the same in the two
1629 * registers spanned by a region
1630 *
1631 * It is impossible to violate rule (1) without violating (2) or (3), so we
1632 * do not attempt to validate it.
1633 */
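   /* Illustrative example for rule (2) (made-up registers, assuming 32-byte
    * GRFs): mov(16) g4.0<1>:d g2.10<8;8,1>:w writes channels 0-7 to g4 and
    * channels 8-15 to g5, but channels 0-5 come from g2 while channels 6-15
    * come from g3, so g4 is not derived from a single source register and
    * is flagged. Starting the source at byte 0 (the usual packed-W to
    * packed-D expansion) keeps it within one source register, which this
    * check skips.
    */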
1634 if (devinfo->ver <= 7 && dst_regs == 2) {
1635 for (unsigned i = 0; i < num_sources; i++) {
1636 #define DO_SRC(n) \
1637 if (src ## n ## _regs <= 1) \
1638 continue; \
1639 \
1640 for (unsigned i = 0; i < exec_size; i++) { \
1641 if ((dst_access_mask[i] > 0xFFFFFFFF) != \
1642 (src ## n ## _access_mask[i] > 0xFFFFFFFF)) { \
1643 ERROR("Each destination register must be entirely derived " \
1644 "from one source register"); \
1645 break; \
1646 } \
1647 } \
1648 \
1649 unsigned offset_0 = \
1650 elk_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1651 unsigned offset_1 = offset_0; \
1652 \
1653 for (unsigned i = 0; i < exec_size; i++) { \
1654 if (src ## n ## _access_mask[i] > 0xFFFFFFFF) { \
1655 offset_1 = __builtin_ctzll(src ## n ## _access_mask[i]) - 32; \
1656 break; \
1657 } \
1658 } \
1659 \
1660 ERROR_IF(num_sources == 2 && offset_0 != offset_1, \
1661 "The offset from the two source registers " \
1662 "must be the same")
1663
1664 if (i == 0) {
1665 DO_SRC(0);
1666 } else {
1667 DO_SRC(1);
1668 }
1669 #undef DO_SRC
1670 }
1671 }
1672
1673 /* The IVB and HSW PRMs say:
1674 *
1675 * When destination spans two registers, the source MUST span two
1676 * registers. The exception to the above rule:
1677 * 1. When source is scalar, the source registers are not
1678 * incremented.
1679 * 2. When source is packed integer Word and destination is packed
1680    *           integer DWord, the source register is not incremented but the
1681 * source sub register is incremented.
1682 *
1683 * The SNB PRM does not contain this rule, but the internal documentation
1684 * indicates that it applies to SNB as well. We assume that the rule applies
1685 * to Gen <= 5 although their PRMs do not state it.
1686 *
1687 * While the documentation explicitly says in exception (2) that the
1688 * destination must be an integer DWord, the hardware allows at least a
1689 * float destination type as well. We emit such instructions from
1690 *
1691 * elk_fs_visitor::emit_interpolation_setup_gfx6
1692 * elk_fs_visitor::emit_fragcoord_interpolation
1693 *
1694 * and have for years with no ill effects.
1695 *
1696 * Additionally the simulator source code indicates that the real condition
1697 * is that the size of the destination type is 4 bytes.
1698 *
1699 * HSW PRMs also add a note to the second exception:
1700 * "When lower 8 channels are disabled, the sub register of source1
1701 * operand is not incremented. If the lower 8 channels are expected
1702 * to be disabled, say by predication, the instruction must be split
1703 * into pair of simd8 operations."
1704 *
1705 * We can't reliably know if the channels won't be disabled due to,
1706 * for example, IMASK. So, play it safe and disallow packed-word exception
1707 * for src1.
1708 */
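   /* Illustrative example (made-up registers): on IVB,
    * mov(16) g4.0<1>:d g2.0<16;16,1>:w has a two-register destination and a
    * one-register packed-word source, which exception (2) allows, whereas
    * the same mov from a packed <16;16,1>:b source is neither scalar nor
    * packed word and is flagged below.
    */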
1709 if (devinfo->ver <= 7 && dst_regs == 2) {
1710 enum elk_reg_type dst_type = inst_dst_type(isa, inst);
1711 bool dst_is_packed_dword =
1712 is_packed(exec_size * stride, exec_size, stride) &&
1713 elk_reg_type_to_size(dst_type) == 4;
1714
1715 for (unsigned i = 0; i < num_sources; i++) {
1716 #define DO_SRC(n) \
1717 unsigned vstride, width, hstride; \
1718 vstride = STRIDE(elk_inst_src ## n ## _vstride(devinfo, inst)); \
1719 width = WIDTH(elk_inst_src ## n ## _width(devinfo, inst)); \
1720 hstride = STRIDE(elk_inst_src ## n ## _hstride(devinfo, inst)); \
1721 bool src ## n ## _is_packed_word = \
1722 n != 1 && is_packed(vstride, width, hstride) && \
1723 (elk_inst_src ## n ## _type(devinfo, inst) == ELK_REGISTER_TYPE_W || \
1724 elk_inst_src ## n ## _type(devinfo, inst) == ELK_REGISTER_TYPE_UW); \
1725 \
1726 ERROR_IF(src ## n ## _regs == 1 && \
1727 !src ## n ## _has_scalar_region(devinfo, inst) && \
1728 !(dst_is_packed_dword && src ## n ## _is_packed_word), \
1729 "When the destination spans two registers, the source must " \
1730 "span two registers\n" ERROR_INDENT "(exceptions for scalar " \
1731 "sources, and packed-word to packed-dword expansion for src0)")
1732
1733 if (i == 0) {
1734 DO_SRC(0);
1735 } else {
1736 DO_SRC(1);
1737 }
1738 #undef DO_SRC
1739 }
1740 }
1741
1742 return error_msg;
1743 }
1744
1745 static struct string
1746 vector_immediate_restrictions(const struct elk_isa_info *isa,
1747 const elk_inst *inst)
1748 {
1749 const struct intel_device_info *devinfo = isa->devinfo;
1750
1751 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
1752 struct string error_msg = { .str = NULL, .len = 0 };
1753
1754 if (num_sources == 3 || num_sources == 0 ||
1755 (devinfo->ver >= 12 && inst_is_send(isa, inst)))
1756 return (struct string){};
1757
1758 unsigned file = num_sources == 1 ?
1759 elk_inst_src0_reg_file(devinfo, inst) :
1760 elk_inst_src1_reg_file(devinfo, inst);
1761 if (file != ELK_IMMEDIATE_VALUE)
1762 return (struct string){};
1763
1764 enum elk_reg_type dst_type = inst_dst_type(isa, inst);
1765 unsigned dst_type_size = elk_reg_type_to_size(dst_type);
1766 unsigned dst_subreg = elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1 ?
1767 elk_inst_dst_da1_subreg_nr(devinfo, inst) : 0;
1768 unsigned dst_stride = STRIDE(elk_inst_dst_hstride(devinfo, inst));
1769 enum elk_reg_type type = num_sources == 1 ?
1770 elk_inst_src0_type(devinfo, inst) :
1771 elk_inst_src1_type(devinfo, inst);
1772
1773 /* The PRMs say:
1774 *
1775 * When an immediate vector is used in an instruction, the destination
1776 * must be 128-bit aligned with destination horizontal stride equivalent
1777 * to a word for an immediate integer vector (v) and equivalent to a
1778 * DWord for an immediate float vector (vf).
1779 *
1780 * The text has not been updated for the addition of the immediate unsigned
1781 * integer vector type (uv) on SNB, but presumably the same restriction
1782 * applies.
1783 */
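   /* Illustrative example (made-up encodings): a mov(8) g2.0<1>:f with a
    * :vf immediate has dst_type_size * dst_stride == 4 and a 16-byte
    * aligned subregister, so it passes, while a g2.0<2>:w destination used
    * with a :v immediate has dst_type_size * dst_stride == 4 != 2 and is
    * flagged.
    */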
1784 switch (type) {
1785 case ELK_REGISTER_TYPE_V:
1786 case ELK_REGISTER_TYPE_UV:
1787 case ELK_REGISTER_TYPE_VF:
1788 ERROR_IF(dst_subreg % (128 / 8) != 0,
1789 "Destination must be 128-bit aligned in order to use immediate "
1790 "vector types");
1791
1792 if (type == ELK_REGISTER_TYPE_VF) {
1793 ERROR_IF(dst_type_size * dst_stride != 4,
1794 "Destination must have stride equivalent to dword in order "
1795 "to use the VF type");
1796 } else {
1797 ERROR_IF(dst_type_size * dst_stride != 2,
1798 "Destination must have stride equivalent to word in order "
1799 "to use the V or UV type");
1800 }
1801 break;
1802 default:
1803 break;
1804 }
1805
1806 return error_msg;
1807 }
1808
1809 static struct string
1810 special_requirements_for_handling_double_precision_data_types(
1811 const struct elk_isa_info *isa,
1812 const elk_inst *inst)
1813 {
1814 const struct intel_device_info *devinfo = isa->devinfo;
1815
1816 unsigned num_sources = elk_num_sources_from_inst(isa, inst);
1817 struct string error_msg = { .str = NULL, .len = 0 };
1818
1819 if (num_sources == 3 || num_sources == 0)
1820 return (struct string){};
1821
1822 /* Split sends don't have types so there's no doubles there. */
1823 if (inst_is_split_send(isa, inst))
1824 return (struct string){};
1825
1826 enum elk_reg_type exec_type = execution_type(isa, inst);
1827 unsigned exec_type_size = elk_reg_type_to_size(exec_type);
1828
1829 enum elk_reg_file dst_file = elk_inst_dst_reg_file(devinfo, inst);
1830 enum elk_reg_type dst_type = inst_dst_type(isa, inst);
1831 unsigned dst_type_size = elk_reg_type_to_size(dst_type);
1832 unsigned dst_hstride = STRIDE(elk_inst_dst_hstride(devinfo, inst));
1833 unsigned dst_reg = elk_inst_dst_da_reg_nr(devinfo, inst);
1834 unsigned dst_subreg = elk_inst_dst_da1_subreg_nr(devinfo, inst);
1835 unsigned dst_address_mode = elk_inst_dst_address_mode(devinfo, inst);
1836
1837 bool is_integer_dword_multiply =
1838 devinfo->ver >= 8 &&
1839 elk_inst_opcode(isa, inst) == ELK_OPCODE_MUL &&
1840 (elk_inst_src0_type(devinfo, inst) == ELK_REGISTER_TYPE_D ||
1841 elk_inst_src0_type(devinfo, inst) == ELK_REGISTER_TYPE_UD) &&
1842 (elk_inst_src1_type(devinfo, inst) == ELK_REGISTER_TYPE_D ||
1843 elk_inst_src1_type(devinfo, inst) == ELK_REGISTER_TYPE_UD);
1844
1845 const bool is_double_precision =
1846 dst_type_size == 8 || exec_type_size == 8 || is_integer_dword_multiply;
1847
1848 for (unsigned i = 0; i < num_sources; i++) {
1849 unsigned vstride, width, hstride, type_size, reg, subreg, address_mode;
1850 bool is_scalar_region;
1851 enum elk_reg_file file;
1852 enum elk_reg_type type;
1853
1854 #define DO_SRC(n) \
1855 if (elk_inst_src ## n ## _reg_file(devinfo, inst) == \
1856 ELK_IMMEDIATE_VALUE) \
1857 continue; \
1858 \
1859 is_scalar_region = src ## n ## _has_scalar_region(devinfo, inst); \
1860 vstride = STRIDE(elk_inst_src ## n ## _vstride(devinfo, inst)); \
1861 width = WIDTH(elk_inst_src ## n ## _width(devinfo, inst)); \
1862 hstride = STRIDE(elk_inst_src ## n ## _hstride(devinfo, inst)); \
1863 file = elk_inst_src ## n ## _reg_file(devinfo, inst); \
1864 type = elk_inst_src ## n ## _type(devinfo, inst); \
1865 type_size = elk_reg_type_to_size(type); \
1866 reg = elk_inst_src ## n ## _da_reg_nr(devinfo, inst); \
1867 subreg = elk_inst_src ## n ## _da1_subreg_nr(devinfo, inst); \
1868 address_mode = elk_inst_src ## n ## _address_mode(devinfo, inst)
1869
1870 if (i == 0) {
1871 DO_SRC(0);
1872 } else {
1873 DO_SRC(1);
1874 }
1875 #undef DO_SRC
1876
1877 const unsigned src_stride = (hstride ? hstride : vstride) * type_size;
1878 const unsigned dst_stride = dst_hstride * dst_type_size;
1879
1880 /* The PRMs say that for CHV, BXT:
1881 *
1882 * When source or destination datatype is 64b or operation is integer
1883 * DWord multiply, regioning in Align1 must follow these rules:
1884 *
1885 * 1. Source and Destination horizontal stride must be aligned to the
1886 * same qword.
1887 * 2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
1888 * 3. Source and Destination offset must be the same, except the case
1889 * of scalar source.
1890 *
1891 * We assume that the restriction applies to GLK as well.
1892 */
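      /* Illustrative example (made-up registers): on CHV,
       * mov(4) g4.0<1>:q g2.0<8;4,2>:d has src_stride = 2 * 4 = 8 and
       * dst_stride = 1 * 8 = 8 (equal, qword multiples), vstride 8 ==
       * width 4 * hstride 2, and matching subregister offsets, so all three
       * rules pass. A <4;4,1>:d source would give src_stride 4 and trip
       * rule (1).
       */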
1893 if (is_double_precision &&
1894 elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1 &&
1895 (devinfo->platform == INTEL_PLATFORM_CHV || intel_device_info_is_9lp(devinfo))) {
1896 ERROR_IF(!is_scalar_region &&
1897 (src_stride % 8 != 0 ||
1898 dst_stride % 8 != 0 ||
1899 src_stride != dst_stride),
1900                "Source and destination horizontal stride must be equal and a "
1901 "multiple of a qword when the execution type is 64-bit");
1902
1903 ERROR_IF(vstride != width * hstride,
1904 "Vstride must be Width * Hstride when the execution type is "
1905 "64-bit");
1906
1907 ERROR_IF(!is_scalar_region && dst_subreg != subreg,
1908 "Source and destination offset must be the same when the "
1909 "execution type is 64-bit");
1910 }
1911
1912 /* The PRMs say that for CHV, BXT:
1913 *
1914 * When source or destination datatype is 64b or operation is integer
1915 * DWord multiply, indirect addressing must not be used.
1916 *
1917 * We assume that the restriction applies to GLK as well.
1918 */
1919 if (is_double_precision &&
1920 (devinfo->platform == INTEL_PLATFORM_CHV || intel_device_info_is_9lp(devinfo))) {
1921 ERROR_IF(ELK_ADDRESS_REGISTER_INDIRECT_REGISTER == address_mode ||
1922 ELK_ADDRESS_REGISTER_INDIRECT_REGISTER == dst_address_mode,
1923 "Indirect addressing is not allowed when the execution type "
1924 "is 64-bit");
1925 }
1926
1927 /* The PRMs say that for CHV, BXT:
1928 *
1929 * ARF registers must never be used with 64b datatype or when
1930 * operation is integer DWord multiply.
1931 *
1932 * We assume that the restriction applies to GLK as well.
1933 *
1934 * We assume that the restriction does not apply to the null register.
1935 */
1936 if (is_double_precision &&
1937 (devinfo->platform == INTEL_PLATFORM_CHV ||
1938 intel_device_info_is_9lp(devinfo))) {
1939 ERROR_IF(elk_inst_opcode(isa, inst) == ELK_OPCODE_MAC ||
1940 elk_inst_acc_wr_control(devinfo, inst) ||
1941 (ELK_ARCHITECTURE_REGISTER_FILE == file &&
1942 reg != ELK_ARF_NULL) ||
1943 (ELK_ARCHITECTURE_REGISTER_FILE == dst_file &&
1944 dst_reg != ELK_ARF_NULL),
1945 "Architecture registers cannot be used when the execution "
1946 "type is 64-bit");
1947 }
1948
1949 /* From the hardware spec section "Register Region Restrictions":
1950 *
1951 * There are two rules:
1952 *
1953 * "In case of all floating point data types used in destination:" and
1954 *
1955 * "In case where source or destination datatype is 64b or operation is
1956 * integer DWord multiply:"
1957 *
1958 * both of which list the same restrictions:
1959 *
1960 * "1. Register Regioning patterns where register data bit location
1961 * of the LSB of the channels are changed between source and
1962 * destination are not supported on Src0 and Src1 except for
1963 * broadcast of a scalar.
1964 *
1965 * 2. Explicit ARF registers except null and accumulator must not be
1966 * used."
1967 */
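      /* Illustrative example (made-up registers) for Gfx12.5+: a packed :hf
       * source feeding a packed :f destination has src_stride 2 !=
       * dst_stride 4, so each channel's LSB moves between source and
       * destination and the region is flagged unless the source is a
       * scalar broadcast.
       */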
1968 if (devinfo->verx10 >= 125 &&
1969 (elk_reg_type_is_floating_point(dst_type) ||
1970 is_double_precision)) {
1971 ERROR_IF(!is_scalar_region &&
1972 ELK_ADDRESS_REGISTER_INDIRECT_REGISTER != address_mode &&
1973 (!is_linear(vstride, width, hstride) ||
1974 src_stride != dst_stride ||
1975 subreg != dst_subreg),
1976 "Register Regioning patterns where register data bit "
1977 "location of the LSB of the channels are changed between "
1978 "source and destination are not supported except for "
1979 "broadcast of a scalar.");
1980
1981 ERROR_IF((address_mode == ELK_ADDRESS_DIRECT && file == ELK_ARCHITECTURE_REGISTER_FILE &&
1982 reg != ELK_ARF_NULL && !(reg >= ELK_ARF_ACCUMULATOR && reg < ELK_ARF_FLAG)) ||
1983 (dst_file == ELK_ARCHITECTURE_REGISTER_FILE &&
1984 dst_reg != ELK_ARF_NULL && dst_reg != ELK_ARF_ACCUMULATOR),
1985 "Explicit ARF registers except null and accumulator must not "
1986 "be used.");
1987 }
1988
1989 /* From the hardware spec section "Register Region Restrictions":
1990 *
1991 * "Vx1 and VxH indirect addressing for Float, Half-Float, Double-Float and
1992 * Quad-Word data must not be used."
1993 */
1994 if (devinfo->verx10 >= 125 &&
1995 (elk_reg_type_is_floating_point(type) || type_sz(type) == 8)) {
1996 ERROR_IF(address_mode == ELK_ADDRESS_REGISTER_INDIRECT_REGISTER &&
1997 vstride == ELK_VERTICAL_STRIDE_ONE_DIMENSIONAL,
1998 "Vx1 and VxH indirect addressing for Float, Half-Float, "
1999 "Double-Float and Quad-Word data must not be used");
2000 }
2001 }
2002
2003 /* The PRMs say that for BDW, SKL:
2004 *
2005 * If Align16 is required for an operation with QW destination and non-QW
2006 * source datatypes, the execution size cannot exceed 2.
2007 *
2008 * We assume that the restriction applies to all Gfx8+ parts.
2009 */
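   /* Illustrative example (made-up registers): an Align16
    * mov(4) g2.0<1>:df g4.0:f has a QWord destination, a non-QWord source,
    * and an execution size of 4 > 2, so it is flagged; the same move at
    * SIMD2 would be accepted by this rule.
    */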
2010 if (is_double_precision && devinfo->ver >= 8) {
2011 enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
2012 enum elk_reg_type src1_type =
2013 num_sources > 1 ? elk_inst_src1_type(devinfo, inst) : src0_type;
2014 unsigned src0_type_size = elk_reg_type_to_size(src0_type);
2015 unsigned src1_type_size = elk_reg_type_to_size(src1_type);
2016
2017 ERROR_IF(elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_16 &&
2018 dst_type_size == 8 &&
2019 (src0_type_size != 8 || src1_type_size != 8) &&
2020 elk_inst_exec_size(devinfo, inst) > ELK_EXECUTE_2,
2021 "In Align16 exec size cannot exceed 2 with a QWord destination "
2022 "and a non-QWord source");
2023 }
2024
2025 /* The PRMs say that for CHV, BXT:
2026 *
2027 * When source or destination datatype is 64b or operation is integer
2028 * DWord multiply, DepCtrl must not be used.
2029 *
2030 * We assume that the restriction applies to GLK as well.
2031 */
2032 if (is_double_precision &&
2033 (devinfo->platform == INTEL_PLATFORM_CHV || intel_device_info_is_9lp(devinfo))) {
2034 ERROR_IF(elk_inst_no_dd_check(devinfo, inst) ||
2035 elk_inst_no_dd_clear(devinfo, inst),
2036 "DepCtrl is not allowed when the execution type is 64-bit");
2037 }
2038
2039 return error_msg;
2040 }
2041
2042 static struct string
2043 instruction_restrictions(const struct elk_isa_info *isa,
2044 const elk_inst *inst)
2045 {
2046 const struct intel_device_info *devinfo = isa->devinfo;
2047 struct string error_msg = { .str = NULL, .len = 0 };
2048
2049 /* From Wa_1604601757:
2050 *
2051 * "When multiplying a DW and any lower precision integer, source modifier
2052 * is not supported."
2053 */
2054 if (devinfo->ver >= 12 &&
2055 elk_inst_opcode(isa, inst) == ELK_OPCODE_MUL) {
2056 enum elk_reg_type exec_type = execution_type(isa, inst);
2057 const bool src0_valid = type_sz(elk_inst_src0_type(devinfo, inst)) == 4 ||
2058 elk_inst_src0_reg_file(devinfo, inst) == ELK_IMMEDIATE_VALUE ||
2059 !(elk_inst_src0_negate(devinfo, inst) ||
2060 elk_inst_src0_abs(devinfo, inst));
2061 const bool src1_valid = type_sz(elk_inst_src1_type(devinfo, inst)) == 4 ||
2062 elk_inst_src1_reg_file(devinfo, inst) == ELK_IMMEDIATE_VALUE ||
2063 !(elk_inst_src1_negate(devinfo, inst) ||
2064 elk_inst_src1_abs(devinfo, inst));
2065
2066 ERROR_IF(!elk_reg_type_is_floating_point(exec_type) &&
2067 type_sz(exec_type) == 4 && !(src0_valid && src1_valid),
2068 "When multiplying a DW and any lower precision integer, source "
2069 "modifier is not supported.");
2070 }
2071
2072 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_CMP ||
2073 elk_inst_opcode(isa, inst) == ELK_OPCODE_CMPN) {
2074 if (devinfo->ver <= 7) {
2075 /* Page 166 of the Ivy Bridge PRM Volume 4 part 3 (Execution Unit
2076 * ISA) says:
2077 *
2078 * Accumulator cannot be destination, implicit or explicit. The
2079 * destination must be a general register or the null register.
2080 *
2081 * Page 77 of the Haswell PRM Volume 2b contains the same text. The
2082 * 965G PRMs contain similar text.
2083 *
2084 * Page 864 (page 880 of the PDF) of the Broadwell PRM Volume 7 says:
2085 *
2086 * For the cmp and cmpn instructions, remove the accumulator
2087 * restrictions.
2088 */
2089 ERROR_IF(elk_inst_dst_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
2090 elk_inst_dst_da_reg_nr(devinfo, inst) != ELK_ARF_NULL,
2091 "Accumulator cannot be destination, implicit or explicit.");
2092 }
2093
2094 /* Page 166 of the Ivy Bridge PRM Volume 4 part 3 (Execution Unit ISA)
2095 * says:
2096 *
2097 * If the destination is the null register, the {Switch} instruction
2098 * option must be used.
2099 *
2100 * Page 77 of the Haswell PRM Volume 2b contains the same text.
2101 */
2102 if (devinfo->ver == 7) {
2103 ERROR_IF(dst_is_null(devinfo, inst) &&
2104 elk_inst_thread_control(devinfo, inst) != ELK_THREAD_SWITCH,
2105 "If the destination is the null register, the {Switch} "
2106 "instruction option must be used.");
2107 }
2108
2109 ERROR_IF(elk_inst_cond_modifier(devinfo, inst) == ELK_CONDITIONAL_NONE,
2110 "CMP (or CMPN) must have a condition.");
2111 }
2112
2113 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_SEL) {
2114 if (devinfo->ver < 6) {
2115 ERROR_IF(elk_inst_cond_modifier(devinfo, inst) != ELK_CONDITIONAL_NONE,
2116 "SEL must not have a condition modifier");
2117 ERROR_IF(elk_inst_pred_control(devinfo, inst) == ELK_PREDICATE_NONE,
2118 "SEL must be predicated");
2119 } else {
2120 ERROR_IF((elk_inst_cond_modifier(devinfo, inst) != ELK_CONDITIONAL_NONE) ==
2121 (elk_inst_pred_control(devinfo, inst) != ELK_PREDICATE_NONE),
2122                   "SEL must either be predicated or have a condition modifier");
2123 }
2124 }
2125
2126 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_MUL) {
2127 const enum elk_reg_type src0_type = elk_inst_src0_type(devinfo, inst);
2128 const enum elk_reg_type src1_type = elk_inst_src1_type(devinfo, inst);
2129 const enum elk_reg_type dst_type = inst_dst_type(isa, inst);
2130
2131 if (devinfo->ver == 6) {
2132 /* Page 223 of the Sandybridge PRM volume 4 part 2 says:
2133 *
2134 * [DevSNB]: When multiple (sic) a DW and a W, the W has to be on
2135 * src0, and the DW has to be on src1.
2136 *
2137        * This text appears only in the Sandybridge PRM.
2138 */
2139 ERROR_IF(elk_reg_type_is_integer(src0_type) &&
2140 type_sz(src0_type) == 4 && type_sz(src1_type) < 4,
2141 "When multiplying a DW and any lower precision integer, the "
2142 "DW operand must be src1.");
2143 } else if (devinfo->ver >= 7) {
2144 /* Page 966 (page 982 of the PDF) of Broadwell PRM volume 2a says:
2145 *
2146 * When multiplying a DW and any lower precision integer, the DW
2147        *    operand must be on src0.
2148 *
2149 * Ivy Bridge, Haswell, Skylake, and Ice Lake PRMs contain the same
2150 * text.
2151 */
2152 ERROR_IF(elk_reg_type_is_integer(src1_type) &&
2153 type_sz(src0_type) < 4 && type_sz(src1_type) == 4,
2154 "When multiplying a DW and any lower precision integer, the "
2155 "DW operand must be src0.");
2156 }
2157
2158 if (devinfo->ver <= 7) {
2159 /* Section 14.2.28 of Intel 965 Express Chipset PRM volume 4 says:
2160 *
2161 * Source operands cannot be an accumulator register.
2162 *
2163 * Iron Lake, Sandybridge, and Ivy Bridge PRMs have the same text.
2164 * Haswell does not. Given that later PRMs have different
2165 * restrictions on accumulator sources (see below), it seems most
2166 * likely that Haswell shares the Ivy Bridge restriction.
2167 */
2168 ERROR_IF(src0_is_acc(devinfo, inst) || src1_is_acc(devinfo, inst),
2169 "Source operands cannot be an accumulator register.");
2170 } else {
2171 /* Page 971 (page 987 of the PDF), section "Accumulator
2172 * Restrictions," of the Broadwell PRM volume 7 says:
2173 *
2174 * Integer source operands cannot be accumulators.
2175 *
2176 * The Skylake and Ice Lake PRMs contain the same text.
2177 */
2178 ERROR_IF((src0_is_acc(devinfo, inst) &&
2179 elk_reg_type_is_integer(src0_type)) ||
2180 (src1_is_acc(devinfo, inst) &&
2181 elk_reg_type_is_integer(src1_type)),
2182 "Integer source operands cannot be accumulators.");
2183 }
2184
2185 if (devinfo->ver <= 6) {
2186 /* Page 223 of the Sandybridge PRM volume 4 part 2 says:
2187 *
2188 * Dword integer source is not allowed for this instruction in
2189 * float execution mode. In other words, if one source is of type
2190 * float (:f, :vf), the other source cannot be of type dword
2191 * integer (:ud or :d).
2192 *
2193 * G965 and Iron Lake PRMs have similar text. Later GPUs do not
2194 * allow mixed source types at all, but that restriction should be
2195 * handled elsewhere.
2196 */
2197 ERROR_IF(execution_type(isa, inst) == ELK_REGISTER_TYPE_F &&
2198 (src0_type == ELK_REGISTER_TYPE_UD ||
2199 src0_type == ELK_REGISTER_TYPE_D ||
2200 src1_type == ELK_REGISTER_TYPE_UD ||
2201 src1_type == ELK_REGISTER_TYPE_D),
2202                "Dword integer source is not allowed for this instruction in "
2203 "float execution mode.");
2204 }
2205
2206 if (devinfo->ver <= 7) {
2207 /* Page 118 of the Haswell PRM volume 2b says:
2208 *
2209 * When operating on integers with at least one of the source
2210 * being a DWord type (signed or unsigned), the destination cannot
2211 * be floating-point (implementation note: the data converter only
2212 * looks at the low 34 bits of the result).
2213 *
2214 * G965, Iron Lake, Sandybridge, and Ivy Bridge have similar text.
2215 * Later GPUs do not allow mixed source and destination types at all,
2216 * but that restriction should be handled elsewhere.
2217 */
2218 ERROR_IF(dst_type == ELK_REGISTER_TYPE_F &&
2219 (src0_type == ELK_REGISTER_TYPE_UD ||
2220 src0_type == ELK_REGISTER_TYPE_D ||
2221 src1_type == ELK_REGISTER_TYPE_UD ||
2222 src1_type == ELK_REGISTER_TYPE_D),
2223 "Float destination type not allowed with DWord source type.");
2224 }
2225
2226 if (devinfo->ver == 8) {
2227 /* Page 966 (page 982 of the PDF) of the Broadwell PRM volume 2a
2228 * says:
2229 *
2230 * When multiplying DW x DW, the dst cannot be accumulator.
2231 *
2232 * This text also appears in the Cherry Trail / Braswell PRM, but it
2233 * does not appear in any other PRM.
2234 */
2235 ERROR_IF((src0_type == ELK_REGISTER_TYPE_UD ||
2236 src0_type == ELK_REGISTER_TYPE_D) &&
2237 (src1_type == ELK_REGISTER_TYPE_UD ||
2238 src1_type == ELK_REGISTER_TYPE_D) &&
2239 elk_inst_dst_reg_file(devinfo, inst) == ELK_ARCHITECTURE_REGISTER_FILE &&
2240 elk_inst_dst_da_reg_nr(devinfo, inst) != ELK_ARF_NULL,
2241 "When multiplying DW x DW, the dst cannot be accumulator.");
2242 }
2243
2244 /* Page 935 (page 951 of the PDF) of the Ice Lake PRM volume 2a says:
2245 *
2246 * When multiplying integer data types, if one of the sources is a
2247 * DW, the resulting full precision data is stored in the
2248 * accumulator. However, if the destination data type is either W or
2249 * DW, the low bits of the result are written to the destination
2250 * register and the remaining high bits are discarded. This results
2251 * in undefined Overflow and Sign flags. Therefore, conditional
2252 * modifiers and saturation (.sat) cannot be used in this case.
2253 *
2254 * Similar text appears in every version of the PRM.
2255 *
2256 * The wording of the last sentence is not very clear. It could either
2257 * be interpreted as "conditional modifiers combined with saturation
2258 * cannot be used" or "neither conditional modifiers nor saturation can
2259 * be used." I have interpreted it as the latter primarily because that
2260 * is the more restrictive interpretation.
2261 */
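      /* Illustrative example (made-up registers):
       * mul.sat(8) g4.0<1>:d g2.0<8;8,1>:d g3.0<8;8,1>:w has a DWord source
       * and a DWord destination, so the .sat (or any conditional modifier)
       * is flagged; dropping both makes it acceptable here.
       */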
2262 ERROR_IF((src0_type == ELK_REGISTER_TYPE_UD ||
2263 src0_type == ELK_REGISTER_TYPE_D ||
2264 src1_type == ELK_REGISTER_TYPE_UD ||
2265 src1_type == ELK_REGISTER_TYPE_D) &&
2266 (dst_type == ELK_REGISTER_TYPE_UD ||
2267 dst_type == ELK_REGISTER_TYPE_D ||
2268 dst_type == ELK_REGISTER_TYPE_UW ||
2269 dst_type == ELK_REGISTER_TYPE_W) &&
2270 (elk_inst_saturate(devinfo, inst) != 0 ||
2271 elk_inst_cond_modifier(devinfo, inst) != ELK_CONDITIONAL_NONE),
2272 "Neither Saturate nor conditional modifier allowed with DW "
2273 "integer multiply.");
2274 }
2275
2276 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_MATH) {
2277 unsigned math_function = elk_inst_math_function(devinfo, inst);
2278 switch (math_function) {
2279 case ELK_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
2280 case ELK_MATH_FUNCTION_INT_DIV_QUOTIENT:
2281 case ELK_MATH_FUNCTION_INT_DIV_REMAINDER: {
2282 /* Page 442 of the Broadwell PRM Volume 2a "Extended Math Function" says:
2283 * INT DIV function does not support source modifiers.
2284 * Bspec 6647 extends it back to Ivy Bridge.
2285 */
2286 bool src0_valid = !elk_inst_src0_negate(devinfo, inst) &&
2287 !elk_inst_src0_abs(devinfo, inst);
2288 bool src1_valid = !elk_inst_src1_negate(devinfo, inst) &&
2289 !elk_inst_src1_abs(devinfo, inst);
2290 ERROR_IF(!src0_valid || !src1_valid,
2291 "INT DIV function does not support source modifiers.");
2292 break;
2293 }
2294 default:
2295 break;
2296 }
2297 }
2298
2299 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_DP4A) {
2300 /* Page 396 (page 412 of the PDF) of the DG1 PRM volume 2a says:
2301 *
2302 * Only one of src0 or src1 operand may be an the (sic) accumulator
2303 * register (acc#).
2304 */
2305 ERROR_IF(src0_is_acc(devinfo, inst) && src1_is_acc(devinfo, inst),
2306 "Only one of src0 or src1 operand may be an accumulator "
2307 "register (acc#).");
2308
2309 }
2310
2311 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_ADD3) {
2312 const enum elk_reg_type dst_type = inst_dst_type(isa, inst);
2313
2314 ERROR_IF(dst_type != ELK_REGISTER_TYPE_D &&
2315 dst_type != ELK_REGISTER_TYPE_UD &&
2316 dst_type != ELK_REGISTER_TYPE_W &&
2317 dst_type != ELK_REGISTER_TYPE_UW,
2318 "Destination must be integer D, UD, W, or UW type.");
2319
2320 for (unsigned i = 0; i < 3; i++) {
2321 enum elk_reg_type src_type;
2322
2323 switch (i) {
2324 case 0: src_type = elk_inst_3src_a1_src0_type(devinfo, inst); break;
2325 case 1: src_type = elk_inst_3src_a1_src1_type(devinfo, inst); break;
2326 case 2: src_type = elk_inst_3src_a1_src2_type(devinfo, inst); break;
2327 default: unreachable("invalid src");
2328 }
2329
2330 ERROR_IF(src_type != ELK_REGISTER_TYPE_D &&
2331 src_type != ELK_REGISTER_TYPE_UD &&
2332 src_type != ELK_REGISTER_TYPE_W &&
2333 src_type != ELK_REGISTER_TYPE_UW,
2334 "Source must be integer D, UD, W, or UW type.");
2335
2336 if (i == 0) {
2337 if (elk_inst_3src_a1_src0_is_imm(devinfo, inst)) {
2338 ERROR_IF(src_type != ELK_REGISTER_TYPE_W &&
2339 src_type != ELK_REGISTER_TYPE_UW,
2340 "Immediate source must be integer W or UW type.");
2341 }
2342 } else if (i == 2) {
2343 if (elk_inst_3src_a1_src2_is_imm(devinfo, inst)) {
2344 ERROR_IF(src_type != ELK_REGISTER_TYPE_W &&
2345 src_type != ELK_REGISTER_TYPE_UW,
2346 "Immediate source must be integer W or UW type.");
2347 }
2348 }
2349 }
2350 }
2351
2352 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_OR ||
2353 elk_inst_opcode(isa, inst) == ELK_OPCODE_AND ||
2354 elk_inst_opcode(isa, inst) == ELK_OPCODE_XOR ||
2355 elk_inst_opcode(isa, inst) == ELK_OPCODE_NOT) {
2356 if (devinfo->ver >= 8) {
2357 /* While the behavior of the negate source modifier is defined as
2358 * logical not, the behavior of abs source modifier is not
2359 * defined. Disallow it to be safe.
2360 */
2361 ERROR_IF(elk_inst_src0_abs(devinfo, inst),
2362 "Behavior of abs source modifier in logic ops is undefined.");
2363 ERROR_IF(elk_inst_opcode(isa, inst) != ELK_OPCODE_NOT &&
2364 elk_inst_src1_reg_file(devinfo, inst) != ELK_IMMEDIATE_VALUE &&
2365 elk_inst_src1_abs(devinfo, inst),
2366 "Behavior of abs source modifier in logic ops is undefined.");
2367
2368 /* Page 479 (page 495 of the PDF) of the Broadwell PRM volume 2a says:
2369 *
2370 * Source modifier is not allowed if source is an accumulator.
2371 *
2372 * The same text also appears for OR, NOT, and XOR instructions.
2373 */
2374 ERROR_IF((elk_inst_src0_abs(devinfo, inst) ||
2375 elk_inst_src0_negate(devinfo, inst)) &&
2376 src0_is_acc(devinfo, inst),
2377 "Source modifier is not allowed if source is an accumulator.");
2378 ERROR_IF(elk_num_sources_from_inst(isa, inst) > 1 &&
2379 (elk_inst_src1_abs(devinfo, inst) ||
2380 elk_inst_src1_negate(devinfo, inst)) &&
2381 src1_is_acc(devinfo, inst),
2382 "Source modifier is not allowed if source is an accumulator.");
2383 }
2384
2385 /* Page 479 (page 495 of the PDF) of the Broadwell PRM volume 2a says:
2386 *
2387 * This operation does not produce sign or overflow conditions. Only
2388 * the .e/.z or .ne/.nz conditional modifiers should be used.
2389 *
2390 * The same text also appears for OR, NOT, and XOR instructions.
2391 *
2392 * Per the comment around nir_op_imod in elk_fs_nir.cpp, we have
2393 * determined this to not be true. The only conditions that seem
2394 * absolutely sketchy are O, R, and U. Some OpenGL shaders from Doom
2395 * 2016 have been observed to generate and.g and operate correctly.
2396 */
2397 const enum elk_conditional_mod cmod =
2398 elk_inst_cond_modifier(devinfo, inst);
2399 ERROR_IF(cmod == ELK_CONDITIONAL_O ||
2400 cmod == ELK_CONDITIONAL_R ||
2401 cmod == ELK_CONDITIONAL_U,
2402 "O, R, and U conditional modifiers should not be used.");
2403 }
2404
2405 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_BFI2) {
2406 ERROR_IF(elk_inst_cond_modifier(devinfo, inst) != ELK_CONDITIONAL_NONE,
2407 "BFI2 cannot have conditional modifier");
2408
2409 ERROR_IF(elk_inst_saturate(devinfo, inst),
2410 "BFI2 cannot have saturate modifier");
2411
2412 enum elk_reg_type dst_type;
2413
2414 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1)
2415 dst_type = elk_inst_3src_a1_dst_type(devinfo, inst);
2416 else
2417 dst_type = elk_inst_3src_a16_dst_type(devinfo, inst);
2418
2419 ERROR_IF(dst_type != ELK_REGISTER_TYPE_D &&
2420 dst_type != ELK_REGISTER_TYPE_UD,
2421 "BFI2 destination type must be D or UD");
2422
2423 for (unsigned s = 0; s < 3; s++) {
2424 enum elk_reg_type src_type;
2425
2426 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1) {
2427 switch (s) {
2428 case 0: src_type = elk_inst_3src_a1_src0_type(devinfo, inst); break;
2429 case 1: src_type = elk_inst_3src_a1_src1_type(devinfo, inst); break;
2430 case 2: src_type = elk_inst_3src_a1_src2_type(devinfo, inst); break;
2431 default: unreachable("invalid src");
2432 }
2433 } else {
2434 src_type = elk_inst_3src_a16_src_type(devinfo, inst);
2435 }
2436
2437 ERROR_IF(src_type != dst_type,
2438 "BFI2 source type must match destination type");
2439 }
2440 }
2441
2442 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_CSEL) {
2443 ERROR_IF(elk_inst_pred_control(devinfo, inst) != ELK_PREDICATE_NONE,
2444 "CSEL cannot be predicated");
2445
2446 /* CSEL is CMP and SEL fused into one. The condition modifier, which
2447 * does not actually modify the flags, controls the built-in comparison.
2448 */
2449 ERROR_IF(elk_inst_cond_modifier(devinfo, inst) == ELK_CONDITIONAL_NONE,
2450 "CSEL must have a condition.");
2451
2452 enum elk_reg_type dst_type;
2453
2454 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1)
2455 dst_type = elk_inst_3src_a1_dst_type(devinfo, inst);
2456 else
2457 dst_type = elk_inst_3src_a16_dst_type(devinfo, inst);
2458
2459 if (devinfo->ver < 8) {
2460 ERROR_IF(devinfo->ver < 8, "CSEL not supported before Gfx8");
2461 } else if (devinfo->ver <= 9) {
2462 ERROR_IF(dst_type != ELK_REGISTER_TYPE_F,
2463 "CSEL destination type must be F");
2464 } else {
2465 ERROR_IF(dst_type != ELK_REGISTER_TYPE_F &&
2466 dst_type != ELK_REGISTER_TYPE_HF &&
2467 dst_type != ELK_REGISTER_TYPE_D &&
2468 dst_type != ELK_REGISTER_TYPE_W,
2469 "CSEL destination type must be F, HF, D, or W");
2470 }
2471
2472 for (unsigned s = 0; s < 3; s++) {
2473 enum elk_reg_type src_type;
2474
2475 if (elk_inst_access_mode(devinfo, inst) == ELK_ALIGN_1) {
2476 switch (s) {
2477 case 0: src_type = elk_inst_3src_a1_src0_type(devinfo, inst); break;
2478 case 1: src_type = elk_inst_3src_a1_src1_type(devinfo, inst); break;
2479 case 2: src_type = elk_inst_3src_a1_src2_type(devinfo, inst); break;
2480 default: unreachable("invalid src");
2481 }
2482 } else {
2483 src_type = elk_inst_3src_a16_src_type(devinfo, inst);
2484 }
2485
2486 ERROR_IF(src_type != dst_type,
2487 "CSEL source type must match destination type");
2488 }
2489 }
2490
2491 if (elk_inst_opcode(isa, inst) == ELK_OPCODE_DPAS) {
2492 ERROR_IF(elk_inst_dpas_3src_sdepth(devinfo, inst) != ELK_SYSTOLIC_DEPTH_8,
2493 "Systolic depth must be 8.");
2494
2495 const unsigned sdepth = 8;
2496
2497 const enum elk_reg_type dst_type =
2498 elk_inst_dpas_3src_dst_type(devinfo, inst);
2499 const enum elk_reg_type src0_type =
2500 elk_inst_dpas_3src_src0_type(devinfo, inst);
2501 const enum elk_reg_type src1_type =
2502 elk_inst_dpas_3src_src1_type(devinfo, inst);
2503 const enum elk_reg_type src2_type =
2504 elk_inst_dpas_3src_src2_type(devinfo, inst);
2505
2506 const enum gfx12_sub_byte_precision src1_sub_byte =
2507 elk_inst_dpas_3src_src1_subbyte(devinfo, inst);
2508
2509 if (src1_type != ELK_REGISTER_TYPE_B && src1_type != ELK_REGISTER_TYPE_UB) {
2510 ERROR_IF(src1_sub_byte != ELK_SUB_BYTE_PRECISION_NONE,
2511 "Sub-byte precision must be None for source type larger than Byte.");
2512 } else {
2513 ERROR_IF(src1_sub_byte != ELK_SUB_BYTE_PRECISION_NONE &&
2514 src1_sub_byte != ELK_SUB_BYTE_PRECISION_4BIT &&
2515 src1_sub_byte != ELK_SUB_BYTE_PRECISION_2BIT,
2516 "Invalid sub-byte precision.");
2517 }
2518
2519 const enum gfx12_sub_byte_precision src2_sub_byte =
2520 elk_inst_dpas_3src_src2_subbyte(devinfo, inst);
2521
2522 if (src2_type != ELK_REGISTER_TYPE_B && src2_type != ELK_REGISTER_TYPE_UB) {
2523 ERROR_IF(src2_sub_byte != ELK_SUB_BYTE_PRECISION_NONE,
2524 "Sub-byte precision must be None.");
2525 } else {
2526 ERROR_IF(src2_sub_byte != ELK_SUB_BYTE_PRECISION_NONE &&
2527 src2_sub_byte != ELK_SUB_BYTE_PRECISION_4BIT &&
2528 src2_sub_byte != ELK_SUB_BYTE_PRECISION_2BIT,
2529 "Invalid sub-byte precision.");
2530 }
2531
2532 const unsigned src1_bits_per_element =
2533 (8 * elk_reg_type_to_size(src1_type)) >>
2534 elk_inst_dpas_3src_src1_subbyte(devinfo, inst);
2535
2536 const unsigned src2_bits_per_element =
2537 (8 * elk_reg_type_to_size(src2_type)) >>
2538 elk_inst_dpas_3src_src2_subbyte(devinfo, inst);
2539
2540 /* The MAX2(1, ...) is just to prevent possible division by 0 later. */
2541 const unsigned ops_per_chan =
2542 MAX2(1, 32 / MAX2(src1_bits_per_element, src2_bits_per_element));
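      /* Illustrative arithmetic: with :ub src1 and :ub src2 (8 bits each),
       * ops_per_chan = 32 / 8 = 4, so src2.subnr must be a multiple of
       * 8 * 4 = 32; with :hf operands (16 bits), ops_per_chan = 2 and the
       * required multiple is 16.
       */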
2543
2544 ERROR_IF(elk_inst_exec_size(devinfo, inst) != ELK_EXECUTE_8,
2545 "DPAS execution size must be 8.");
2546
2547 const unsigned exec_size = 8;
2548
2549 const unsigned dst_subnr = elk_inst_dpas_3src_dst_subreg_nr(devinfo, inst);
2550 const unsigned src0_subnr = elk_inst_dpas_3src_src0_subreg_nr(devinfo, inst);
2551 const unsigned src1_subnr = elk_inst_dpas_3src_src1_subreg_nr(devinfo, inst);
2552 const unsigned src2_subnr = elk_inst_dpas_3src_src2_subreg_nr(devinfo, inst);
2553
2554 /* Until HF is supported as dst type, this is effectively subnr == 0. */
2555 ERROR_IF(dst_subnr % exec_size != 0,
2556 "Destination subregister offset must be a multiple of ExecSize.");
2557
2558 /* Until HF is supported as src0 type, this is effectively subnr == 0. */
2559 ERROR_IF(src0_subnr % exec_size != 0,
2560 "Src0 subregister offset must be a multiple of ExecSize.");
2561
2562 ERROR_IF(src1_subnr != 0,
2563 "Src1 subregister offsets must be 0.");
2564
2565 /* In nearly all cases, this effectively requires that src2.subnr be
2566 * 0. It is only when src1 is 8 bits and src2 is 2 or 4 bits that the
2567 * ops_per_chan value can allow non-zero src2.subnr.
2568 */
2569 ERROR_IF(src2_subnr % (sdepth * ops_per_chan) != 0,
2570 "Src2 subregister offset must be a multiple of SystolicDepth "
2571 "times OPS_PER_CHAN.");
2572
2573 ERROR_IF(dst_subnr * type_sz(dst_type) >= REG_SIZE,
2574 "Destination subregister specifies next register.");
2575
2576 ERROR_IF(src0_subnr * type_sz(src0_type) >= REG_SIZE,
2577 "Src0 subregister specifies next register.");
2578
2579 ERROR_IF((src1_subnr * type_sz(src1_type) * src1_bits_per_element) / 8 >= REG_SIZE,
2580 "Src1 subregister specifies next register.");
2581
2582 ERROR_IF((src2_subnr * type_sz(src2_type) * src2_bits_per_element) / 8 >= REG_SIZE,
2583 "Src2 subregister specifies next register.");
2584
2585 if (elk_inst_3src_atomic_control(devinfo, inst)) {
2586 /* FINISHME: When we start emitting DPAS with Atomic set, figure out
2587 * a way to validate it. Also add a test in test_eu_validate.cpp.
2588 */
2589 ERROR_IF(true,
2590                "When instruction option Atomic is used it must be followed by a "
2591 "DPAS instruction.");
2592 }
2593
2594 if (elk_inst_dpas_3src_exec_type(devinfo, inst) ==
2595 ELK_ALIGN1_3SRC_EXEC_TYPE_FLOAT) {
2596 ERROR_IF(dst_type != ELK_REGISTER_TYPE_F,
2597 "DPAS destination type must be F.");
2598 ERROR_IF(src0_type != ELK_REGISTER_TYPE_F,
2599 "DPAS src0 type must be F.");
2600 ERROR_IF(src1_type != ELK_REGISTER_TYPE_HF,
2601 "DPAS src1 type must be HF.");
2602 ERROR_IF(src2_type != ELK_REGISTER_TYPE_HF,
2603 "DPAS src2 type must be HF.");
2604 } else {
2605 ERROR_IF(dst_type != ELK_REGISTER_TYPE_D &&
2606 dst_type != ELK_REGISTER_TYPE_UD,
2607 "DPAS destination type must be D or UD.");
2608 ERROR_IF(src0_type != ELK_REGISTER_TYPE_D &&
2609 src0_type != ELK_REGISTER_TYPE_UD,
2610 "DPAS src0 type must be D or UD.");
2611 ERROR_IF(src1_type != ELK_REGISTER_TYPE_B &&
2612 src1_type != ELK_REGISTER_TYPE_UB,
2613 "DPAS src1 base type must be B or UB.");
2614 ERROR_IF(src2_type != ELK_REGISTER_TYPE_B &&
2615 src2_type != ELK_REGISTER_TYPE_UB,
2616 "DPAS src2 base type must be B or UB.");
2617
2618 if (elk_reg_type_is_unsigned_integer(dst_type)) {
2619 ERROR_IF(!elk_reg_type_is_unsigned_integer(src0_type) ||
2620 !elk_reg_type_is_unsigned_integer(src1_type) ||
2621 !elk_reg_type_is_unsigned_integer(src2_type),
2622 "If any source datatype is signed, destination datatype "
2623 "must be signed.");
2624 }
2625 }
2626
2627 /* FINISHME: Additional restrictions mentioned in the Bspec that are not
2628 * yet enforced here:
2629 *
2630     *    - General accumulator register access is not supported. This is
2631 * currently enforced in elk_dpas_three_src (elk_eu_emit.c).
2632 *
2633 * - Given any combination of datatypes in the sources of a DPAS
2634 * instructions, the boundaries of a register should not be crossed.
2635 */
2636 }
2637
2638 return error_msg;
2639 }
2640
2641 static struct string
2642 send_descriptor_restrictions(const struct elk_isa_info *isa,
2643 const elk_inst *inst)
2644 {
2645 const struct intel_device_info *devinfo = isa->devinfo;
2646 struct string error_msg = { .str = NULL, .len = 0 };
2647
2648 if (inst_is_split_send(isa, inst)) {
2649 /* We can only validate immediate descriptors */
2650 if (elk_inst_send_sel_reg32_desc(devinfo, inst))
2651 return error_msg;
2652 } else if (inst_is_send(isa, inst)) {
2653 /* We can only validate immediate descriptors */
2654 if (elk_inst_src1_reg_file(devinfo, inst) != ELK_IMMEDIATE_VALUE)
2655 return error_msg;
2656 } else {
2657 return error_msg;
2658 }
2659
2660 const uint32_t desc = elk_inst_send_desc(devinfo, inst);
2661
2662 switch (elk_inst_sfid(devinfo, inst)) {
2663 case ELK_SFID_URB:
2664 if (devinfo->ver < 20)
2665 break;
2666 FALLTHROUGH;
2667 case GFX12_SFID_TGM:
2668 case GFX12_SFID_SLM:
2669 case GFX12_SFID_UGM:
2670 ERROR_IF(!devinfo->has_lsc, "Platform does not support LSC");
2671
2672 ERROR_IF(elk_lsc_opcode_has_transpose(lsc_msg_desc_opcode(devinfo, desc)) &&
2673 lsc_msg_desc_transpose(devinfo, desc) &&
2674 elk_inst_exec_size(devinfo, inst) != ELK_EXECUTE_1,
2675 "Transposed vectors are restricted to Exec_Mask = 1.");
2676 break;
2677
2678 default:
2679 break;
2680 }
2681
2682 if (elk_inst_sfid(devinfo, inst) == ELK_SFID_URB && devinfo->ver < 20) {
2683 /* Gfx4 doesn't have a "header present" bit in the SEND message. */
2684 ERROR_IF(devinfo->ver > 4 && !elk_inst_header_present(devinfo, inst),
2685 "Header must be present for all URB messages.");
2686
2687 switch (elk_inst_urb_opcode(devinfo, inst)) {
2688 case ELK_URB_OPCODE_WRITE_HWORD:
2689 break;
2690
2691 /* case FF_SYNC: */
2692 case ELK_URB_OPCODE_WRITE_OWORD:
2693 /* Gfx5 / Gfx6 FF_SYNC message and Gfx7+ URB_WRITE_OWORD have the
2694 * same opcode value.
2695 */
2696 if (devinfo->ver == 5 || devinfo->ver == 6) {
2697 ERROR_IF(elk_inst_urb_global_offset(devinfo, inst) != 0,
2698 "FF_SYNC global offset must be zero.");
2699 ERROR_IF(elk_inst_urb_swizzle_control(devinfo, inst) != 0,
2700 "FF_SYNC swizzle control must be zero.");
2701 ERROR_IF(elk_inst_urb_used(devinfo, inst) != 0,
2702 "FF_SYNC used must be zero.");
2703 ERROR_IF(elk_inst_urb_complete(devinfo, inst) != 0,
2704 "FF_SYNC complete must be zero.");
2705
2706 /* Volume 4 part 2 of the Sandybridge PRM (page 28) says:
2707 *
2708 * A message response (writeback) length of 1 GRF will be
2709 * indicated on the ‘send’ instruction if the thread requires
2710 * response data and/or synchronization.
2711 */
2712 ERROR_IF((unsigned)elk_inst_rlen(devinfo, inst) > 1,
2713 "FF_SYNC read length must be 0 or 1.");
2714 } else {
2715 ERROR_IF(devinfo->ver < 7,
2716 "URB OWORD write messages only valid on gfx >= 7");
2717 }
2718 break;
2719
2720 case ELK_URB_OPCODE_READ_HWORD:
2721 case ELK_URB_OPCODE_READ_OWORD:
2722 ERROR_IF(devinfo->ver < 7,
2723 "URB read messages only valid on gfx >= 7");
2724 break;
2725
2726 case GFX7_URB_OPCODE_ATOMIC_MOV:
2727 case GFX7_URB_OPCODE_ATOMIC_INC:
2728 ERROR_IF(devinfo->ver < 7,
2729 "URB atomic move and increment messages only valid on gfx >= 7");
2730 break;
2731
2732 case GFX8_URB_OPCODE_ATOMIC_ADD:
2733 /* The Haswell PRM lists this opcode as valid on page 317. */
2734 ERROR_IF(devinfo->verx10 < 75,
2735 "URB atomic add message only valid on gfx >= 7.5");
2736 break;
2737
2738 case GFX8_URB_OPCODE_SIMD8_READ:
2739 ERROR_IF(elk_inst_rlen(devinfo, inst) == 0,
2740 "URB SIMD8 read message must read some data.");
2741 FALLTHROUGH;
2742
2743 case GFX8_URB_OPCODE_SIMD8_WRITE:
2744 ERROR_IF(devinfo->ver < 8,
2745 "URB SIMD8 messages only valid on gfx >= 8");
2746 break;
2747
2748 case GFX125_URB_OPCODE_FENCE:
2749 ERROR_IF(devinfo->verx10 < 125,
2750 "URB fence message only valid on gfx >= 12.5");
2751 break;
2752
2753 default:
2754 ERROR_IF(true, "Invalid URB message");
2755 break;
2756 }
2757 }
2758
2759 return error_msg;
2760 }
2761
2762 bool
2763 elk_validate_instruction(const struct elk_isa_info *isa,
2764 const elk_inst *inst, int offset,
2765 unsigned inst_size,
2766 struct elk_disasm_info *disasm)
2767 {
2768 struct string error_msg = { .str = NULL, .len = 0 };
2769
2770 if (is_unsupported_inst(isa, inst)) {
2771 ERROR("Instruction not supported on this Gen");
2772 } else {
2773 CHECK(invalid_values);
2774
2775 if (error_msg.str == NULL) {
2776 CHECK(sources_not_null);
2777 CHECK(send_restrictions);
2778 CHECK(alignment_supported);
2779 CHECK(general_restrictions_based_on_operand_types);
2780 CHECK(general_restrictions_on_region_parameters);
2781 CHECK(special_restrictions_for_mixed_float_mode);
2782 CHECK(region_alignment_rules);
2783 CHECK(vector_immediate_restrictions);
2784 CHECK(special_requirements_for_handling_double_precision_data_types);
2785 CHECK(instruction_restrictions);
2786 CHECK(send_descriptor_restrictions);
2787 }
2788 }
2789
2790 if (error_msg.str && disasm) {
2791 elk_disasm_insert_error(disasm, offset, inst_size, error_msg.str);
2792 }
2793 free(error_msg.str);
2794
2795 return error_msg.len == 0;
2796 }
2797
2798 bool
2799 elk_validate_instructions(const struct elk_isa_info *isa,
2800 const void *assembly, int start_offset, int end_offset,
2801 struct elk_disasm_info *disasm)
2802 {
2803 const struct intel_device_info *devinfo = isa->devinfo;
2804 bool valid = true;
2805
2806 for (int src_offset = start_offset; src_offset < end_offset;) {
2807 const elk_inst *inst = assembly + src_offset;
2808 bool is_compact = elk_inst_cmpt_control(devinfo, inst);
2809 unsigned inst_size = is_compact ? sizeof(elk_compact_inst)
2810 : sizeof(elk_inst);
2811 elk_inst uncompacted;
2812
2813 if (is_compact) {
2814 elk_compact_inst *compacted = (void *)inst;
2815 elk_uncompact_instruction(isa, &uncompacted, compacted);
2816 inst = &uncompacted;
2817 }
2818
2819 bool v = elk_validate_instruction(isa, inst, src_offset,
2820 inst_size, disasm);
2821 valid = valid && v;
2822
2823 src_offset += inst_size;
2824 }
2825
2826 return valid;
2827 }
2828