Searched refs:exec (Results 1 – 25 of 2217) sorted by relevance

/external/mesa3d/src/mesa/vbo/
vbo_exec_draw.c
44 vbo_exec_debug_verts(struct vbo_exec_context *exec) in vbo_exec_debug_verts() argument
46 GLuint count = exec->vtx.vert_count; in vbo_exec_debug_verts()
52 exec->vtx.prim_count, in vbo_exec_debug_verts()
53 exec->vtx.vertex_size); in vbo_exec_debug_verts()
55 for (i = 0 ; i < exec->vtx.prim_count ; i++) { in vbo_exec_debug_verts()
56 struct _mesa_prim *prim = &exec->vtx.prim[i]; in vbo_exec_debug_verts()
69 vbo_exec_copy_vertices(struct vbo_exec_context *exec) in vbo_exec_copy_vertices() argument
71 struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1]; in vbo_exec_copy_vertices()
72 const GLuint sz = exec->vtx.vertex_size; in vbo_exec_copy_vertices()
73 fi_type *dst = exec->vtx.copied.buffer; in vbo_exec_copy_vertices()
[all …]
vbo_exec_api.c
67 vbo_reset_all_attr(struct vbo_exec_context *exec);
76 vbo_exec_wrap_buffers(struct vbo_exec_context *exec) in vbo_exec_wrap_buffers() argument
78 if (exec->vtx.prim_count == 0) { in vbo_exec_wrap_buffers()
79 exec->vtx.copied.nr = 0; in vbo_exec_wrap_buffers()
80 exec->vtx.vert_count = 0; in vbo_exec_wrap_buffers()
81 exec->vtx.buffer_ptr = exec->vtx.buffer_map; in vbo_exec_wrap_buffers()
84 struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1]; in vbo_exec_wrap_buffers()
88 if (_mesa_inside_begin_end(exec->ctx)) { in vbo_exec_wrap_buffers()
89 last_prim->count = exec->vtx.vert_count - last_prim->start; in vbo_exec_wrap_buffers()
112 if (exec->vtx.vert_count) in vbo_exec_wrap_buffers()
[all …]
vbo_exec_eval.c
36 static void clear_active_eval1( struct vbo_exec_context *exec, GLuint attr ) in clear_active_eval1() argument
38 assert(attr < ARRAY_SIZE(exec->eval.map1)); in clear_active_eval1()
39 exec->eval.map1[attr].map = NULL; in clear_active_eval1()
42 static void clear_active_eval2( struct vbo_exec_context *exec, GLuint attr ) in clear_active_eval2() argument
44 assert(attr < ARRAY_SIZE(exec->eval.map2)); in clear_active_eval2()
45 exec->eval.map2[attr].map = NULL; in clear_active_eval2()
48 static void set_active_eval1( struct vbo_exec_context *exec, GLuint attr, GLuint dim, in set_active_eval1() argument
51 assert(attr < ARRAY_SIZE(exec->eval.map1)); in set_active_eval1()
52 if (!exec->eval.map1[attr].map) { in set_active_eval1()
53 exec->eval.map1[attr].map = map; in set_active_eval1()
[all …]
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
sdwa-peephole-instr.mir
6 # GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
7 …{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
8 … %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
9 …%{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
10 …[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
13 # GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
14 …{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
15 … %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
16 …%{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
17 …[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
[all …]
sdwa-peephole-instr-gfx10.mir
5 # GFX1010: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
6 …{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
7 … %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
8 …%{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
9 …[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
11 # GFX1010: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
12 …{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
13 … %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
14 …%{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
15 …[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
[all …]
skip-branch-taildup-ret.mir
13 ; CHECK: renamable $vgpr0 = V_LSHLREV_B32_e32 2, killed $vgpr0, implicit $exec
15 ; CHECK: $vgpr1 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $exec
16 …ECK: renamable $vgpr0 = V_ADD_CO_U32_e32 $sgpr0, killed $vgpr0, implicit-def $vcc, implicit $exec
17 …e $vgpr1 = V_ADDC_U32_e32 0, killed $vgpr1, implicit-def $vcc, implicit killed $vcc, implicit $exec
18 …; CHECK: renamable $vgpr0 = FLAT_LOAD_DWORD renamable $vgpr0_vgpr1, 0, 0, 0, 0, implicit $exec, …
21 ; CHECK: V_CMP_NE_U32_e32 0, killed $vgpr0, implicit-def $vcc, implicit $exec
22 …CK: $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
23 …; CHECK: renamable $sgpr2_sgpr3 = S_XOR_B64 $exec, killed renamable $sgpr2_sgpr3, implicit-def d…
24 ; CHECK: SI_MASK_BRANCH %bb.1, implicit $exec
25 ; CHECK: S_CBRANCH_EXECZ %bb.1, implicit $exec
[all …]
waitcnt-overflow.mir
24 ; GFX9: $vgpr34_vgpr35 = DS_READ2_B32_gfx9 renamable $vgpr99, 34, 35, 0, implicit $exec
27 ; GFX9-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
28 ; GFX9-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
29 ; GFX9-NEXT: $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $mode, implicit $exec
30 ; GFX9-NEXT: $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $mode, implicit $exec
33 ; GFX10: $vgpr34_vgpr35 = DS_READ2_B32_gfx9 renamable $vgpr99, 34, 35, 0, implicit $exec
35 ; GFX10-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
37 ; GFX10-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
39 ; GFX10-NEXT: $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $mode, implicit $exec
41 ; GFX10-NEXT: $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $mode, implicit $exec
[all …]
memory_clause.mir
4 …ber %4:vreg_128, early-clobber %1:vreg_128, early-clobber %3:vreg_128 = BUNDLE %0, implicit $exec {
5 # GCN-NEXT: %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, 0, implicit $exec
6 # GCN-NEXT: %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, 0, implicit $exec
7 # GCN-NEXT: %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, 0, implicit $exec
8 # GCN-NEXT: %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, 0, implicit $exec
10 # GCN-NEXT: GLOBAL_STORE_DWORDX4 %0, %1, 0, 0, 0, 0, implicit $exec
24 %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, 0, implicit $exec
25 %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, 0, implicit $exec
26 %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, 0, implicit $exec
27 %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, 0, implicit $exec
[all …]
accvgpr-copy.mir
54 ; GCN: $vgpr0 = V_ACCVGPR_READ_B32 killed $agpr0, implicit $exec, implicit $exec
56 $vgpr0 = COPY killed $agpr0, implicit $exec
69 …; GCN: $vgpr0 = V_ACCVGPR_READ_B32 $agpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $ag…
70 …GCN: $vgpr1 = V_ACCVGPR_READ_B32 $agpr1, implicit $exec, implicit killed $agpr0_agpr1, implicit $e…
72 $vgpr0_vgpr1 = COPY killed $agpr0_agpr1, implicit $exec
85 …; GCN: $vgpr0 = V_ACCVGPR_READ_B32 $agpr0, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2, implic…
86 ; GCN: $vgpr1 = V_ACCVGPR_READ_B32 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2
87 …: $vgpr2 = V_ACCVGPR_READ_B32 $agpr2, implicit $exec, implicit killed $agpr0_agpr1_agpr2, implicit…
89 $vgpr0_vgpr1_vgpr2 = COPY killed $agpr0_agpr1_agpr2, implicit $exec
101 …; GCN: $vgpr0 = V_ACCVGPR_READ_B32 $agpr0, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, …
[all …]
collapse-endcf.mir
14 ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
16 ; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_]]
17 ; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
20 ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
22 ; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_1]]
23 ; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
27 ; GCN: $exec = S_OR_B64 $exec, [[COPY]], implicit-def $scc
33 …64 = SI_IF undef %1:sreg_64, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
38 …64 = SI_IF undef %3:sreg_64, %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
43 SI_END_CF %2:sreg_64, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
[all …]
macro-fusion-cluster-vcc-uses.mir
5 # GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
6 # GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, 0, implicit $exec
20 %0 = V_MOV_B32_e32 0, implicit $exec
21 %1 = V_MOV_B32_e32 0, implicit $exec
22 %2, %3 = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
23 %6 = V_MOV_B32_e32 0, implicit $exec
24 %7 = V_MOV_B32_e32 0, implicit $exec
26 %4, %5 = V_ADDC_U32_e64 %6, %7, %3, 0, implicit $exec
30 # GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
31 # GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, 0, implicit $exec
[all …]
dpp_combine.mir
9 # GCN: %4:vgpr_32 = V_ADD_U32_dpp %2, %0, %1, 1, 15, 15, 1, implicit $exec
10 # GCN: %6:vgpr_32 = V_ADD_U32_e32 %5, %1, implicit $exec
11 # GCN: %8:vgpr_32 = V_ADD_U32_e32 %7, %1, implicit $exec
12 # GCN: %10:vgpr_32 = V_ADD_U32_e32 %9, %1, implicit $exec
14 # GCN: %12:vgpr_32 = V_NOT_B32_dpp %2, %0, 1, 15, 15, 1, implicit $exec
15 # GCN: %14:vgpr_32 = V_NOT_B32_e32 %13, implicit $exec
16 # GCN: %16:vgpr_32 = V_NOT_B32_e32 %15, implicit $exec
17 # GCN: %18:vgpr_32 = V_NOT_B32_e32 %17, implicit $exec
28 %3:vgpr_32 = V_MOV_B32_dpp %2, %0, 1, 15, 15, 1, implicit $exec
29 %4:vgpr_32 = V_ADD_U32_e32 %3, %1, implicit $exec
[all …]
constant-fold-imm-immreg.mir
5 # GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit $exec
40 BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, 0, 0, implicit $exec
48 # GCN: [[VAL0:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
51 # GCN: [[VAL1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
54 # GCN: [[VAL2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
57 # GCN: [[VAL3:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1234567, implicit $exec
60 # GCN: [[VAL4:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 63, implicit $exec
75 %13:vgpr_32 = V_ASHRREV_I32_e64 31, %3, implicit $exec
77 %15:vreg_64 = V_LSHLREV_B64 2, killed %14, implicit $exec
79 %20:vgpr_32 = V_ADD_CO_U32_e32 %4.sub0, %15.sub0, implicit-def $vcc, implicit $exec
[all …]
hard-clauses.mir
45 …16, implicit-def $vgpr64_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
47 …pr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, 0, 0, 0, implicit $exec
48 …pr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, 0, 0, 0, implicit $exec
49 …r3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, 0, 0, 0, implicit $exec
50 …r4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, 0, 0, 0, implicit $exec
51 …r5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, 0, 0, 0, implicit $exec
52 …r6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, 0, 0, 0, implicit $exec
53 …r7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, 0, 0, 0, implicit $exec
54 …r8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, 0, 0, 0, implicit $exec
55 …r9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, 0, 0, 0, implicit $exec
[all …]
optimize-exec-masking-pre-ra.mir
2 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -run-pass=si-optimize-exec-masking-pre-ra -verify-machineins…
18 ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
20 ; GCN: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[DEF]], implicit $exec
21 ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
24 ; GCN: $exec = S_MOV_B64_term [[S_AND_B64_]]
25 ; GCN: SI_MASK_BRANCH %bb.2, implicit $exec
31 …]:sreg_64 = S_OR_SAVEEXEC_B64 [[S_XOR_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
32 ; GCN: $exec = S_AND_B64 $exec, [[COPY]], implicit-def dead $scc
33 …; GCN: [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 $exec, [[S_OR_SAVEEXEC_B64_]], implicit-def $…
34 ; GCN: $exec = S_XOR_B64_term $exec, [[S_AND_B64_1]], implicit-def $scc
[all …]
splitkit-copy-live-lanes.mir
26 …ber %5:vreg_128, early-clobber %6:vreg_128, early-clobber %7:vreg_128 = BUNDLE %3, implicit $exec {
27 …:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %3, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (load 16, align …
28 …vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %3, 0, 16, 0, 0, 0, 0, 0, implicit $exec :: (load 16, addrsp…
29 …vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %3, 0, 32, 0, 0, 0, 0, 0, implicit $exec :: (load 16, align …
30 …vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %3, 0, 48, 0, 0, 0, 0, 0, implicit $exec :: (load 16, addrsp…
32 …undef %47.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET]].sub1, implicit $exec
33 …; CHECK: SI_SPILL_V128_SAVE %47, %stack.0, $sgpr32, 0, implicit $exec :: (store 16 into %stack.0, …
34 …undef %52.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0, implicit $exec
35 …; CHECK: SI_SPILL_V128_SAVE %52, %stack.1, $sgpr32, 0, implicit $exec :: (store 16 into %stack.1, …
36 …undef %57.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3, implicit $exec
[all …]
break-vmem-soft-clauses.mir
13 ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
16 $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
26 ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
27 … ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
30 $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
31 $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
41 ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
42 … ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
43 … ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
46 $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
[all …]
optimize-if-exec-masking.mir
1 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-optimize-exec-masking -o - %s | FileCh…
129 …HECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
130 # CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
140 $sgpr0_sgpr1 = COPY $exec
141 $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
142 $vgpr0 = V_MOV_B32_e32 4, implicit $exec
145 $exec = S_MOV_B64_term killed $sgpr2_sgpr3
146 SI_MASK_BRANCH %bb.2, implicit $exec
154 $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, 0, 0, implicit $exec
159 $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
[all …]
promote-constOffset-to-imm.mir
17 $vgpr0 = V_MOV_B32_e32 0, implicit $exec
19 %7:vgpr_32 = V_AND_B32_e32 255, %6.sub0, implicit $exec
20 %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
22 %10:vgpr_32 = V_LSHLREV_B32_e64 7, %6.sub0, implicit $exec
23 %11:vgpr_32 = V_AND_B32_e32 -32768, killed %10, implicit $exec
26 %14:vgpr_32, %15:sreg_64_xexec = V_ADD_CO_U32_e64 %1.sub0, %11, 0, implicit $exec
28 %17:vgpr_32, dead %18:sreg_64_xexec = V_ADDC_U32_e64 %16, %13, killed %15, 0, implicit $exec
30 %20:vreg_64 = V_LSHLREV_B64 3, %9, implicit $exec
31 %21:vgpr_32, %22:sreg_64_xexec = V_ADD_CO_U32_e64 %14, %20.sub0, 0, implicit $exec
32 … %23:vgpr_32, dead %24:sreg_64_xexec = V_ADDC_U32_e64 %17, %20.sub1, killed %22, 0, implicit $exec
[all …]
collapse-endcf2.mir
2 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=si-optimize-exec-masking-pre-ra %s -o - | …
13 # Call should be assumed to read exec
29 ; GCN: [[V_CMP_LT_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_U32_e64 1, [[COPY1]], implicit $exec
30 ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
32 ; GCN: $exec = S_MOV_B64_term [[S_AND_B64_]]
33 ; GCN: SI_MASK_BRANCH %bb.4, implicit $exec
38 ; GCN: undef %6.sub0:vreg_64 = V_LSHLREV_B32_e32 2, [[COPY1]], implicit $exec
39 ; GCN: %6.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
41 …N: undef %8.sub0:vreg_64, %9:sreg_64_xexec = V_ADD_CO_U32_e64 %5.sub0, %6.sub0, 0, implicit $exec
42 …GCN: %8.sub1:vreg_64, dead %10:sreg_64_xexec = V_ADDC_U32_e64 0, [[COPY3]], %9, 0, implicit $exec
[all …]
merge-image-load-gfx10.mir
4 …_V4_V2_gfx10 %5, %3, 15, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec, implicit $exec :: (derefer…
16 …%5:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (deref…
17 …fx10 %5:vreg_64, %3:sgpr_256, 1, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable…
18 …x10 %5:vreg_64, %3:sgpr_256, 14, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable…
22 …_V4_V2_gfx10 %5, %3, 15, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec, implicit $exec :: (derefer…
34 …%5:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (deref…
35 …fx10 %5:vreg_64, %3:sgpr_256, 8, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable…
36 …fx10 %5:vreg_64, %3:sgpr_256, 7, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable…
41 …_V4_V2_gfx10 %5, %3, 15, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec, implicit $exec :: (derefer…
53 …%5:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %2:sgpr_128, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (deref…
[all …]
reduce-saveexec.mir
1 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-optimize-exec-masking %s -o - | FileChe…
5 # GCN: $exec = S_AND_B64 $exec, killed $vcc
12 $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
13 $exec = COPY killed $sgpr0_sgpr1
18 # GCN: $exec = S_AND_B64 killed $vcc, $exec
25 $sgpr0_sgpr1 = S_AND_B64 killed $vcc, $exec, implicit-def $scc
26 $exec = COPY killed $sgpr0_sgpr1
31 # GCN: $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc
32 # GCN-NEXT: $exec = COPY
38 $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
[all …]
/external/igt-gpu-tools/tests/i915/
gem_exec_await.c
69 struct drm_i915_gem_exec_object2 exec[2]; in wide() member
73 } *exec; in wide() local
86 exec = calloc(nengine, sizeof(*exec)); in wide()
87 igt_assert(exec); in wide()
94 exec[e].obj = calloc(ring_size, sizeof(*exec[e].obj)); in wide()
95 igt_assert(exec[e].obj); in wide()
97 exec[e].obj[n].handle = gem_create(fd, 4096); in wide()
98 exec[e].obj[n].flags = EXEC_OBJECT_WRITE; in wide()
100 obj[e*ring_size + n].handle = exec[e].obj[n].handle; in wide()
103 exec[e].execbuf.buffers_ptr = to_user_pointer(exec[e].exec); in wide()
[all …]
/external/mesa3d/src/gallium/drivers/vc4/kernel/
vc4_gem.c
33 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) in vc4_get_bcl() argument
35 struct drm_vc4_submit_cl *args = exec->args; in vc4_get_bcl()
71 exec->shader_rec_u = temp + shader_rec_offset; in vc4_get_bcl()
72 exec->uniforms_u = temp + uniforms_offset; in vc4_get_bcl()
73 exec->shader_state = temp + exec_size; in vc4_get_bcl()
74 exec->shader_state_size = args->shader_rec_count; in vc4_get_bcl()
84 ret = copy_from_user(exec->shader_rec_u, in vc4_get_bcl()
92 ret = copy_from_user(exec->uniforms_u, in vc4_get_bcl()
100 exec->exec_bo = drm_gem_cma_create(dev, exec_size); in vc4_get_bcl()
102 if (IS_ERR(exec->exec_bo)) { in vc4_get_bcl()
[all …]
vc4_validate.c
46 struct vc4_exec_info *exec, \
97 vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) in vc4_use_bo() argument
102 if (hindex >= exec->bo_count) { in vc4_use_bo()
104 hindex, exec->bo_count); in vc4_use_bo()
107 obj = exec->bo[hindex]; in vc4_use_bo()
120 vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index) in vc4_use_handle() argument
122 return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]); in vc4_use_handle()
126 validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos) in validate_bin_pos() argument
131 return (untrusted - 1 == exec->bin_u + pos); in validate_bin_pos()
150 vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, in vc4_check_tex_size() argument
[all …]
