Lines Matching refs:COPY8
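
The matches below appear to be FileCheck lines from an AMDGPU GlobalISel MIR test: the GFX9 and GFX10NSA prefixes cover two subtarget RUN configurations, and in every match COPY8 names the virtual register copied from $vgpr0, i.e. the data operand of each G_AMDGPU_INTRIN_IMAGE_LOAD image-atomic instruction.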

17   ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
21 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.swap.1d), [[COPY8]](s32), [[TRUNC]](…
35 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
39 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.swap.1d), [[COPY8]](s32), [[TRUNC]](…
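
For context, a minimal sketch of the kind of IR a test like this checks, assuming the 16-bit-address (a16) form of the intrinsic (the [[TRUNC]] operand suggests an s32 coordinate copy truncated to s16). The .i32.i16 mangling suffix, the function name, and the operand order are assumptions, not taken from this listing:

    ; Hypothetical caller: %data lands in $vgpr0 (the COPY8 checked above), and
    ; %s arrives in a 32-bit VGPR that is truncated to s16 (the TRUNC operand).
    declare i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i16(i32, i16, <8 x i32>, i32, i32)

    define amdgpu_ps float @atomic_swap_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
    main_body:
      %v = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
      %out = bitcast i32 %v to float
      ret float %out
    }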
60 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
64 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](…
78 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
82 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](…
103 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
107 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.sub.1d), [[COPY8]](s32), [[TRUNC]](…
121 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
125 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.sub.1d), [[COPY8]](s32), [[TRUNC]](…
146 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
150 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.smin.1d), [[COPY8]](s32), [[TRUNC]](…
164 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
168 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.smin.1d), [[COPY8]](s32), [[TRUNC]](…
190 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
194 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.umin.1d), [[COPY8]](s32), [[TRUNC]](…
208 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
212 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.umin.1d), [[COPY8]](s32), [[TRUNC]](…
233 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
237 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.smax.1d), [[COPY8]](s32), [[TRUNC]](…
251 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
255 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.smax.1d), [[COPY8]](s32), [[TRUNC]](…
276 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
280 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.umax.1d), [[COPY8]](s32), [[TRUNC]](…
294 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
298 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.umax.1d), [[COPY8]](s32), [[TRUNC]](…
319 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
323 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.and.1d), [[COPY8]](s32), [[TRUNC]](…
337 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
341 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.and.1d), [[COPY8]](s32), [[TRUNC]](…
362 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
366 … G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.or.1d), [[COPY8]](s32), [[TRUNC]](…
380 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
384 … G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.or.1d), [[COPY8]](s32), [[TRUNC]](…
405 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
409 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.xor.1d), [[COPY8]](s32), [[TRUNC]](…
423 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
427 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.xor.1d), [[COPY8]](s32), [[TRUNC]](…
448 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
452 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.inc.1d), [[COPY8]](s32), [[TRUNC]](…
466 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
470 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.inc.1d), [[COPY8]](s32), [[TRUNC]](…
491 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
495 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.dec.1d), [[COPY8]](s32), [[TRUNC]](…
509 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
513 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.dec.1d), [[COPY8]](s32), [[TRUNC]](…
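
The twelve 1d matches above (file lines 17 through 513) all share one shape: swap, add, sub, smin, umin, smax, umax, and, or, xor, inc, and dec differ only in the intrinsic name, while the COPY8 data operand and the truncated coordinate are identical across them.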
534 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
539 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
554 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
559 …; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
581 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
588 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2d), [[COPY8]](s32), [[BUILD_VE…
602 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
609 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2d), [[COPY8]](s32), [[BUILD_VE…
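
The 2d form takes a second coordinate, packed with the first into the vector operand ([[BUILD_VE…]]) that follows [[COPY8]]. A sketch under the same a16 assumptions (signature again assumed, not taken from this listing):

    declare i32 @llvm.amdgcn.image.atomic.add.2d.i32.i16(i32, i16, i16, <8 x i32>, i32, i32)

    define amdgpu_ps float @atomic_add_2d(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t) {
    main_body:
      ; %s and %t become the two-element coordinate vector in the legalized MIR.
      %v = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i16(i32 %data, i16 %s, i16 %t, <8 x i32> %rsrc, i32 0, i32 0)
      %out = bitcast i32 %v to float
      ret float %out
    }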
630 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
642 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.3d), [[COPY8]](s32), [[CONCAT_V…
656 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
668 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.3d), [[COPY8]](s32), [[CONCAT_V…
689 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
701 …AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.cube), [[COPY8]](s32), [[CONCAT_V…
715 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
727 …AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.cube), [[COPY8]](s32), [[CONCAT_V…
748 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
755 …GPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1darray), [[COPY8]](s32), [[BUILD_VE…
769 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
776 …GPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1darray), [[COPY8]](s32), [[BUILD_VE…
797 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
809 …GPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2darray), [[COPY8]](s32), [[CONCAT_V…
823 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
835 …GPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2darray), [[COPY8]](s32), [[CONCAT_V…
856 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
868 …DGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2dmsaa), [[COPY8]](s32), [[CONCAT_V…
882 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
894 …DGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2dmsaa), [[COPY8]](s32), [[CONCAT_V…
915 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
928 …INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2darraymsaa), [[COPY8]](s32), [[CONCAT_V…
942 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
955 …INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.2darraymsaa), [[COPY8]](s32), [[CONCAT_V…
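
For the higher-dimension variants (3d, cube, 1darray, 2darray, 2dmsaa, 2darraymsaa) the coordinates are joined into a single vector, which is why these checks show a [[CONCAT_V…]] operand after [[COPY8]]. A sketch for the widest case listed, 2darraymsaa, with the same caveat that the signature and operand order are assumptions:

    declare i32 @llvm.amdgcn.image.atomic.add.2darraymsaa.i32.i16(i32, i16, i16, i16, i16, <8 x i32>, i32, i32)

    define amdgpu_ps float @atomic_add_2darraymsaa(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %slice, i16 %fragid) {
    main_body:
      ; Four s16 coordinates -> two <2 x s16> halves joined by G_CONCAT_VECTORS.
      %v = call i32 @llvm.amdgcn.image.atomic.add.2darraymsaa.i32.i16(i32 %data, i16 %s, i16 %t, i16 %slice, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
      %out = bitcast i32 %v to float
      ret float %out
    }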
976 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
980 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](…
994 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
998 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](…
1019 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
1024 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
1042 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
1047 …; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
1072 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
1078 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
1100 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
1106 …; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
1135 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
1142 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
1164 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
1171 …; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…