; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=X64

; Check for assert in foldMaskAndShiftToScale due to out of range mask scaling.

@b = common global i8 zeroinitializer, align 1
@c = common global i8 zeroinitializer, align 1
@d = common global i64 zeroinitializer, align 8
@e = common global i64 zeroinitializer, align 8

define void @foo(i64 %x) nounwind {
; X86-LABEL: foo:
; X86:       # %bb.0:
; X86-NEXT:    pushl %eax
; X86-NEXT:    movl d+4, %eax
; X86-NEXT:    notl %eax
; X86-NEXT:    movl d, %ecx
; X86-NEXT:    notl %ecx
; X86-NEXT:    andl $-566231040, %ecx # imm = 0xDE400000
; X86-NEXT:    andl $701685459, %eax # imm = 0x29D2DED3
; X86-NEXT:    shrdl $21, %eax, %ecx
; X86-NEXT:    shrl $21, %eax
; X86-NEXT:    addl $7, %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __divdi3
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    orl %eax, %edx
; X86-NEXT:    setne {{[0-9]+}}(%esp)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: foo:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    movq {{.*}}(%rip), %rcx
; X64-NEXT:    movabsq $3013716102212485120, %rdx # imm = 0x29D2DED3DE400000
; X64-NEXT:    andnq %rdx, %rcx, %rcx
; X64-NEXT:    shrq $21, %rcx
; X64-NEXT:    addq $7, %rcx
; X64-NEXT:    movq %rdi, %rdx
; X64-NEXT:    orq %rcx, %rdx
; X64-NEXT:    shrq $32, %rdx
; X64-NEXT:    je .LBB0_1
; X64-NEXT:  # %bb.2:
; X64-NEXT:    cqto
; X64-NEXT:    idivq %rcx
; X64-NEXT:    jmp .LBB0_3
; X64-NEXT:  .LBB0_1:
; X64-NEXT:    # kill: def $eax killed $eax killed $rax
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    # kill: def $eax killed $eax def $rax
; X64-NEXT:  .LBB0_3:
; X64-NEXT:    testq %rax, %rax
; X64-NEXT:    setne -{{[0-9]+}}(%rsp)
; X64-NEXT:    retq
  %1 = alloca i8, align 1
  %2 = load i64, i64* @d, align 8
  %3 = or i64 -3013716102214263007, %2
  %4 = xor i64 %3, -1
  %5 = load i64, i64* @e, align 8
  %6 = load i8, i8* @b, align 1
  %7 = trunc i8 %6 to i1
  %8 = zext i1 %7 to i64
  %9 = xor i64 %5, %8
  %10 = load i8, i8* @c, align 1
  %11 = trunc i8 %10 to i1
  %12 = zext i1 %11 to i32
  %13 = or i32 551409149, %12
  %14 = sub nsw i32 %13, 551409131
  %15 = zext i32 %14 to i64
  %16 = shl i64 %9, %15
  %17 = sub nsw i64 %16, 223084523
  ; The (and (ashr x, C1), C2) pattern below is what exercised the
  ; out-of-range mask scaling in foldMaskAndShiftToScale (see file header).
  %18 = ashr i64 %4, %17
  %19 = and i64 %18, 9223372036854775806
  %20 = add nsw i64 7, %19
  %21 = sdiv i64 %x, %20
  %22 = icmp ne i64 %21, 0
  %23 = zext i1 %22 to i8
  store i8 %23, i8* %1, align 1
  ret void
}