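; Test the lowering of the 'ashr' operation for every legal integer type on
; 32-bit and 64-bit MIPS targets, including the microMIPS variants.
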
; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,M2
; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5
; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5
; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5
; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5
; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R6
; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,M3
; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,64R6
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM,MMR3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM,MMR6

define signext i1 @ashr_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: ashr_i1:

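  ; Any i1 shift amount other than zero is poison, so the shift reduces to a
  ; copy of %a.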
  ; ALL:        move    $2, $4

  %r = ashr i1 %a, %b
  ret i1 %r
}

define signext i8 @ashr_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: ashr_i8:

  ; FIXME: The andi instruction is redundant.
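  ; (srav only consumes the low five bits of the shift amount, so masking
  ; $5 with 255 cannot change the result.)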
  ; GP32:       andi    $[[T0:[0-9]+]], $5, 255
  ; GP64:       andi    $[[T0:[0-9]+]], $5, 255
  ; MM:         andi16  $[[T0:[0-9]+]], $5, 255
  ; ALL:        srav    $2, $4, $[[T0]]

  %r = ashr i8 %a, %b
  ret i8 %r
}

define signext i16 @ashr_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: ashr_i16:

  ; FIXME: The andi instruction is redundant.
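  ; (The same dead masking as in ashr_i8, with a 16-bit mask.)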
  ; GP32:       andi    $[[T0:[0-9]+]], $5, 65535
  ; GP64:       andi    $[[T0:[0-9]+]], $5, 65535
  ; MM:         andi16  $[[T0:[0-9]+]], $5, 65535
  ; ALL:        srav    $2, $4, $[[T0]]

  %r = ashr i16 %a, %b
  ret i16 %r
}

define signext i32 @ashr_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: ashr_i32:

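  ; i32 is the native word size, so a single srav suffices on every target.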
  ; ALL:        srav    $2, $4, $5

  %r = ashr i32 %a, %b
  ret i32 %r
}

define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: ashr_i64:

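  ; On 32-bit targets the i64 shift is expanded as
  ;   lo = (lo >>u amt) | ((hi << 1) << ~amt), hi = hi >>s amt,
  ; with a fixup when amt is 32 or more (lo takes the shifted hi word and hi
  ; takes the sign bits). MIPS II selects between the two cases with branches,
  ; 32r1-32r5 and microMIPS r3 use movn, and r6 uses seleqz/selnez. 64-bit
  ; targets need only a single dsrav.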
  ; M2:         srav      $[[T0:[0-9]+]], $4, $7
  ; M2:         andi      $[[T1:[0-9]+]], $7, 32
  ; M2:         bnez      $[[T1]], $[[BB0:BB[0-9_]+]]
  ; M2:         move      $3, $[[T0]]
  ; M2:         srlv      $[[T2:[0-9]+]], $5, $7
  ; M2:         not       $[[T3:[0-9]+]], $7
  ; M2:         sll       $[[T4:[0-9]+]], $4, 1
  ; M2:         sllv      $[[T5:[0-9]+]], $[[T4]], $[[T3]]
  ; M2:         or        $3, $[[T5]], $[[T2]]
  ; M2:         $[[BB0]]:
  ; M2:         beqz      $[[T1]], $[[BB1:BB[0-9_]+]]
  ; M2:         nop
  ; M2:         sra       $2, $4, 31
  ; M2:         $[[BB1]]:
  ; M2:         jr        $ra
  ; M2:         nop

  ; 32R1-R5:    srlv      $[[T0:[0-9]+]], $5, $7
  ; 32R1-R5:    not       $[[T1:[0-9]+]], $7
  ; 32R1-R5:    sll       $[[T2:[0-9]+]], $4, 1
  ; 32R1-R5:    sllv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R1-R5:    or        $3, $[[T3]], $[[T0]]
  ; 32R1-R5:    srav      $[[T4:[0-9]+]], $4, $7
  ; 32R1-R5:    andi      $[[T5:[0-9]+]], $7, 32
  ; 32R1-R5:    movn      $3, $[[T4]], $[[T5]]
  ; 32R1-R5:    sra       $4, $4, 31
  ; 32R1-R5:    jr        $ra
  ; 32R1-R5:    movn      $2, $4, $[[T5]]

  ; 32R6:       srav      $[[T0:[0-9]+]], $4, $7
  ; 32R6:       andi      $[[T1:[0-9]+]], $7, 32
  ; 32R6:       seleqz    $[[T2:[0-9]+]], $[[T0]], $[[T1]]
  ; 32R6:       sra       $[[T3:[0-9]+]], $4, 31
  ; 32R6:       selnez    $[[T4:[0-9]+]], $[[T3]], $[[T1]]
  ; 32R6:       or        $[[T5:[0-9]+]], $[[T4]], $[[T2]]
  ; 32R6:       srlv      $[[T6:[0-9]+]], $5, $7
  ; 32R6:       not       $[[T7:[0-9]+]], $7
  ; 32R6:       sll       $[[T8:[0-9]+]], $4, 1
  ; 32R6:       sllv      $[[T9:[0-9]+]], $[[T8]], $[[T7]]
  ; 32R6:       or        $[[T10:[0-9]+]], $[[T9]], $[[T6]]
  ; 32R6:       seleqz    $[[T11:[0-9]+]], $[[T10]], $[[T1]]
  ; 32R6:       selnez    $[[T12:[0-9]+]], $[[T0]], $[[T1]]
  ; 32R6:       jr        $ra
  ; 32R6:       or        $3, $[[T12]], $[[T11]]

  ; GP64:       dsrav     $2, $4, $5

  ; MMR3:       srlv      $[[T0:[0-9]+]], $5, $7
  ; MMR3:       sll16     $[[T1:[0-9]+]], $4, 1
  ; MMR3:       not16     $[[T2:[0-9]+]], $7
  ; MMR3:       sllv      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
  ; MMR3:       or16      $[[T4:[0-9]+]], $[[T0]]
  ; MMR3:       srav      $[[T5:[0-9]+]], $4, $7
  ; MMR3:       andi16    $[[T6:[0-9]+]], $7, 32
  ; MMR3:       movn      $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; MMR3:       sra       $[[T8:[0-9]+]], $4, 31
  ; MMR3:       movn      $2, $[[T8]], $[[T6]]

  ; MMR6:       srav      $[[T0:[0-9]+]], $4, $7
  ; MMR6:       andi16    $[[T1:[0-9]+]], $7, 32
  ; MMR6:       seleqz    $[[T2:[0-9]+]], $[[T0]], $[[T1]]
  ; MMR6:       sra       $[[T3:[0-9]+]], $4, 31
  ; MMR6:       selnez    $[[T4:[0-9]+]], $[[T3]], $[[T1]]
  ; MMR6:       or        $[[T5:[0-9]+]], $[[T4]], $[[T2]]
  ; MMR6:       srlv      $[[T6:[0-9]+]], $5, $7
  ; MMR6:       sll16     $[[T7:[0-9]+]], $4, 1
  ; MMR6:       not16     $[[T8:[0-9]+]], $7
  ; MMR6:       sllv      $[[T9:[0-9]+]], $[[T7]], $[[T8]]
  ; MMR6:       or16      $[[T10:[0-9]+]], $[[T6]]
  ; MMR6:       seleqz    $[[T11:[0-9]+]], $[[T10]], $[[T1]]
  ; MMR6:       selnez    $[[T12:[0-9]+]], $[[T0]], $[[T1]]
  ; MMR6:       or        $3, $[[T12]], $[[T11]]

  %r = ashr i64 %a, %b
  ret i64 %r
}

define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: ashr_i128:

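  ; 32-bit and microMIPS targets call the __ashrti3 runtime routine through
  ; the PIC call register $25; 64-bit targets expand the shift inline, using
  ; the same pattern as ashr_i64 but with doubleword instructions.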
  ; GP32:           lw        $25, %call16(__ashrti3)($gp)

  ; M3:             sll       $[[T0:[0-9]+]], $7, 0
  ; M3:             dsrav     $[[T1:[0-9]+]], $4, $7
  ; M3:             andi      $[[T2:[0-9]+]], $[[T0]], 64
  ; M3:             bnez      $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
  ; M3:             move      $3, $[[T1]]
  ; M3:             dsrlv     $[[T4:[0-9]+]], $5, $7
  ; M3:             dsll      $[[T5:[0-9]+]], $4, 1
  ; M3:             not       $[[T6:[0-9]+]], $[[T0]]
  ; M3:             dsllv     $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; M3:             or        $3, $[[T7]], $[[T4]]
  ; M3:             $[[BB0]]:
  ; M3:             beqz      $[[T3]], $[[BB1:BB[0-9_]+]]
  ; M3:             nop
  ; M3:             dsra      $2, $4, 63
  ; M3:             $[[BB1]]:
  ; M3:             jr        $ra
  ; M3:             nop

  ; GP64-NOT-R6:    dsrlv     $[[T0:[0-9]+]], $5, $7
  ; GP64-NOT-R6:    dsll      $[[T1:[0-9]+]], $4, 1
  ; GP64-NOT-R6:    sll       $[[T2:[0-9]+]], $7, 0
  ; GP64-NOT-R6:    not       $[[T3:[0-9]+]], $[[T2]]
  ; GP64-NOT-R6:    dsllv     $[[T4:[0-9]+]], $[[T1]], $[[T3]]
  ; GP64-NOT-R6:    or        $3, $[[T4]], $[[T0]]
  ; GP64-NOT-R6:    dsrav     $2, $4, $7
  ; GP64-NOT-R6:    andi      $[[T5:[0-9]+]], $[[T2]], 64
  ; GP64-NOT-R6:    movn      $3, $2, $[[T5]]
  ; GP64-NOT-R6:    dsra      $[[T6:[0-9]+]], $4, 63
  ; GP64-NOT-R6:    jr        $ra
  ; GP64-NOT-R6:    movn      $2, $[[T6]], $[[T5]]

  ; 64R6:           dsrav     $[[T0:[0-9]+]], $4, $7
  ; 64R6:           sll       $[[T1:[0-9]+]], $7, 0
  ; 64R6:           andi      $[[T2:[0-9]+]], $[[T1]], 64
  ; 64R6:           sll       $[[T3:[0-9]+]], $[[T2]], 0
  ; 64R6:           seleqz    $[[T4:[0-9]+]], $[[T0]], $[[T3]]
  ; 64R6:           dsra      $[[T5:[0-9]+]], $4, 63
  ; 64R6:           selnez    $[[T6:[0-9]+]], $[[T5]], $[[T3]]
  ; 64R6:           or        $2, $[[T6]], $[[T4]]
  ; 64R6:           dsrlv     $[[T7:[0-9]+]], $5, $7
  ; 64R6:           dsll      $[[T8:[0-9]+]], $4, 1
  ; 64R6:           not       $[[T9:[0-9]+]], $[[T1]]
  ; 64R6:           dsllv     $[[T10:[0-9]+]], $[[T8]], $[[T9]]
  ; 64R6:           or        $[[T11:[0-9]+]], $[[T10]], $[[T7]]
  ; 64R6:           seleqz    $[[T12:[0-9]+]], $[[T11]], $[[T3]]
  ; 64R6:           selnez    $[[T13:[0-9]+]], $[[T0]], $[[T3]]
  ; 64R6:           jr        $ra
  ; 64R6:           or        $3, $[[T13]], $[[T12]]

  ; MM:             lw        $25, %call16(__ashrti3)($2)

  %r = ashr i128 %a, %b
  ret i128 %r
}