; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R2
; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R0R2
; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R2R5
; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R2R5
; RUN: llc < %s -mtriple=mips -mcpu=mips32r5 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP32,GP32R2R5
; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP32R6

; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R0R1
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R2R5
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r3 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R2R5
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r5 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefixes=GP64,GP64R2R5
; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r6 -relocation-model=pic \
; RUN:   -mips-jalr-reloc=false | FileCheck %s -check-prefix=GP64R6

; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -mattr=+micromips \
; RUN:   -relocation-model=pic -mips-jalr-reloc=false | \
; RUN:   FileCheck %s -check-prefix=MMR3
; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -mattr=+micromips \
; RUN:   -relocation-model=pic -mips-jalr-reloc=false | \
; RUN:   FileCheck %s -check-prefix=MMR6

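; sdiv of i1 values folds away: as the checks below show, every configuration
; just moves the dividend ($4) into the return register ($2).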
define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
; GP32-LABEL: sdiv_i1:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    jr $ra
; GP32-NEXT:    move $2, $4
;
; GP32R6-LABEL: sdiv_i1:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    move $2, $4
;
; GP64-LABEL: sdiv_i1:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    jr $ra
; GP64-NEXT:    move $2, $4
;
; GP64R6-LABEL: sdiv_i1:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    jr $ra
; GP64R6-NEXT:    move $2, $4
;
; MMR3-LABEL: sdiv_i1:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    move $2, $4
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: sdiv_i1:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    move $2, $4
; MMR6-NEXT:    jrc $ra
entry:
  %r = sdiv i1 %a, %b
  ret i1 %r
}

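; i8 division goes through the 32-bit divider, so the quotient is re-sign-
; extended: sll/sra by 24 before R2, seb on R2 and later, while R6 uses the
; three-operand div form that writes the quotient directly.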
define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
; GP32R0R2-LABEL: sdiv_i8:
; GP32R0R2:       # %bb.0: # %entry
; GP32R0R2-NEXT:    div $zero, $4, $5
; GP32R0R2-NEXT:    teq $5, $zero, 7
; GP32R0R2-NEXT:    mflo $1
; GP32R0R2-NEXT:    sll $1, $1, 24
; GP32R0R2-NEXT:    jr $ra
; GP32R0R2-NEXT:    sra $2, $1, 24
;
; GP32R2R5-LABEL: sdiv_i8:
; GP32R2R5:       # %bb.0: # %entry
; GP32R2R5-NEXT:    div $zero, $4, $5
; GP32R2R5-NEXT:    teq $5, $zero, 7
; GP32R2R5-NEXT:    mflo $1
; GP32R2R5-NEXT:    jr $ra
; GP32R2R5-NEXT:    seb $2, $1
;
; GP32R6-LABEL: sdiv_i8:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    div $1, $4, $5
; GP32R6-NEXT:    teq $5, $zero, 7
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    seb $2, $1
;
; GP64R0R1-LABEL: sdiv_i8:
; GP64R0R1:       # %bb.0: # %entry
; GP64R0R1-NEXT:    div $zero, $4, $5
; GP64R0R1-NEXT:    teq $5, $zero, 7
; GP64R0R1-NEXT:    mflo $1
; GP64R0R1-NEXT:    sll $1, $1, 24
; GP64R0R1-NEXT:    jr $ra
; GP64R0R1-NEXT:    sra $2, $1, 24
;
; GP64R2R5-LABEL: sdiv_i8:
; GP64R2R5:       # %bb.0: # %entry
; GP64R2R5-NEXT:    div $zero, $4, $5
; GP64R2R5-NEXT:    teq $5, $zero, 7
; GP64R2R5-NEXT:    mflo $1
; GP64R2R5-NEXT:    jr $ra
; GP64R2R5-NEXT:    seb $2, $1
;
; GP64R6-LABEL: sdiv_i8:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    div $1, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jr $ra
; GP64R6-NEXT:    seb $2, $1
;
; MMR3-LABEL: sdiv_i8:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    div $zero, $4, $5
; MMR3-NEXT:    teq $5, $zero, 7
; MMR3-NEXT:    mflo16 $1
; MMR3-NEXT:    jr $ra
; MMR3-NEXT:    seb $2, $1
;
; MMR6-LABEL: sdiv_i8:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    div $1, $4, $5
; MMR6-NEXT:    teq $5, $zero, 7
; MMR6-NEXT:    seb $2, $1
; MMR6-NEXT:    jrc $ra
entry:
  %r = sdiv i8 %a, %b
  ret i8 %r
}

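; i16 division follows the same pattern with a 16-bit re-sign-extension:
; sll/sra by 16 before R2, seh from R2 onwards.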
define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
; GP32R0R2-LABEL: sdiv_i16:
; GP32R0R2:       # %bb.0: # %entry
; GP32R0R2-NEXT:    div $zero, $4, $5
; GP32R0R2-NEXT:    teq $5, $zero, 7
; GP32R0R2-NEXT:    mflo $1
; GP32R0R2-NEXT:    sll $1, $1, 16
; GP32R0R2-NEXT:    jr $ra
; GP32R0R2-NEXT:    sra $2, $1, 16
;
; GP32R2R5-LABEL: sdiv_i16:
; GP32R2R5:       # %bb.0: # %entry
; GP32R2R5-NEXT:    div $zero, $4, $5
; GP32R2R5-NEXT:    teq $5, $zero, 7
; GP32R2R5-NEXT:    mflo $1
; GP32R2R5-NEXT:    jr $ra
; GP32R2R5-NEXT:    seh $2, $1
;
; GP32R6-LABEL: sdiv_i16:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    div $1, $4, $5
; GP32R6-NEXT:    teq $5, $zero, 7
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    seh $2, $1
;
; GP64R0R1-LABEL: sdiv_i16:
; GP64R0R1:       # %bb.0: # %entry
; GP64R0R1-NEXT:    div $zero, $4, $5
; GP64R0R1-NEXT:    teq $5, $zero, 7
; GP64R0R1-NEXT:    mflo $1
; GP64R0R1-NEXT:    sll $1, $1, 16
; GP64R0R1-NEXT:    jr $ra
; GP64R0R1-NEXT:    sra $2, $1, 16
;
; GP64R2R5-LABEL: sdiv_i16:
; GP64R2R5:       # %bb.0: # %entry
; GP64R2R5-NEXT:    div $zero, $4, $5
; GP64R2R5-NEXT:    teq $5, $zero, 7
; GP64R2R5-NEXT:    mflo $1
; GP64R2R5-NEXT:    jr $ra
; GP64R2R5-NEXT:    seh $2, $1
;
; GP64R6-LABEL: sdiv_i16:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    div $1, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jr $ra
; GP64R6-NEXT:    seh $2, $1
;
; MMR3-LABEL: sdiv_i16:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    div $zero, $4, $5
; MMR3-NEXT:    teq $5, $zero, 7
; MMR3-NEXT:    mflo16 $1
; MMR3-NEXT:    jr $ra
; MMR3-NEXT:    seh $2, $1
;
; MMR6-LABEL: sdiv_i16:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    div $1, $4, $5
; MMR6-NEXT:    teq $5, $zero, 7
; MMR6-NEXT:    seh $2, $1
; MMR6-NEXT:    jrc $ra
entry:
  %r = sdiv i16 %a, %b
  ret i16 %r
}

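; i32 division maps straight onto the divider: div + mflo with a teq
; divide-by-zero trap before R6; R6 and microMIPS R6 use the quotient-writing
; div form.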
define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
; GP32-LABEL: sdiv_i32:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    div $zero, $4, $5
; GP32-NEXT:    teq $5, $zero, 7
; GP32-NEXT:    jr $ra
; GP32-NEXT:    mflo $2
;
; GP32R6-LABEL: sdiv_i32:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    div $2, $4, $5
; GP32R6-NEXT:    teq $5, $zero, 7
; GP32R6-NEXT:    jrc $ra
;
; GP64-LABEL: sdiv_i32:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    div $zero, $4, $5
; GP64-NEXT:    teq $5, $zero, 7
; GP64-NEXT:    jr $ra
; GP64-NEXT:    mflo $2
;
; GP64R6-LABEL: sdiv_i32:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    div $2, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jrc $ra
;
; MMR3-LABEL: sdiv_i32:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    div $zero, $4, $5
; MMR3-NEXT:    teq $5, $zero, 7
; MMR3-NEXT:    mflo16 $2
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: sdiv_i32:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    div $2, $4, $5
; MMR6-NEXT:    teq $5, $zero, 7
; MMR6-NEXT:    jrc $ra
entry:
  %r = sdiv i32 %a, %b
  ret i32 %r
}

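; i64 division becomes a __divdi3 libcall on the 32-bit and microMIPS
; configurations; the 64-bit configurations use ddiv directly.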
define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
; GP32-LABEL: sdiv_i64:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    lui $2, %hi(_gp_disp)
; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32-NEXT:    addiu $sp, $sp, -24
; GP32-NEXT:    .cfi_def_cfa_offset 24
; GP32-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; GP32-NEXT:    .cfi_offset 31, -4
; GP32-NEXT:    addu $gp, $2, $25
; GP32-NEXT:    lw $25, %call16(__divdi3)($gp)
; GP32-NEXT:    jalr $25
; GP32-NEXT:    nop
; GP32-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; GP32-NEXT:    jr $ra
; GP32-NEXT:    addiu $sp, $sp, 24
;
; GP32R6-LABEL: sdiv_i64:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32R6-NEXT:    addiu $sp, $sp, -24
; GP32R6-NEXT:    .cfi_def_cfa_offset 24
; GP32R6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; GP32R6-NEXT:    .cfi_offset 31, -4
; GP32R6-NEXT:    addu $gp, $2, $25
; GP32R6-NEXT:    lw $25, %call16(__divdi3)($gp)
; GP32R6-NEXT:    jalrc $25
; GP32R6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    addiu $sp, $sp, 24
;
; GP64-LABEL: sdiv_i64:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    ddiv $zero, $4, $5
; GP64-NEXT:    teq $5, $zero, 7
; GP64-NEXT:    jr $ra
; GP64-NEXT:    mflo $2
;
; GP64R6-LABEL: sdiv_i64:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    ddiv $2, $4, $5
; GP64R6-NEXT:    teq $5, $zero, 7
; GP64R6-NEXT:    jrc $ra
;
; MMR3-LABEL: sdiv_i64:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    lui $2, %hi(_gp_disp)
; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR3-NEXT:    addiusp -24
; MMR3-NEXT:    .cfi_def_cfa_offset 24
; MMR3-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; MMR3-NEXT:    .cfi_offset 31, -4
; MMR3-NEXT:    addu $2, $2, $25
; MMR3-NEXT:    lw $25, %call16(__divdi3)($2)
; MMR3-NEXT:    move $gp, $2
; MMR3-NEXT:    jalr $25
; MMR3-NEXT:    nop
; MMR3-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; MMR3-NEXT:    addiusp 24
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: sdiv_i64:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    lui $2, %hi(_gp_disp)
; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR6-NEXT:    addiu $sp, $sp, -24
; MMR6-NEXT:    .cfi_def_cfa_offset 24
; MMR6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
; MMR6-NEXT:    .cfi_offset 31, -4
; MMR6-NEXT:    addu $2, $2, $25
; MMR6-NEXT:    lw $25, %call16(__divdi3)($2)
; MMR6-NEXT:    move $gp, $2
; MMR6-NEXT:    jalr $25
; MMR6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
; MMR6-NEXT:    addiu $sp, $sp, 24
; MMR6-NEXT:    jrc $ra
entry:
  %r = sdiv i64 %a, %b
  ret i64 %r
}

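; i128 division is lowered to a __divti3 libcall on every configuration.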
define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
; GP32-LABEL: sdiv_i128:
; GP32:       # %bb.0: # %entry
; GP32-NEXT:    lui $2, %hi(_gp_disp)
; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32-NEXT:    addiu $sp, $sp, -40
; GP32-NEXT:    .cfi_def_cfa_offset 40
; GP32-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
; GP32-NEXT:    .cfi_offset 31, -4
; GP32-NEXT:    addu $gp, $2, $25
; GP32-NEXT:    lw $1, 60($sp)
; GP32-NEXT:    lw $2, 64($sp)
; GP32-NEXT:    lw $3, 68($sp)
; GP32-NEXT:    sw $3, 28($sp)
; GP32-NEXT:    sw $2, 24($sp)
; GP32-NEXT:    sw $1, 20($sp)
; GP32-NEXT:    lw $1, 56($sp)
; GP32-NEXT:    sw $1, 16($sp)
; GP32-NEXT:    lw $25, %call16(__divti3)($gp)
; GP32-NEXT:    jalr $25
; GP32-NEXT:    nop
; GP32-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
; GP32-NEXT:    jr $ra
; GP32-NEXT:    addiu $sp, $sp, 40
;
; GP32R6-LABEL: sdiv_i128:
; GP32R6:       # %bb.0: # %entry
; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; GP32R6-NEXT:    addiu $sp, $sp, -40
; GP32R6-NEXT:    .cfi_def_cfa_offset 40
; GP32R6-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
; GP32R6-NEXT:    .cfi_offset 31, -4
; GP32R6-NEXT:    addu $gp, $2, $25
; GP32R6-NEXT:    lw $1, 60($sp)
; GP32R6-NEXT:    lw $2, 64($sp)
; GP32R6-NEXT:    lw $3, 68($sp)
; GP32R6-NEXT:    sw $3, 28($sp)
; GP32R6-NEXT:    sw $2, 24($sp)
; GP32R6-NEXT:    sw $1, 20($sp)
; GP32R6-NEXT:    lw $1, 56($sp)
; GP32R6-NEXT:    sw $1, 16($sp)
; GP32R6-NEXT:    lw $25, %call16(__divti3)($gp)
; GP32R6-NEXT:    jalrc $25
; GP32R6-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
; GP32R6-NEXT:    jr $ra
; GP32R6-NEXT:    addiu $sp, $sp, 40
;
; GP64-LABEL: sdiv_i128:
; GP64:       # %bb.0: # %entry
; GP64-NEXT:    daddiu $sp, $sp, -16
; GP64-NEXT:    .cfi_def_cfa_offset 16
; GP64-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; GP64-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; GP64-NEXT:    .cfi_offset 31, -8
; GP64-NEXT:    .cfi_offset 28, -16
; GP64-NEXT:    lui $1, %hi(%neg(%gp_rel(sdiv_i128)))
; GP64-NEXT:    daddu $1, $1, $25
; GP64-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(sdiv_i128)))
; GP64-NEXT:    ld $25, %call16(__divti3)($gp)
; GP64-NEXT:    jalr $25
; GP64-NEXT:    nop
; GP64-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; GP64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; GP64-NEXT:    jr $ra
; GP64-NEXT:    daddiu $sp, $sp, 16
;
; GP64R6-LABEL: sdiv_i128:
; GP64R6:       # %bb.0: # %entry
; GP64R6-NEXT:    daddiu $sp, $sp, -16
; GP64R6-NEXT:    .cfi_def_cfa_offset 16
; GP64R6-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
; GP64R6-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
; GP64R6-NEXT:    .cfi_offset 31, -8
; GP64R6-NEXT:    .cfi_offset 28, -16
; GP64R6-NEXT:    lui $1, %hi(%neg(%gp_rel(sdiv_i128)))
; GP64R6-NEXT:    daddu $1, $1, $25
; GP64R6-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(sdiv_i128)))
; GP64R6-NEXT:    ld $25, %call16(__divti3)($gp)
; GP64R6-NEXT:    jalrc $25
; GP64R6-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
; GP64R6-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
; GP64R6-NEXT:    jr $ra
; GP64R6-NEXT:    daddiu $sp, $sp, 16
;
; MMR3-LABEL: sdiv_i128:
; MMR3:       # %bb.0: # %entry
; MMR3-NEXT:    lui $2, %hi(_gp_disp)
; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR3-NEXT:    addiusp -48
; MMR3-NEXT:    .cfi_def_cfa_offset 48
; MMR3-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
; MMR3-NEXT:    swp $16, 36($sp)
; MMR3-NEXT:    .cfi_offset 31, -4
; MMR3-NEXT:    .cfi_offset 17, -8
; MMR3-NEXT:    .cfi_offset 16, -12
; MMR3-NEXT:    addu $16, $2, $25
; MMR3-NEXT:    move $1, $7
; MMR3-NEXT:    lw $7, 68($sp)
; MMR3-NEXT:    lw $17, 72($sp)
; MMR3-NEXT:    lw $3, 76($sp)
; MMR3-NEXT:    move $2, $sp
; MMR3-NEXT:    sw16 $3, 28($2)
; MMR3-NEXT:    sw16 $17, 24($2)
; MMR3-NEXT:    sw16 $7, 20($2)
; MMR3-NEXT:    lw $3, 64($sp)
; MMR3-NEXT:    sw16 $3, 16($2)
; MMR3-NEXT:    lw $25, %call16(__divti3)($16)
; MMR3-NEXT:    move $7, $1
; MMR3-NEXT:    move $gp, $16
; MMR3-NEXT:    jalr $25
; MMR3-NEXT:    nop
; MMR3-NEXT:    lwp $16, 36($sp)
; MMR3-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
; MMR3-NEXT:    addiusp 48
; MMR3-NEXT:    jrc $ra
;
; MMR6-LABEL: sdiv_i128:
; MMR6:       # %bb.0: # %entry
; MMR6-NEXT:    lui $2, %hi(_gp_disp)
; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
; MMR6-NEXT:    addiu $sp, $sp, -48
; MMR6-NEXT:    .cfi_def_cfa_offset 48
; MMR6-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
; MMR6-NEXT:    sw $17, 40($sp) # 4-byte Folded Spill
; MMR6-NEXT:    sw $16, 36($sp) # 4-byte Folded Spill
; MMR6-NEXT:    .cfi_offset 31, -4
; MMR6-NEXT:    .cfi_offset 17, -8
; MMR6-NEXT:    .cfi_offset 16, -12
; MMR6-NEXT:    addu $16, $2, $25
; MMR6-NEXT:    move $1, $7
; MMR6-NEXT:    lw $7, 68($sp)
; MMR6-NEXT:    lw $17, 72($sp)
; MMR6-NEXT:    lw $3, 76($sp)
; MMR6-NEXT:    move $2, $sp
; MMR6-NEXT:    sw16 $3, 28($2)
; MMR6-NEXT:    sw16 $17, 24($2)
; MMR6-NEXT:    sw16 $7, 20($2)
; MMR6-NEXT:    lw $3, 64($sp)
; MMR6-NEXT:    sw16 $3, 16($2)
; MMR6-NEXT:    lw $25, %call16(__divti3)($16)
; MMR6-NEXT:    move $7, $1
; MMR6-NEXT:    move $gp, $16
; MMR6-NEXT:    jalr $25
; MMR6-NEXT:    lw $16, 36($sp) # 4-byte Folded Reload
; MMR6-NEXT:    lw $17, 40($sp) # 4-byte Folded Reload
; MMR6-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
; MMR6-NEXT:    addiu $sp, $sp, 48
; MMR6-NEXT:    jrc $ra
entry:
  %r = sdiv i128 %a, %b
  ret i128 %r
}