; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -simplifycfg -S | FileCheck %s
; RUN: opt < %s -simplifycfg -phi-node-folding-threshold=2 -S | FileCheck %s
; RUN: opt < %s -simplifycfg -phi-node-folding-threshold=3 -S | FileCheck %s

; This is checking that the multiplication does overflow, with a leftover
; guard against division-by-zero that was needed before InstCombine
; produced llvm.umul.with.overflow.

define i1 @will_overflow(i64 %size, i64 %nmemb) {
; CHECK-LABEL: @will_overflow(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[SIZE]], i64 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[UMUL_NOT_OV:%.*]] = xor i1 [[UMUL_OV]], true
; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[CMP]], i1 true, i1 [[UMUL_NOT_OV]]
; CHECK-NEXT:    ret i1 [[TMP0]]
;
entry:
  ; Division-by-zero guard: skip the overflow check entirely when %size == 0.
  %cmp = icmp eq i64 %size, 0
  br i1 %cmp, label %land.end, label %land.rhs

land.rhs:                                         ; preds = %entry
  ; Unsigned multiply %size * %nmemb; field 1 of the result is the overflow bit.
  %umul = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
  %umul.ov = extractvalue { i64, i1 } %umul, 1
  %umul.not.ov = xor i1 %umul.ov, true
  br label %land.end

land.end:                                         ; preds = %land.rhs, %entry
  ; SimplifyCFG should fold this phi into a select (see CHECK lines above).
  %0 = phi i1 [ true, %entry ], [ %umul.not.ov, %land.rhs ]
  ret i1 %0
}
; Function Attrs: nounwind readnone speculatable
declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0