; RUN: opt -S -passes=openmpopt -aa-pipeline=basic-aa -openmp-hide-memory-transfer-latency -debug-only=openmp-opt < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
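;
; Checks that OpenMPOpt's hide-memory-transfer-latency analysis can look
; through the offload arrays passed to __tgt_target_data_begin_mapper and
; recover the values stored in them (base pointers, pointers, and sizes),
; as printed by the debug output matched by the CHECK lines below.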

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"

@.__omp_offloading_heavyComputation.region_id = weak constant i8 0
@.offload_maptypes. = private unnamed_addr constant [2 x i64] [i64 35, i64 35]

%struct.ident_t = type { i32, i32, i32, i32, i8* }

@.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@0 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0) }, align 8

; CHECK-LABEL: {{[^@]+}}Successfully got offload values:
; CHECK-NEXT: offload_baseptrs: double* %a ---   %size.addr = alloca i32, align 4 ---
; CHECK-NEXT: offload_ptrs: double* %a ---   %size.addr = alloca i32, align 4 ---
; CHECK-NEXT: offload_sizes:   %0 = shl nuw nsw i64 %conv, 3 --- i64 4 ---

;int heavyComputation(double* a, unsigned size) {
;  int random = rand() % 7;
;
;  //#pragma omp target data map(a[0:size], size)
;  void* args[2];
;  args[0] = &a;
;  args[1] = &size;
;  __tgt_target_data_begin(..., args, ...)
;
;  #pragma omp target teams
;  for (int i = 0; i < size; ++i) {
;    a[i] = ++a[i] * 3.141624;
;  }
;
;  return random;
;}
define dso_local i32 @heavyComputation(double* %a, i32 %size) {
entry:
  %size.addr = alloca i32, align 4
  %.offload_baseptrs = alloca [2 x i8*], align 8
  %.offload_ptrs = alloca [2 x i8*], align 8
  %.offload_sizes = alloca [2 x i64], align 8

  store i32 %size, i32* %size.addr, align 4
  %call = tail call i32 (...) @rand()

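  ; Populate the offload arrays: slot 0 holds %a (the mapped array, 8 * %size
  ; bytes, computed by the shl below), slot 1 holds %size.addr (the mapped
  ; scalar, 4 bytes).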
  %conv = zext i32 %size to i64
  %0 = shl nuw nsw i64 %conv, 3
  %1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs, i64 0, i64 0
  %2 = bitcast [2 x i8*]* %.offload_baseptrs to double**
  store double* %a, double** %2, align 8
  %3 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs, i64 0, i64 0
  %4 = bitcast [2 x i8*]* %.offload_ptrs to double**
  store double* %a, double** %4, align 8
  %5 = getelementptr inbounds [2 x i64], [2 x i64]* %.offload_sizes, i64 0, i64 0
  store i64 %0, i64* %5, align 8
  %6 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_baseptrs, i64 0, i64 1
  %7 = bitcast i8** %6 to i32**
  store i32* %size.addr, i32** %7, align 8
  %8 = getelementptr inbounds [2 x i8*], [2 x i8*]* %.offload_ptrs, i64 0, i64 1
  %9 = bitcast i8** %8 to i32**
  store i32* %size.addr, i32** %9, align 8
  %10 = getelementptr inbounds [2 x i64], [2 x i64]* %.offload_sizes, i64 0, i64 1
  store i64 4, i64* %10, align 8
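  ; The begin/end mapper calls below delimit the target data region; the
  ; analysis inspects the array arguments passed to the begin call.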
  call void @__tgt_target_data_begin_mapper(%struct.ident_t* @0, i64 -1, i32 2, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes., i64 0, i64 0), i8** null, i8** null)
  %rem = srem i32 %call, 7
  call void @__tgt_target_data_end_mapper(%struct.ident_t* @0, i64 -1, i32 2, i8** nonnull %1, i8** nonnull %3, i64* nonnull %5, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes., i64 0, i64 0), i8** null, i8** null)
  ret i32 %rem
}

declare void @__tgt_target_data_begin_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**)
declare void @__tgt_target_data_end_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**)

declare dso_local i32 @rand(...)