
Searched refs:nsz (Results 1 – 25 of 94) sorted by relevance


/external/jemalloc/test/integration/
mallocx.c
134 size_t nsz, rsz; in TEST_BEGIN() local
136 nsz = nallocx(sz, 0); in TEST_BEGIN()
137 assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); in TEST_BEGIN()
143 assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); in TEST_BEGIN()
151 nsz = nallocx(sz, MALLOCX_ZERO); in TEST_BEGIN()
152 assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); in TEST_BEGIN()
156 nsz); in TEST_BEGIN()
158 assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); in TEST_BEGIN()
170 size_t nsz, rsz, sz, alignment, total; in TEST_BEGIN() local
185 nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | in TEST_BEGIN()
[all …]
xallocx.c
417 size_t sz, nsz; in test_zero() local
442 for (sz = szmin; sz < szmax; sz = nsz) { in test_zero()
443 nsz = nallocx(sz+1, flags); in test_zero()
444 assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz, in test_zero()
448 assert_false(validate_fill(p, 0x00, sz, nsz-sz), in test_zero()
449 "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); in test_zero()
450 memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); in test_zero()
451 assert_false(validate_fill(p, FILL_BYTE, 0, nsz), in test_zero()
452 "Memory not filled: nsz=%zu", nsz); in test_zero()
sdallocx.c
15 size_t nsz, sz, alignment, total; in TEST_BEGIN() local
30 nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | in TEST_BEGIN()
34 total += nsz; in TEST_BEGIN()
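
Note: in the jemalloc tests above, nsz holds the size reported by nallocx(), which the assertions cross-check against sallocx() on a live allocation. A minimal sketch of that pattern, assuming a build linked against jemalloc's non-standard API (<jemalloc/jemalloc.h>); the request size is hypothetical:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    size_t sz = 100;
    /* nallocx() reports the real size mallocx() would hand back for this
     * request, without allocating; 0 signals an unsupported request. */
    size_t nsz = nallocx(sz, MALLOCX_ZERO);
    if (nsz == 0)
        return 1;
    void *p = mallocx(sz, MALLOCX_ZERO);
    if (p == NULL)
        return 1;
    /* sallocx() reports the usable size of the live allocation; the tests
     * assert it matches nallocx()'s prediction exactly. */
    size_t rsz = sallocx(p, 0);
    printf("requested=%zu nallocx=%zu sallocx=%zu\n", sz, nsz, rsz);
    dallocx(p, 0);
    return 0;
}
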
/external/jemalloc_new/test/integration/
mallocx.c
121 size_t nsz, rsz; in TEST_BEGIN() local
123 nsz = nallocx(sz, 0); in TEST_BEGIN()
124 assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); in TEST_BEGIN()
130 assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); in TEST_BEGIN()
138 nsz = nallocx(sz, MALLOCX_ZERO); in TEST_BEGIN()
139 assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); in TEST_BEGIN()
143 nsz); in TEST_BEGIN()
145 assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); in TEST_BEGIN()
164 size_t nsz, rsz, alignment, total; in TEST_BEGIN() local
180 nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | in TEST_BEGIN()
[all …]
xallocx.c
317 size_t sz, nsz; in test_zero() local
344 for (sz = szmin; sz < szmax; sz = nsz) { in test_zero()
345 nsz = nallocx(sz+1, flags); in test_zero()
346 if (xallocx(p, sz+1, 0, flags) != nsz) { in test_zero()
352 assert_false(validate_fill(p, 0x00, sz, nsz-sz), in test_zero()
353 "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); in test_zero()
354 memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); in test_zero()
355 assert_false(validate_fill(p, FILL_BYTE, 0, nsz), in test_zero()
356 "Memory not filled: nsz=%zu", nsz); in test_zero()
sdallocx.c
13 size_t nsz, sz, alignment, total; in TEST_BEGIN() local
29 nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | in TEST_BEGIN()
33 total += nsz; in TEST_BEGIN()
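
Note: the xallocx.c matches step through size classes with sz = nsz, where nallocx(sz+1, flags) predicts the next class and xallocx() is expected to grow the allocation in place to exactly that size; sdallocx.c exercises the sized-deallocation counterpart. A hedged sketch of that resize step, with the flags simplified to 0:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    size_t sz = 1;
    void *p = mallocx(sz, 0);
    if (p == NULL)
        return 1;
    /* Predict the next size class up, then ask xallocx() to grow the
     * allocation in place. xallocx() returns the resulting usable size
     * (unchanged if it could not resize), which the tests compare
     * against the nallocx() prediction. */
    size_t nsz = nallocx(sz + 1, 0);
    size_t got = xallocx(p, sz + 1, 0, 0);
    printf("next class=%zu xallocx=%zu\n", nsz, got);
    /* Sized deallocation: passing the size back lets jemalloc skip the
     * internal size lookup. */
    sdallocx(p, got, 0);
    return 0;
}
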
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
fast-math.ll
53 ; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
56 ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz double [[F1:%.*]], 6.000000e+00
59 %t1 = fmul reassoc nsz double 5.000000e+00, %f1
60 %t2 = fadd reassoc nsz double %f1, %t1
64 ; TODO: This doesn't require 'nsz'. It should fold to f1 * 6.0.
89 ; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
92 ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[F1:%.*]], [[F2:%.*]]
93 ; CHECK-NEXT: [[TMP2:%.*]] = fsub reassoc nsz float 9.000000e+00, [[TMP1]]
98 %add = fadd reassoc nsz float %sub, %sub1
102 ; TODO: This doesn't require 'nsz'. It should fold to (9.0 - (f1 + f2)).
[all …]
2006-10-26-VectorReassoc.ll
27 ; Verify this folds with 'reassoc' and 'nsz' ('nsz' not technically required)
30 ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz <4 x float> [[V:%.*]], <float 1.000000e+00, float…
32 …%Y = fmul reassoc nsz <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00…
33 …%Z = fmul reassoc nsz <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+0…
38 ; TODO: This doesn't require 'nsz'. It should fold to V * { 1.0, 4.0e+05, -9.0, 16.0 }
73 ; Verify this folds with 'reassoc' and 'nsz' ('nsz' not technically required)
76 ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz <4 x float> [[V:%.*]], <float 2.000000e+00, float…
78 …%Y = fadd reassoc nsz <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00…
79 …%Z = fadd reassoc nsz <4 x float> %Y, < float 1.000000e+00, float 2.000000e+00, float -3.000000e+0…
84 ; TODO: This doesn't require 'nsz'. It should fold to V + { 2.0, 4.0, 0.0, 8.0 }
[all …]
fsub.ll
17 ; Can't do anything with the test above because -0.0 - 0.0 = -0.0, but if we have nsz:
22 ; CHECK-NEXT: [[TMP1:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]]
26 %t2 = fsub nsz float -0.0, %t1
40 ; CHECK-NEXT: [[T2:%.*]] = fsub nsz float -0.000000e+00, [[T1]]
45 %t2 = fsub nsz float -0.0, %t1
50 ; With nsz: Z - (X - Y) --> Z + (Y - X)
54 ; CHECK-NEXT: [[TMP1:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]]
55 ; CHECK-NEXT: [[T2:%.*]] = fadd nsz float [[TMP1]], [[Z:%.*]]
59 %t2 = fsub nsz float %z, %t1
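
Note: the fsub.ll comments state the constraint these tests check: IEEE 754 keeps +0.0 and -0.0 distinct, so a fold like -0.0 - (x - y) ==> y - x can flip the sign of a zero result, and is only legal under the nsz ("no signed zeros") flag. A small C illustration of that corner case (volatile keeps the compiler from doing the fold itself):

#include <math.h>
#include <stdio.h>

int main(void) {
    volatile float x = 0.0f, y = 0.0f;
    /* Original form: -0.0 - (x - y) == -0.0 - 0.0 == -0.0. */
    float original = -0.0f - (x - y);
    /* Folded form: y - x == +0.0. The zero's sign differs, which is
     * exactly the difference nsz declares irrelevant. */
    float folded = y - x;
    printf("original signbit=%d folded signbit=%d\n",
           signbit(original) ? 1 : 0, signbit(folded) ? 1 : 0);
    return 0;
}
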
fabs.ll
321 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan nsz double @llvm.fabs.f64(double [[X:%.*]])
324 %ltzero = fcmp nnan nsz olt double %x, 0.0
334 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf nsz float @llvm.fabs.f32(float [[X:%.*]])
337 %ltzero = fcmp nnan nsz ninf olt float %x, -0.0
360 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan nsz float @llvm.fabs.f32(float [[X:%.*]])
363 %lezero = fcmp nnan nsz ole float %x, -0.0
373 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan nsz arcp <2 x float> @llvm.fabs.v2f32(<2 x float> [[X:%.*…
376 %gtzero = fcmp nnan nsz arcp ogt <2 x float> %x, zeroinitializer
399 ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc nnan nsz <2 x double> @llvm.fabs.v2f64(<2 x double> [[…
402 %gezero = fcmp nnan nsz reassoc oge <2 x double> %x, zeroinitializer
[all …]
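
Note: the fabs.ll hits fold a compare-and-select negation idiom into an llvm.fabs call, and the nnan nsz flags are what make that sound: for x == -0.0 the idiom returns -0.0 while fabs() returns +0.0, and for NaN inputs the comparison takes the wrong arm. The same corner in C:

#include <math.h>
#include <stdio.h>

int main(void) {
    volatile double x = -0.0;
    /* Hand-written abs: -0.0 < 0.0 is false, so this yields -0.0,
     * unlike fabs(), which always clears the sign bit. Folding the
     * idiom to fabs() therefore needs nsz (and nnan for NaNs). */
    double idiom = (x < 0.0) ? -x : x;
    printf("idiom signbit=%d fabs signbit=%d\n",
           signbit(idiom) ? 1 : 0, signbit(fabs(x)) ? 1 : 0);
    return 0;
}
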
/external/llvm/test/Bitcode/
fcmp-fast.ll
13 ; CHECK: %nsz = fcmp nsz uge float %a, %b
14 %nsz = fcmp nsz uge float %a, %b
15 ; CHECK: %nnan = fcmp nnan nsz oge double %c, %d
16 %nnan = fcmp nnan nsz oge double %c, %d
19 %dce2 = or i1 %dce1, %nsz
/external/swiftshader/third_party/llvm-7.0/llvm/test/Bitcode/
fcmp-fast.ll
13 ; CHECK: %nsz = fcmp nsz uge float %a, %b
14 %nsz = fcmp nsz uge float %a, %b
15 ; CHECK: %nnan = fcmp nnan nsz oge double %c, %d
16 %nnan = fcmp nnan nsz oge double %c, %d
19 %dce2 = or i1 %dce1, %nsz
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
sqrt-fastmath-mir.ll
34 ; CHECK: %3:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %0, %1
36 ; CHECK: %5:fr32 = nnan ninf nsz arcp contract afn reassoc VFMADD213SSr %1, killed %3, %4
38 ; CHECK: %7:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %1, %6
39 ; CHECK: %8:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr killed %7, killed %5
40 ; CHECK: %9:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %0, %8
41 ; CHECK: %10:fr32 = nnan ninf nsz arcp contract afn reassoc VFMADD213SSr %8, killed %9, %4
42 ; CHECK: %11:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %8, %6
43 ; CHECK: %12:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr killed %11, killed %10
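Note: the sqrt-fastmath-mir.ll CHECK lines verify that the full fast-math flag set survives instruction selection for an estimate-plus-refinement sqrt expansion; the VMULSSrr/VFMADD213SSr chain corresponds to Newton-Raphson steps on a reciprocal-square-root estimate. A sketch of the arithmetic such a chain encodes, in its generic form rather than the test's exact constant placement:

#include <math.h>
#include <stdio.h>

/* One Newton-Raphson refinement of y ~= 1/sqrt(x):
 * y' = y * (1.5 - 0.5 * x * y * y). Expansions like the MIR above
 * fuse the inner multiply-add into an FMA, then finish with x * y'
 * to recover sqrt(x). */
static float rsqrt_step(float x, float y) {
    return y * (1.5f - 0.5f * x * y * y);
}

int main(void) {
    float x = 2.0f;
    float y = 0.7f; /* crude initial estimate of 1/sqrt(2) */
    y = rsqrt_step(x, y);
    y = rsqrt_step(x, y);
    printf("x*y=%f sqrtf(x)=%f\n", x * y, sqrtf(x));
    return 0;
}
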
fp-fold.ll
32 %neg = fsub nsz float 0.0, %x
43 %r = fadd reassoc nsz float %sum, 12.0
51 %r = fadd nsz float %x, -0.0
59 %r = fadd nsz float %x, 0.0
86 %neg = fsub nsz float 0.0, %x
87 %r = fadd nsz float %neg, %y
109 %r = fsub nsz float %x, 0.0
118 %r = fsub nsz float 0.0, %x
126 %r = fsub nsz float %x, -0.0
135 %r = fmul nnan nsz float %x, 0.0
negative-sin.ll
40 define double @nsz(double %e) nounwind {
41 ; CHECK-LABEL: nsz:
44 %f = fsub nsz double 0.0, %e
46 %h = fsub nsz double 0.0, %g
64 %h = fsub nsz double 0.0, %g
79 %f = fsub nsz double 0.0, %e
fmf-propagation.ll
8 ; CHECK: t5: f32 = fadd nsz t2, t4
15 ; CHECK-NEXT: t12: f32 = fadd nnan ninf nsz arcp contract afn reassoc t11, t4
20 %f1 = fadd nsz float %x, %y
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstSimplify/
fast-math.ll
9 %b = fmul nsz nnan float %a, 0.0
25 %b = fmul nsz nnan <2 x float> %a, <float 0.0, float undef>
32 ; CHECK-NEXT: [[B:%.*]] = fmul nsz float [[A:%.*]], 0.000000e+00
35 %b = fmul nsz float %a, 0.0
164 %ret = fadd nsz float %no_zero, %zero1
169 ; fsub nsz 0.0, (fsub 0.0, X) ==> X
175 %ret = fsub nsz float 0.0, %t1
184 %ret = fsub nsz <2 x float> zeroinitializer, %t1
193 %ret = fsub nsz <2 x float> <float undef, float -0.0>, %t1
197 ; fadd nsz X, 0 ==> X
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/
fast-basictest.ll
16 ; Both 'reassoc' and 'nsz' are required.
19 ; CHECK-NEXT: [[ARG_NEG:%.*]] = fsub reassoc nsz float -0.000000e+00, [[ARG:%.*]]
22 %t1 = fsub reassoc nsz float -1.200000e+01, %arg
23 %t2 = fadd reassoc nsz float %t1, 1.200000e+01
27 ; Verify the fold is not done with only 'reassoc' ('nsz' is required).
226 ; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
229 ; CHECK-NEXT: [[FACTOR:%.*]] = fmul reassoc nsz float [[X:%.*]], 9.400000e+01
232 %Y = fmul reassoc nsz float %X, 4.700000e+01
233 %Z = fadd reassoc nsz float %Y, %Y
237 ; TODO: This doesn't require 'nsz'. It should fold to X * 94.0
[all …]
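
Note: the Reassociate tests spell out why both flags matter: collapsing (-12.0 - arg) + 12.0 to -arg reorders the constants (reassoc) and, when arg == 0.0, turns a +0.0 result into -0.0 (nsz). Checking that corner in C:

#include <math.h>
#include <stdio.h>

int main(void) {
    volatile float arg = 0.0f;
    /* Unfolded: (-12.0 - 0.0) + 12.0 == +0.0 under round-to-nearest. */
    float unfolded = (-12.0f - arg) + 12.0f;
    /* Folded: -arg == -0.0. Only nsz makes the two interchangeable. */
    float folded = -arg;
    printf("unfolded signbit=%d folded signbit=%d\n",
           signbit(unfolded) ? 1 : 0, signbit(folded) ? 1 : 0);
    return 0;
}
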
fast-MissedTree.ll
15 ; Both 'reassoc' and 'nsz' are required.
18 ; CHECK-NEXT: [[Z:%.*]] = fadd reassoc nsz float %A, %B
21 %W = fadd reassoc nsz float %B, -5.0
22 %Y = fadd reassoc nsz float %A, 5.0
23 %Z = fadd reassoc nsz float %W, %Y
27 ; Verify the fold is not done with only 'reassoc' ('nsz' is required).
fast-SubReassociate.ll
32 ; Both 'reassoc' and 'nsz' are required.
35 ; CHECK-NEXT: [[Z:%.*]] = fsub reassoc nsz float %A, %B
38 %W = fadd reassoc nsz float %B, 5.000000e+00
39 %X = fadd reassoc nsz float %A, -7.000000e+00
40 %Y = fsub reassoc nsz float %X, %W
41 %Z = fadd reassoc nsz float %Y, 1.200000e+01
45 ; Verify the fold is not done with only 'reassoc' ('nsz' is required).
/external/llvm/test/Transforms/InstSimplify/
fast-math.ll
9 %b = fmul nsz nnan float %a, 0.0
24 ; CHECK: [[B:%.*]] = fmul nsz float %a, 0.000000e+00
27 %b = fmul nsz float %a, 0.0
81 %ret = fadd nsz float %no_zero, %zero7
103 %ret = fadd nsz float %no_zero, %zero1
108 ; fsub nsz 0.0, (fsub 0.0, X) ==> X
114 %ret = fsub nsz float 0.0, %t1
118 ; fadd nsz X, 0 ==> X
133 ; fdiv nsz nnan 0, X ==> 0
139 %r = fdiv nnan nsz double 0.0, %X
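
Note: the InstSimplify matches include the fold fdiv nsz nnan 0, X ==> 0, and each flag covers one corner: a negative X makes the exact result -0.0 (needs nsz), while X being zero or NaN makes it NaN (needs nnan). Both corners in C:

#include <math.h>
#include <stdio.h>

int main(void) {
    volatile double neg = -2.0, zero = 0.0;
    /* 0.0 / -2.0 == -0.0: folding to +0.0 flips the zero's sign. */
    printf("0/-2 signbit=%d\n", signbit(0.0 / neg) ? 1 : 0);
    /* 0.0 / 0.0 == NaN: folding to 0.0 assumes NaNs are absent. */
    printf("0/0  isnan=%d\n", isnan(0.0 / zero) ? 1 : 0);
    return 0;
}
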
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/MIR/X86/
fastmath.mir
17 ; CHECK: %3:fr32 = nsz VMULSSrr %2, %2
18 %3:fr32 = nsz VMULSSrr %2, %2
27 ; CHECK: %8:fr32 = nsz arcp contract afn reassoc VMULSSrr %7, %7
28 %8:fr32 = nsz arcp contract afn reassoc VMULSSrr %7, %7
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
fcmp-xo.ll
5 %1 = fcmp nsz olt float undef, 0.000000e+00
16 %2 = fcmp nsz olt float undef, 1.000000e+00
29 %1 = fcmp nsz olt float undef, 128.000000e+00
42 %1 = fcmp nsz olt double undef, 1.000000e+00
51 %1 = fcmp nsz olt double undef, 128.000000e+00
/external/llvm/test/Assembler/
fast-math-flags.ll
126 %b = fsub nnan nsz fast float %x, %y
130 %c = fmul nsz fast arcp float %x, %y
131 ; CHECK: %c_vec = fmul nsz <3 x float> %vec, %vec
132 %c_vec = fmul nsz <3 x float> %vec, %vec
137 ; CHECK: %e = frem nnan nsz float %x, %y
138 %e = frem nnan nsz float %x, %y
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
non-simple-args-intrin.ll
33 %16 = call nsz <16 x double> @llvm.sqrt.v16f64(<16 x double> %15)
34 %17 = fmul nsz <16 x double> %16, undef
35 %18 = fadd nsz <16 x double> %17, undef
