/external/libcxxabi/test/
  test_fallback_malloc.pass.cpp
      22  container ptrs;  in alloc_series() local
      26  ptrs.push_back ( p );  in alloc_series()
      27  return ptrs;  in alloc_series()
      31  container ptrs;  in alloc_series() local
      35  ptrs.push_back ( p );  in alloc_series()
      39  return ptrs;  in alloc_series()
      43  container ptrs;  in alloc_series() local
      50  ptrs.push_back ( p );  in alloc_series()
      53  return ptrs;  in alloc_series()
      70  container ptrs;  in exhaustion_test1() local
      [all …]

/external/jemalloc_new/test/unit/
  arena_reset.c
      98  do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {  in do_arena_reset_pre() argument
      110  *ptrs = (void **)malloc(*nptrs * sizeof(void *));  in do_arena_reset_pre()
      111  assert_ptr_not_null(*ptrs, "Unexpected malloc() failure");  in do_arena_reset_pre()
      116  (*ptrs)[i] = mallocx(sz, flags);  in do_arena_reset_pre()
      117  assert_ptr_not_null((*ptrs)[i],  in do_arena_reset_pre()
      122  (*ptrs)[nsmall + i] = mallocx(sz, flags);  in do_arena_reset_pre()
      123  assert_ptr_not_null((*ptrs)[i],  in do_arena_reset_pre()
      131  assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,  in do_arena_reset_pre()
      137  do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {  in do_arena_reset_post() argument
      149  assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0,  in do_arena_reset_post()
      [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Verifier/
  scatter_gather.ll
      5  define <16 x float> @gather2(<16 x float*> %ptrs, <16 x i1>* %mask, <16 x float> %passthru) {
      6  …%res = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> %ptrs, i32 4, <16 x i1>…
      13  define <8 x float> @gather3(<8 x float*> %ptrs, <16 x i1> %mask, <8 x float> %passthru) {
      14  …%res = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <16 x i1> %ma…
      21  define <8 x float>* @gather4(<8 x float*> %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
      22  …%res = call <8 x float>* @llvm.masked.gather.p0v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %…
      29  define <8 x float> @gather5(<8 x float*>* %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
      30  …%res = call <8 x float> @llvm.masked.gather.v8f32.p0v8p0f32(<8 x float*>* %ptrs, i32 4, <8 x i1> %…
      37  define <8 x float> @gather6(<8 x float> %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
      38  …%res = call <8 x float> @llvm.masked.gather.v8f32.v8f32(<8 x float> %ptrs, i32 4, <8 x i1> %mask, …
      [all …]

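Every gather call in the matches above takes the same four operands: a vector of pointers, an alignment immediate, a per-lane mask, and a passthru vector. As a reading aid, here is a plain-C sketch of the per-lane semantics of llvm.masked.gather and llvm.masked.scatter; the masked_gather_f32/masked_scatter_f32 helpers are invented for illustration and are not part of any file listed here.

    #include <stdio.h>
    #include <stddef.h>

    /* Scalar model of the masked intrinsics: each lane's memory access
     * happens only where the mask is set; inactive gather lanes keep the
     * corresponding passthru value. */
    static void masked_gather_f32(size_t n, float *out, float **ptrs,
                                  const unsigned char *mask, const float *passthru)
    {
        for (size_t i = 0; i < n; i++)
            out[i] = mask[i] ? *ptrs[i] : passthru[i];
    }

    static void masked_scatter_f32(size_t n, float **ptrs, const float *val,
                                   const unsigned char *mask)
    {
        for (size_t i = 0; i < n; i++)
            if (mask[i])
                *ptrs[i] = val[i];
    }

    int main(void)
    {
        float a = 1.0f, b = 2.0f, out[2];
        float *ptrs[2] = { &a, &b };
        const unsigned char mask[2] = { 1, 0 };     /* lane 1 is inactive */
        const float passthru[2] = { -1.0f, -1.0f };
        const float val[2] = { 10.0f, 20.0f };

        masked_gather_f32(2, out, ptrs, mask, passthru);
        printf("%g %g\n", out[0], out[1]);          /* prints: 1 -1 */

        masked_scatter_f32(2, ptrs, val, mask);     /* only a is written */
        printf("%g %g\n", a, b);                    /* prints: 10 2 */
        return 0;
    }

The Verifier tests above exist because the lane counts of the pointer, mask, and passthru vectors must agree; cases such as gather3 (an 8-wide gather with a 16-wide mask) are the deliberately malformed inputs.
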
/external/jemalloc/test/unit/
  arena_reset.c
      86  void **ptrs;  in TEST_BEGIN() local
      104  ptrs = (void **)malloc(nptrs * sizeof(void *));  in TEST_BEGIN()
      105  assert_ptr_not_null(ptrs, "Unexpected malloc() failure");  in TEST_BEGIN()
      110  ptrs[i] = mallocx(sz, flags);  in TEST_BEGIN()
      111  assert_ptr_not_null(ptrs[i],  in TEST_BEGIN()
      116  ptrs[nsmall + i] = mallocx(sz, flags);  in TEST_BEGIN()
      117  assert_ptr_not_null(ptrs[i],  in TEST_BEGIN()
      122  ptrs[nsmall + nlarge + i] = mallocx(sz, flags);  in TEST_BEGIN()
      123  assert_ptr_not_null(ptrs[i],  in TEST_BEGIN()
      131  assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,  in TEST_BEGIN()
      [all …]

/external/tensorflow/tensorflow/core/common_runtime/gpu/
  gpu_bfc_allocator_test.cc
      61  std::vector<void*> ptrs;  in TEST() local
      64  ptrs.push_back(raw);  in TEST()
      68  std::sort(ptrs.begin(), ptrs.end());  in TEST()
      71  for (size_t i = 1; i < ptrs.size(); i++) {  in TEST()
      72  ASSERT_NE(ptrs[i], ptrs[i - 1]);  // No dups  in TEST()
      73  size_t req_size = a.RequestedSize(ptrs[i - 1]);  in TEST()
      75  ASSERT_GE(static_cast<char*>(ptrs[i]) - static_cast<char*>(ptrs[i - 1]),  in TEST()
      79  for (size_t i = 0; i < ptrs.size(); i++) {  in TEST()
      80  a.DeallocateRaw(ptrs[i]);  in TEST()
      389  std::vector<void*> ptrs;  in BM_AllocationDelayed() local
      [all …]

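The BFC allocator test above records every returned pointer, sorts by address, and asserts that neighbouring blocks neither repeat nor overlap. A minimal C sketch of the same check, using plain malloc() and locally recorded sizes instead of the allocator's RequestedSize() query (struct block and cmp_block are made-up names; comparing addresses of unrelated allocations is formally unspecified in C, which the real test also relies on working in practice):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NBLOCKS 256

    /* Pair each pointer with the size requested for it, so the pairs can be
     * sorted by address and neighbouring blocks compared for overlap. */
    struct block {
        char *ptr;
        size_t size;
    };

    static int cmp_block(const void *a, const void *b)
    {
        const struct block *ba = a, *bb = b;
        return (ba->ptr > bb->ptr) - (ba->ptr < bb->ptr);
    }

    int main(void)
    {
        struct block blocks[NBLOCKS];

        /* Allocate blocks of varying size and remember the requested size. */
        for (size_t i = 0; i < NBLOCKS; i++) {
            blocks[i].size = 16 + (i % 64) * 8;
            blocks[i].ptr = malloc(blocks[i].size);
            assert(blocks[i].ptr != NULL);
        }

        /* Sort by address; any overlap now shows up between neighbours. */
        qsort(blocks, NBLOCKS, sizeof(blocks[0]), cmp_block);

        for (size_t i = 1; i < NBLOCKS; i++) {
            assert(blocks[i].ptr != blocks[i - 1].ptr);          /* no duplicates */
            assert((size_t)(blocks[i].ptr - blocks[i - 1].ptr)
                   >= blocks[i - 1].size);                       /* no overlap */
        }

        for (size_t i = 0; i < NBLOCKS; i++)
            free(blocks[i].ptr);
        puts("ok");
        return 0;
    }
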
/external/ltp/testcases/kernel/device-drivers/misc_modules/per_cpu_allocator_module/
  test_per_cpu_allocator_module.c
      86  static void *ptrs[NR_CMDS];  variable
      137  if (!ptrs[cmdno])  in free_cmd()
      140  verify_area(ptrs[cmdno], cmds[cmdno].size, cmdno);  in free_cmd()
      141  free_percpu(ptrs[cmdno]);  in free_cmd()
      142  ptrs[cmdno] = NULL;  in free_cmd()
      155  ptrs[i] = __alloc_percpu(cmd->size,  in run_test()
      158  if (ptrs[i])  in run_test()
      159  fill_area(ptrs[i], cmd->size, i);  in run_test()

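The LTP module above drives the kernel's per-cpu allocator: __alloc_percpu() returns a per-cpu cookie, per_cpu_ptr() resolves it to the address of one CPU's copy, and free_percpu() releases all copies. A minimal, hedged kernel-module sketch of that round trip, assuming a current Linux kernel tree; this is not the LTP test itself, and the pcpu_demo_* names are invented:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/percpu.h>
    #include <linux/string.h>

    static void __percpu *pcpu_buf;

    static int __init pcpu_demo_init(void)
    {
        int cpu;

        /* One 64-byte area per possible CPU, naturally aligned. */
        pcpu_buf = __alloc_percpu(64, __alignof__(unsigned long));
        if (!pcpu_buf)
            return -ENOMEM;

        /* Touch each CPU's private copy through per_cpu_ptr(). */
        for_each_possible_cpu(cpu)
            memset(per_cpu_ptr(pcpu_buf, cpu), 0xaa, 64);

        return 0;
    }

    static void __exit pcpu_demo_exit(void)
    {
        free_percpu(pcpu_buf);
    }

    module_init(pcpu_demo_init);
    module_exit(pcpu_demo_exit);
    MODULE_LICENSE("GPL");
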
/external/jemalloc_new/msvc/test_threads/
  test_threads.cpp
      43  uint8_t* ptrs[numAllocsMax];  in test_threads() local
      53  ptrs[j] = (uint8_t*)je_malloc(sz);  in test_threads()
      54  if (!ptrs[j]) {  in test_threads()
      59  ptrs[j][k] = tid + k;  in test_threads()
      63  if (ptrs[j][k] != (uint8_t)(tid + k)) {  in test_threads()
      64  …error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(ti…  in test_threads()
      67  je_free(ptrs[j]);  in test_threads()

/external/jemalloc/msvc/projects/vc2015/test_threads/
  test_threads.cpp
      44  uint8_t* ptrs[numAllocsMax];  in test_threads() local
      54  ptrs[j] = (uint8_t*)je_malloc(sz);  in test_threads()
      55  if (!ptrs[j]) {  in test_threads()
      60  ptrs[j][k] = tid + k;  in test_threads()
      64  if (ptrs[j][k] != (uint8_t)(tid + k)) {  in test_threads()
      65  …error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(ti…  in test_threads()
      68  je_free(ptrs[j]);  in test_threads()

/external/ltp/testcases/kernel/mem/mtest07/
  mallocstress.c
      127  long *ptrs[MAXPTRS];  in allocate_free() local
      140  ptrs[num_alloc] = malloc(size);  in allocate_free()
      142  if (!ptrs[num_alloc])  in allocate_free()
      144  ptrs[num_alloc][0] = num_alloc;  in allocate_free()
      173  if (ptrs[i][0] != i) {  in allocate_free()
      179  free(ptrs[i]);  in allocate_free()

/external/tensorflow/tensorflow/core/framework/
  allocator_test.cc
      91  std::vector<void*> ptrs;  in TEST() local
      94  ptrs.push_back(raw);  in TEST()
      96  std::sort(ptrs.begin(), ptrs.end());  in TEST()
      98  for (size_t i = 0; i < ptrs.size(); i++) {  in TEST()
      100  CHECK_NE(ptrs[i], ptrs[i - 1]);  // No dups  in TEST()
      102  a->DeallocateRaw(ptrs[i]);  in TEST()

/external/grpc-grpc/src/core/lib/gpr/
  arena.cc
      42  void** ptrs;  member
      55  gpr_free(arena->ptrs[i]);  in gpr_arena_destroy()
      57  gpr_free(arena->ptrs);  in gpr_arena_destroy()
      64  arena->ptrs =  in gpr_arena_alloc()
      65  (void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));  in gpr_arena_alloc()
      66  void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size);  in gpr_arena_alloc()

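The arena.cc matches show the allocation-tracking flavour of the gpr arena: gpr_arena_alloc() grows a ptrs array by one slot, stores a freshly zeroed block there, and gpr_arena_destroy() frees every tracked block plus the array itself. A stripped-down C sketch of that pattern using the standard allocator; the arena_* names here are invented and are not the gRPC API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Arena that records every allocation in a growable pointer array so a
     * single destroy call can release everything at once. */
    struct arena {
        void **ptrs;
        size_t num_ptrs;
    };

    static struct arena *arena_create(void)
    {
        return calloc(1, sizeof(struct arena));
    }

    static void *arena_alloc(struct arena *a, size_t size)
    {
        void **grown = realloc(a->ptrs, sizeof(void *) * (a->num_ptrs + 1));
        if (!grown)
            return NULL;
        a->ptrs = grown;

        void *block = calloc(1, size);   /* zeroed, like gpr_zalloc() above */
        if (block)
            a->ptrs[a->num_ptrs++] = block;
        return block;
    }

    static void arena_destroy(struct arena *a)
    {
        for (size_t i = 0; i < a->num_ptrs; i++)
            free(a->ptrs[i]);
        free(a->ptrs);
        free(a);
    }

    int main(void)
    {
        struct arena *a = arena_create();
        if (!a)
            return 1;

        char *s = arena_alloc(a, 16);
        if (s)
            strcpy(s, "hello arena");
        printf("%s\n", s ? s : "(alloc failed)");

        arena_destroy(a);   /* frees the string and the tracking array */
        return 0;
    }

Growing the array one slot per allocation keeps the sketch close to the matches above; a throughput-oriented arena would typically grow geometrically or bump-allocate out of larger blocks instead.
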
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
  masked_intrinsics.ll
      3  declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
      4  declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
      5  declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2…
      6  declare void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i…
      51  define <2 x double> @gather_zeromask(<2 x double*> %ptrs, <2 x double> %passthru) {
      52  …%res = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 5, <2 x i1> ze…
      59  define void @scatter_zeromask(<2 x double*> %ptrs, <2 x double> %val) {
      60  …call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32 6, <2 x i…

/external/llvm/test/Transforms/InstCombine/
  masked_intrinsics.ll
      3  declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %ptrs, i32, <2 x i1> %mask, <2 x…
      4  declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptrs, i32, <2 x i1>…
      5  declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32, <2 x i1> %mask, <2 x doubl…
      6  declare void @llvm.masked.scatter.v2f64(<2 x double> %val, <2 x double*> %ptrs, i32, <2 x i1> %mask)
      51  define <2 x double> @gather_zeromask(<2 x double*> %ptrs, <2 x double> %passthru) {
      52  …%res = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32 5, <2 x i1> zeroinitia…
      59  define void @scatter_zeromask(<2 x double*> %ptrs, <2 x double> %val) {
      60  …call void @llvm.masked.scatter.v2f64(<2 x double> %val, <2 x double*> %ptrs, i32 6, <2 x i1> zeroi…

/external/u-boot/lib/libfdt/
  fdt_region.c
      343  struct fdt_region_ptrs *p = &info->ptrs;  in fdt_first_region()
      446  if (info->ptrs.done < FDT_DONE_MEM_RSVMAP &&  in fdt_next_region()
      454  info->ptrs.done = FDT_DONE_MEM_RSVMAP;  in fdt_next_region()
      464  while (info->ptrs.done < FDT_DONE_STRUCT) {  in fdt_next_region()
      481  p = info->ptrs;  in fdt_next_region()
      626  info->ptrs = p;  in fdt_next_region()
      630  if (info->ptrs.done < FDT_DONE_END) {  in fdt_next_region()
      631  if (info->ptrs.nextoffset != fdt_size_dt_struct(fdt))  in fdt_next_region()
      635  info->ptrs.nextoffset - info->start))  in fdt_next_region()
      637  info->ptrs.done++;  in fdt_next_region()
      [all …]

/external/jemalloc/test/integration/
  mallocx.c
      91  void *ptrs[3];  in TEST_BEGIN() local
      100  for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {  in TEST_BEGIN()
      101  ptrs[i] = mallocx(hugemax, 0);  in TEST_BEGIN()
      102  if (ptrs[i] == NULL)  in TEST_BEGIN()
      108  for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {  in TEST_BEGIN()
      109  if (ptrs[i] != NULL)  in TEST_BEGIN()
      110  dallocx(ptrs[i], 0);  in TEST_BEGIN()

/external/jemalloc_new/test/integration/
  mallocx.c
      77  void *ptrs[3];  in TEST_BEGIN() local
      86  for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {  in TEST_BEGIN()
      87  ptrs[i] = mallocx(largemax, 0);  in TEST_BEGIN()
      88  if (ptrs[i] == NULL) {  in TEST_BEGIN()
      95  for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {  in TEST_BEGIN()
      96  if (ptrs[i] != NULL) {  in TEST_BEGIN()
      97  dallocx(ptrs[i], 0);  in TEST_BEGIN()

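Both mallocx.c tests exercise jemalloc's non-standard entry points: mallocx(size, flags) returns NULL on failure instead of aborting, and every pointer it returns must be released with dallocx(ptr, flags). A hedged sketch of the same allocate-then-release loop over a small ptrs array, assuming a jemalloc install that exposes the unprefixed mallocx()/dallocx() names (some of the copies above use a je_ prefix instead); this is not the test harness itself:

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>   /* mallocx(), dallocx() */

    int main(void)
    {
        void *ptrs[3];
        size_t i;
        size_t big = (size_t)1 << 30;   /* 1 GiB per slot; may legitimately fail */

        /* mallocx() returns NULL on failure instead of aborting, so oversized
         * requests can be probed, much as the tests probe hugemax/largemax. */
        for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
            ptrs[i] = mallocx(big, 0);
            if (ptrs[i] == NULL)
                printf("allocation %zu failed\n", i);
        }

        /* Whatever succeeded must be released with the matching dallocx(). */
        for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
            if (ptrs[i] != NULL)
                dallocx(ptrs[i], 0);
        }
        return 0;
    }
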
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
  avx512-bugfix-26264.ll
      4  define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
      17  …%res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1>…
      21  define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64> %src0) {
      34  …%res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask…
      38  declare <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32, <32 x i1> %mask, <32 x…
      39  declare <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32, <32 x i1> %mask,…

  post-ra-sched.ll
      12  @ptrs = external global [0 x i32*], align 4
      22  ; CHECK-NEXT: movl ptrs(,%ecx,4), %ecx
      23  ; CHECK-NEXT: movl ptrs(,%eax,4), %eax
      30  %arrayidx = getelementptr inbounds [0 x i32*], [0 x i32*]* @ptrs, i32 0, i32 %0
      34  %arrayidx1 = getelementptr inbounds [0 x i32*], [0 x i32*]* @ptrs, i32 0, i32 %3

/external/strace/tests-m32/
  pkey_mprotect.c
      56  static const kernel_ulong_t ptrs[] = {  in main() local
      93  for (i = 0; i < ARRAY_SIZE(ptrs); i++) {  in main()
      98  ptrs[i], sizes[j],  in main()
      102  sprintptr(ptrs[i]),  in main()

/external/strace/tests/
  pkey_mprotect.c
      56  static const kernel_ulong_t ptrs[] = {  in main() local
      93  for (i = 0; i < ARRAY_SIZE(ptrs); i++) {  in main()
      98  ptrs[i], sizes[j],  in main()
      102  sprintptr(ptrs[i]),  in main()

/external/strace/tests-mx32/
  pkey_mprotect.c
      56  static const kernel_ulong_t ptrs[] = {  in main() local
      93  for (i = 0; i < ARRAY_SIZE(ptrs); i++) {  in main()
      98  ptrs[i], sizes[j],  in main()
      102  sprintptr(ptrs[i]),  in main()

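All three strace test copies feed pkey_mprotect(2) arrays of deliberately odd pointer and size values and only check how strace decodes the arguments. For contrast, a minimal well-formed call sequence might look like the sketch below, assuming the glibc ≥ 2.27 wrappers and PKEY_DISABLE_WRITE from <sys/mman.h>; on hardware or kernels without protection keys, pkey_alloc() simply fails:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* Allocate a protection key that disallows writes through it ... */
        int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
        if (pkey < 0) {
            perror("pkey_alloc");   /* e.g. ENOSYS/EINVAL without pkey support */
            return 1;
        }

        /* ... and attach it to the mapping, keeping PROT_READ | PROT_WRITE. */
        if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey) != 0) {
            perror("pkey_mprotect");
            return 1;
        }

        pkey_free(pkey);
        munmap(p, page);
        return 0;
    }
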
/external/llvm/test/CodeGen/X86/
  avx512-bugfix-26264.ll
      4  define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
      21  …%res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1>…
      25  define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64> %src0) {
      42  …%res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask…
      46  declare <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32, <32 x i1> %mask, <32 x…
      47  declare <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32, <32 x i1> %mask,…

  post-ra-sched.ll
      12  @ptrs = external global [0 x i32*], align 4
      22  ; CHECK-NEXT: movl ptrs(,%ecx,4), %ecx
      23  ; CHECK-NEXT: movl ptrs(,%eax,4), %eax
      30  %arrayidx = getelementptr inbounds [0 x i32*], [0 x i32*]* @ptrs, i32 0, i32 %0
      34  %arrayidx1 = getelementptr inbounds [0 x i32*], [0 x i32*]* @ptrs, i32 0, i32 %3

/external/llvm/test/Transforms/FunctionAttrs/
  readattrs.ll
      76  define void @test9(<4 x i32*> %ptrs, <4 x i32>%val) {
      77  …call void @llvm.masked.scatter.v4i32(<4 x i32>%val, <4 x i32*> %ptrs, i32 4, <4 x i1><i1 true, i1 …
      85  define <4 x i32> @test10(<4 x i32*> %ptrs) {
      86  …%res = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 4, <4 x i1><i1 true, i1 fals…
      95  define <4 x i32> @test11_2(<4 x i32*> %ptrs) {
      96  %res = call <4 x i32> @test11_1(<4 x i32*> %ptrs)
      103  define <4 x i32> @test12_2(<4 x i32*> %ptrs) {
      104  %res = call <4 x i32> @test12_1(<4 x i32*> %ptrs)

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/FunctionAttrs/
  readattrs.ll
      77  define void @test9(<4 x i32*> %ptrs, <4 x i32>%val) {
      78  …call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>%val, <4 x i32*> %ptrs, i32 4, <4 x i1><i1 t…
      86  define <4 x i32> @test10(<4 x i32*> %ptrs) {
      87  …%res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1><i1 true,…
      96  define <4 x i32> @test11_2(<4 x i32*> %ptrs) {
      97  %res = call <4 x i32> @test11_1(<4 x i32*> %ptrs)
      104  define <4 x i32> @test12_2(<4 x i32*> %ptrs) {
      105  %res = call <4 x i32> @test12_1(<4 x i32*> %ptrs)