/external/llvm-project/libcxxabi/test/ |
D | test_fallback_malloc.pass.cpp |
  21  container ptrs; in alloc_series() local
  25  ptrs.push_back ( p ); in alloc_series()
  26  return ptrs; in alloc_series()
  30  container ptrs; in alloc_series() local
  34  ptrs.push_back ( p ); in alloc_series()
  38  return ptrs; in alloc_series()
  42  container ptrs; in alloc_series() local
  49  ptrs.push_back ( p ); in alloc_series()
  52  return ptrs; in alloc_series()
  69  container ptrs; in exhaustion_test1() local
  [all …]
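The matches above come from a helper that allocates a run of blocks and collects the pointers in a container for later verification and release. A minimal standalone sketch of that pattern (standard C++ only; the names and sizes are illustrative, not the libc++abi test itself):

    #include <cstdlib>
    #include <vector>

    // Allocate fixed-size blocks until the allocator gives out, collecting
    // the pointers so the caller can inspect and later free them.
    std::vector<void *> alloc_series(std::size_t block_size, std::size_t max_blocks) {
        std::vector<void *> ptrs;
        for (std::size_t i = 0; i < max_blocks; ++i) {
            void *p = std::malloc(block_size);
            if (!p)
                break;                 // allocator exhausted
            ptrs.push_back(p);
        }
        return ptrs;
    }

    int main() {
        std::vector<void *> ptrs = alloc_series(64, 1000);
        for (void *p : ptrs)
            std::free(p);
        return 0;
    }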
|
/external/libcxxabi/test/ |
D | test_fallback_malloc.pass.cpp |
  22  container ptrs; in alloc_series() local
  26  ptrs.push_back ( p ); in alloc_series()
  27  return ptrs; in alloc_series()
  31  container ptrs; in alloc_series() local
  35  ptrs.push_back ( p ); in alloc_series()
  39  return ptrs; in alloc_series()
  43  container ptrs; in alloc_series() local
  50  ptrs.push_back ( p ); in alloc_series()
  53  return ptrs; in alloc_series()
  70  container ptrs; in exhaustion_test1() local
  [all …]
|
/external/jemalloc_new/test/unit/ |
D | arena_reset.c |
  98   do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { in do_arena_reset_pre() argument
  110  *ptrs = (void **)malloc(*nptrs * sizeof(void *)); in do_arena_reset_pre()
  111  assert_ptr_not_null(*ptrs, "Unexpected malloc() failure"); in do_arena_reset_pre()
  116  (*ptrs)[i] = mallocx(sz, flags); in do_arena_reset_pre()
  117  assert_ptr_not_null((*ptrs)[i], in do_arena_reset_pre()
  122  (*ptrs)[nsmall + i] = mallocx(sz, flags); in do_arena_reset_pre()
  123  assert_ptr_not_null((*ptrs)[i], in do_arena_reset_pre()
  131  assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0, in do_arena_reset_pre()
  137  do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { in do_arena_reset_post() argument
  149  assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0, in do_arena_reset_post()
  [all …]
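These matches exercise jemalloc's extended API: objects are created with mallocx() bound to a specific arena, and the arena is later torn down through the mallctl() interface. A rough sketch of that flow, assuming an unprefixed jemalloc build (Android's bundled copy normally uses a je_ prefix) and the usual "arenas.create" / "arena.<i>.reset" control names:

    #include <cstdio>
    #include <jemalloc/jemalloc.h>   // assumption: an unprefixed jemalloc is installed

    int main() {
        // Create a fresh arena and remember its index.
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        if (mallctl("arenas.create", &arena_ind, &sz, nullptr, 0) != 0)
            return 1;

        // Allocate a few objects explicitly bound to that arena.
        void *ptrs[8];
        for (int i = 0; i < 8; i++)
            ptrs[i] = mallocx(64, MALLOCX_ARENA(arena_ind));

        // Resetting the arena releases everything allocated from it at once;
        // the pointers must not be dereferenced afterwards.
        char cmd[64];
        std::snprintf(cmd, sizeof(cmd), "arena.%u.reset", arena_ind);
        mallctl(cmd, nullptr, nullptr, nullptr, 0);
        return 0;
    }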
|
/external/llvm-project/llvm/test/Verifier/ |
D | scatter_gather.ll |
  5   define <16 x float> @gather2(<16 x float*> %ptrs, <16 x i1>* %mask, <16 x float> %passthru) {
  6   …%res = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> %ptrs, i32 4, <16 x i1>…
  13  define <8 x float> @gather3(<8 x float*> %ptrs, <16 x i1> %mask, <8 x float> %passthru) {
  14  …%res = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <16 x i1> %ma…
  21  define <8 x float>* @gather4(<8 x float*> %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
  22  …%res = call <8 x float>* @llvm.masked.gather.p0v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %…
  29  define <8 x float> @gather5(<8 x float*>* %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
  30  …%res = call <8 x float> @llvm.masked.gather.v8f32.p0v8p0f32(<8 x float*>* %ptrs, i32 4, <8 x i1> %…
  37  define <8 x float> @gather6(<8 x float> %ptrs, <8 x i1> %mask, <8 x float> %passthru) {
  38  …%res = call <8 x float> @llvm.masked.gather.v8f32.v8f32(<8 x float> %ptrs, i32 4, <8 x i1> %mask, …
  [all …]
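The verifier tests above deliberately pass mismatched vector types (for example an <8 x float*> pointer vector with a <16 x i1> mask) to @llvm.masked.gather. As a reference point for what the well-formed intrinsics mean, the per-lane behaviour can be modelled in scalar C++; this is a sketch of the documented semantics, not of how any backend lowers them, and the same model applies to the SVE and MVE gather/scatter tests further down:

    #include <cstddef>

    // Scalar model of a masked gather: enabled lanes load through their
    // pointer, disabled lanes keep the pass-through value.
    void masked_gather(float *out, float *const *ptrs, const bool *mask,
                       const float *passthru, std::size_t lanes) {
        for (std::size_t i = 0; i < lanes; ++i)
            out[i] = mask[i] ? *ptrs[i] : passthru[i];
    }

    // Scalar model of a masked scatter: only enabled lanes store.
    void masked_scatter(const float *data, float *const *ptrs, const bool *mask,
                        std::size_t lanes) {
        for (std::size_t i = 0; i < lanes; ++i)
            if (mask[i])
                *ptrs[i] = data[i];
    }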
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | llvm-masked-scatter-legal-for-sve.ll |
  12  define void @masked_scatter_nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, <vscale x …
  13  …call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 0,…
  22  define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, <vsc…
  23  …call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, …
  32  define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, <vscale …
  33  …call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 …
  43  define void @masked_scatter_v2f32(<2 x float> %data, <2 x float*> %ptrs, <2 x i1> %masks) {
  44  …call void @llvm.masked.scatter.v2f32(<2 x float> %data, <2 x float*> %ptrs, i32 0, <2 x i1> %masks)
  54  define void @masked_scatter_v4i32(<4 x i32> %data, <4 x i32*> %ptrs, <4 x i1> %masks) {
  55  call void @llvm.masked.scatter.v4i32(<4 x i32> %data, <4 x i32*> %ptrs, i32 0, <4 x i1> %masks)
  [all …]
|
D | sve-masked-scatter-32b-scaled.ll |
  14  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %ext
  15  …call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0,…
  25  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %ext
  26  …call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0,…
  36  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %ext
  37  …call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0,…
  47  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %ext
  48  …call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 …
  58  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %ext
  59  …call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs,…
  [all …]
|
D | sve-masked-scatter-32b-unscaled.ll |
  15  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i8*>
  16  …call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 0, <v…
  27  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
  28  …call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0,…
  39  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
  40  …call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0,…
  51  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
  52  …call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0,…
  63  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
  64  …call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 …
  [all …]
|
D | sve-masked-gather-32b-signed-scaled.ll |
  14  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i32> %offsets
  15  …%vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vsc…
  25  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i32> %offsets
  26  …%vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vsc…
  36  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i32> %offsets
  37  …%vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vsc…
  46  %ptrs = getelementptr half, half* %base, <vscale x 2 x i32> %offsets
  47  …%vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <v…
  56  %ptrs = getelementptr float, float* %base, <vscale x 2 x i32> %offsets
  57  …%vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, …
  [all …]
|
D | sve-masked-scatter-64b-scaled.ll |
  13  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
  14  …call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 2,…
  23  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
  24  …call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 4,…
  33  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets
  34  …call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8,…
  43  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets
  44  …call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 …
  53  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets
  54  …call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i3…
  [all …]
|
D | sve-masked-gather-64b-unscaled.ll |
  10  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
  11  …%vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
  22  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
  23  …%vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vsc…
  34  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
  35  …%vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vsc…
  46  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
  47  …%vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vsc…
  57  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
  58  …%vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <v…
  [all …]
|
D | sve-masked-gather-32b-signed-unscaled.ll |
  14  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
  15  …%vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
  26  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
  27  …%vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vsc…
  38  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
  39  …%vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vsc…
  50  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
  51  …%vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vsc…
  61  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
  62  …%vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <v…
  [all …]
|
D | sve-masked-gather-32b-unsigned-unscaled.ll |
  15  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
  16  …%vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale…
  28  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
  29  …%vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vsc…
  41  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
  42  …%vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vsc…
  54  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
  55  …%vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vsc…
  66  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
  67  …%vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <v…
  [all …]
|
D | sve-masked-gather-64b-scaled.ll |
  10  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
  11  …%vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vsc…
  21  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
  22  …%vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vsc…
  32  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets
  33  …%vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vsc…
  42  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets
  43  …%vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <v…
  52  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets
  53  …%vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, …
  [all …]
|
D | sve-masked-gather-32b-unsigned-scaled.ll |
  15  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets.zext
  16  …%vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vsc…
  27  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets.zext
  28  …%vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vsc…
  39  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets.zext
  40  …%vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vsc…
  50  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets.zext
  51  …%vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <v…
  61  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets.zext
  62  …%vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, …
  [all …]
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/ |
D | mve-gather-ind32-scaled.ll |
  12  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
  13  …%gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 t…
  26  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
  27  …%gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 t…
  40  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
  41  …%gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 t…
  56  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
  57  …%gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> <…
  70  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
  71  …%gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 t…
  [all …]
|
D | mve-gather-ind32-unscaled.ll |
  12  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
  13  …%gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true,…
  26  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
  27  …%gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> <i1 true,…
  41  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
  42  …%gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 t…
  56  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
  57  …%gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> <i1 t…
  71  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
  72  …%gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> <i1 t…
  [all …]
|
D | mve-scatter-ind32-scaled.ll |
  13  %ptrs = getelementptr inbounds i16, i16* %base, <4 x i32> %offs
  15  …call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 t…
  28  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs
  29  …call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <…
  43  %ptrs = bitcast <4 x i32*> %i32_ptrs to <4 x float*>
  44  …call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i…
  58  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.zext
  59  …call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <…
  73  %ptrs = getelementptr inbounds i32, i32* %base, <4 x i32> %offs.sext
  74  …call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <…
  [all …]
|
D | mve-scatter-ind32-unscaled.ll |
  13  %ptrs = getelementptr inbounds i8, i8* %base, <4 x i32> %offs
  15  …call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %t, <4 x i8*> %ptrs, i32 2, <4 x i1> <i1 true,…
  29  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i16*>
  31  …call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %t, <4 x i16*> %ptrs, i32 2, <4 x i1> <i1 t…
  45  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
  46  …call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <…
  60  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x float*>
  61  …call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %input, <4 x float*> %ptrs, i32 4, <4 x i…
  76  %ptrs = bitcast <4 x i8*> %byte_ptrs to <4 x i32*>
  77  …call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %input, <4 x i32*> %ptrs, i32 4, <4 x i1> <…
  [all …]
|
/external/ltp/testcases/kernel/device-drivers/misc_modules/per_cpu_allocator_module/ |
D | test_per_cpu_allocator_module.c |
  86   static void *ptrs[NR_CMDS]; variable
  137  if (!ptrs[cmdno]) in free_cmd()
  140  verify_area(ptrs[cmdno], cmds[cmdno].size, cmdno); in free_cmd()
  141  free_percpu(ptrs[cmdno]); in free_cmd()
  142  ptrs[cmdno] = NULL; in free_cmd()
  155  ptrs[i] = __alloc_percpu(cmd->size, in run_test()
  158  if (ptrs[i]) in run_test()
  159  fill_area(ptrs[i], cmd->size, i); in run_test()
|
/external/jemalloc_new/msvc/test_threads/ |
D | test_threads.cpp |
  43  uint8_t* ptrs[numAllocsMax]; in test_threads() local
  53  ptrs[j] = (uint8_t*)je_malloc(sz); in test_threads()
  54  if (!ptrs[j]) { in test_threads()
  59  ptrs[j][k] = tid + k; in test_threads()
  63  if (ptrs[j][k] != (uint8_t)(tid + k)) { in test_threads()
  64  …error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(ti… in test_threads()
  67  je_free(ptrs[j]); in test_threads()
|
/external/llvm-project/compiler-rt/test/hwasan/TestCases/ |
D | malloc-align.c |
  25  void *ptrs[cnt]; in main() local
  27  int res = posix_memalign(&ptrs[k], alignment, size); in main()
  29  fprintf(stderr, "... addr 0x%zx\n", (size_t)ptrs[k]); in main()
  30  assert(((size_t)ptrs[k] & (alignment - 1)) == 0); in main()
  33  free(ptrs[k]); in main()
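The hwasan test checks that posix_memalign really returns addresses aligned to the requested power of two. A minimal standalone version of the same check (plain POSIX, no sanitizer assumptions; the alignments and sizes are illustrative):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        for (size_t alignment = 16; alignment <= 4096; alignment *= 2) {
            void *p = NULL;
            int res = posix_memalign(&p, alignment, 2 * alignment);
            assert(res == 0 && p != NULL);
            // An address aligned to `alignment` has its low bits clear.
            assert(((size_t)p & (alignment - 1)) == 0);
            fprintf(stderr, "alignment %zu ok, addr %p\n", alignment, p);
            free(p);
        }
        return 0;
    }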
|
/external/ltp/testcases/kernel/mem/mtest07/ |
D | mallocstress.c |
  73   long *ptrs[MAXPTRS]; in allocate_free() local
  86   ptrs[num_alloc] = malloc(size); in allocate_free()
  88   if (!ptrs[num_alloc]) in allocate_free()
  90   ptrs[num_alloc][0] = num_alloc; in allocate_free()
  119  if (ptrs[i][0] != i) { in allocate_free()
  125  free(ptrs[i]); in allocate_free()
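The mallocstress matches follow a simple stress pattern: grow an array of pointers with malloc, tag each block with its index, then re-check the tag before freeing. A condensed sketch of that loop (sizes and limits here are illustrative, not the LTP test's):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAXPTRS 50

    int main(void) {
        long *ptrs[MAXPTRS];
        int num_alloc = 0;

        /* Allocate blocks of growing size and tag each with its index. */
        for (size_t size = sizeof(long); num_alloc < MAXPTRS; size *= 2) {
            ptrs[num_alloc] = (long *)malloc(size);
            if (!ptrs[num_alloc])
                break;
            ptrs[num_alloc][0] = num_alloc;
            num_alloc++;
        }

        /* Verify the tags survived, then release everything. */
        for (int i = 0; i < num_alloc; i++) {
            if (ptrs[i][0] != i)
                fprintf(stderr, "corruption in block %d\n", i);
            free(ptrs[i]);
        }
        return 0;
    }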
|
/external/llvm-project/clang/test/SemaObjCXX/Inputs/ |
D | nullability-consistency-arrays.h |
  26   void *ptrs[], // expected-warning {{pointer is missing a nullability type specifier}}
  45   void *ptrs[_Nonnull], // expected-warning {{pointer is missing a nullability type specifier}}
  53   void * _Nullable ptrs[_Nonnull],
  118  void *ptrs[],
  135  void * _Nullable ptrs[_Nonnull],
  139  void * ptrs[_Nonnull], // backwards-compatibility
|
/external/OpenCL-CTS/test_conformance/SVM/ |
D | test_migrate.cpp |
  171  char* ptrs[] = { asvm, bsvm, csvm }; in test_svm_migrate() local
  172  …error = clEnqueueSVMMigrateMem(queues[1], 3, (const void**)ptrs, NULL, CL_MIGRATE_MEM_OBJECT_CONTE… in test_svm_migrate()
  178  char *ptrs[] = { asvm+1, bsvm+3, csvm+5 }; in test_svm_migrate() local
  180  …error = clEnqueueSVMMigrateMem(queues[0], 3, (const void**)ptrs, szs, CL_MIGRATE_MEM_OBJECT_HOST, … in test_svm_migrate()
  242  char *ptrs[] = { asvm+100, bsvm+17, csvm+1000, asvm+101, bsvm+19, csvm+1017 }; in test_svm_migrate() local
  245  … error = clEnqueueSVMMigrateMem(queues[0], 3, (const void**)ptrs, szs, 0, 1, &evs[2], &evs[3]); in test_svm_migrate()
  254  char *ptrs[] = { asvm+8, bsvm+17, csvm+31, csvm+83 }; in test_svm_migrate() local
  257  … error = clEnqueueSVMMigrateMem(queues[1], 4, (const void**)ptrs, szs, 0, 1, &evs[4], &evs[5]); in test_svm_migrate()
  266  char *ptrs[] = { asvm+64, asvm+128, bsvm+64, bsvm+128, csvm, csvm+64 }; in test_svm_migrate() local
  269  … error = clEnqueueSVMMigrateMem(queues[0], 6, (const void**)ptrs, szs, 0, 1, &evs[6], &evs[7]); in test_svm_migrate()
  [all …]
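The SVM conformance test drives clEnqueueSVMMigrateMem with various pointer/size combinations and migration flags. A small helper along the same lines (sketch only; it assumes an OpenCL 2.x platform, a valid in-order command queue, and SVM regions already obtained from clSVMAlloc):

    #define CL_TARGET_OPENCL_VERSION 300
    #include <CL/cl.h>

    // Ask the runtime to migrate three SVM regions toward the host.
    // With a NULL sizes array, each whole allocation containing the given
    // pointer is migrated; the test also passes explicit per-region sizes.
    cl_int migrate_to_host(cl_command_queue queue,
                           void *a, void *b, void *c, cl_event *done) {
        const void *ptrs[] = { a, b, c };
        return clEnqueueSVMMigrateMem(queue, 3, ptrs, /*sizes=*/NULL,
                                      CL_MIGRATE_MEM_OBJECT_HOST,
                                      0, NULL, done);
    }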
|
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | gpu_bfc_allocator_test.cc |
  110  std::vector<void*> ptrs; in TEST_P() local
  113  ptrs.push_back(raw); in TEST_P()
  117  std::sort(ptrs.begin(), ptrs.end()); in TEST_P()
  120  for (size_t i = 1; i < ptrs.size(); i++) { in TEST_P()
  121  ASSERT_NE(ptrs[i], ptrs[i - 1]); // No dups in TEST_P()
  122  size_t req_size = a.RequestedSize(ptrs[i - 1]); in TEST_P()
  124  ASSERT_GE(static_cast<char*>(ptrs[i]) - static_cast<char*>(ptrs[i - 1]), in TEST_P()
  128  for (size_t i = 0; i < ptrs.size(); i++) { in TEST_P()
  129  a.DeallocateRaw(ptrs[i]); in TEST_P()
  404  std::vector<void*> ptrs; in BM_AllocationDelayed() local
  [all …]
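The BFC allocator test's central check is allocator-agnostic: collect the returned pointers, sort them, and confirm that consecutive allocations neither repeat nor overlap. Setting aside the TensorFlow-specific calls (RequestedSize, DeallocateRaw), the same idea with plain malloc and fixed-size requests looks roughly like this:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <functional>
    #include <vector>

    int main() {
        const std::ptrdiff_t kSize = 256;
        std::vector<char *> ptrs;
        for (int i = 0; i < 100; ++i)
            ptrs.push_back(static_cast<char *>(std::malloc(kSize)));

        // std::less gives a total order even over unrelated pointers.
        std::sort(ptrs.begin(), ptrs.end(), std::less<char *>());
        for (std::size_t i = 1; i < ptrs.size(); ++i) {
            assert(ptrs[i] != ptrs[i - 1]);          // no duplicates
            assert(ptrs[i] - ptrs[i - 1] >= kSize);  // no overlap
        }

        for (char *p : ptrs)
            std::free(p);
        return 0;
    }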
|