/external/webrtc/talk/session/media/ |
D | planarfunctions_unittest.cc |
    64   static const int kAlignment = 16;   variable
    165  uint8_t* image_pointer = new uint8_t[y_size + u_size + v_size + kAlignment];   in CreateFakeYuvTestingImage()
    166  y_pointer = ALIGNP(image_pointer, kAlignment);   in CreateFakeYuvTestingImage()
    167  u_pointer = ALIGNP(&image_pointer[y_size], kAlignment);   in CreateFakeYuvTestingImage()
    168  v_pointer = ALIGNP(&image_pointer[y_size + u_size], kAlignment);   in CreateFakeYuvTestingImage()
    207  uint8_t* image_pointer = new uint8_t[2 * height * awidth + kAlignment];   in CreateFakeInterleaveYuvTestingImage()
    208  yuv_pointer = ALIGNP(image_pointer, kAlignment);   in CreateFakeInterleaveYuvTestingImage()
    287  ((height + 1) / 2) * ((width + 1) / 2) * 2 + kAlignment];   in CreateFakeNV12TestingImage()
    288  y_pointer = ALIGNP(image_pointer, kAlignment);   in CreateFakeNV12TestingImage()
    326  ((height + 1) / 2) * ((width + 1) / 2) * 2 + kAlignment];   in CreateFakeM420TestingImage()
    [all …]
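
A minimal sketch of the over-allocate-then-align pattern used by these test images: one contiguous block gets kAlignment bytes of slack, and each plane pointer is rounded up to a 16-byte boundary. align_up() below is only an assumed stand-in for WebRTC's ALIGNP macro, and the dimensions are chosen so the plane sizes are themselves multiples of kAlignment (so the planes cannot overlap).

      #include <cstddef>
      #include <cstdint>
      #include <memory>

      static const int kAlignment = 16;  // same constant as the unit test

      // Round a pointer up to the next multiple of `alignment` (power of two).
      inline uint8_t* align_up(uint8_t* p, uintptr_t alignment) {
        uintptr_t v = reinterpret_cast<uintptr_t>(p);
        return reinterpret_cast<uint8_t*>((v + alignment - 1) & ~(alignment - 1));
      }

      int main() {
        const int width = 64, height = 48;
        const size_t y_size = width * height;                          // 3072
        const size_t u_size = ((width + 1) / 2) * ((height + 1) / 2);  // 768
        const size_t v_size = u_size;

        // One allocation with kAlignment bytes of slack for the round-up.
        std::unique_ptr<uint8_t[]> image(
            new uint8_t[y_size + u_size + v_size + kAlignment]);
        uint8_t* y = align_up(image.get(), kAlignment);
        uint8_t* u = align_up(image.get() + y_size, kAlignment);
        uint8_t* v = align_up(image.get() + y_size + u_size, kAlignment);
        (void)y; (void)u; (void)v;  // the test would now fill the planes
        return 0;
      }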
|
D | yuvscaler_unittest.cc |
    53   static const int kAlignment = 16;   variable
    105  new uint8_t[isize + kAlignment + memoffset]());   in TestScale()
    107  new uint8_t[osize + kAlignment + memoffset]());   in TestScale()
    109  new uint8_t[osize + kAlignment + memoffset]());   in TestScale()
    111  uint8_t* ibuf = ALIGNP(ibuffer.get(), kAlignment) + memoffset;   in TestScale()
    112  uint8_t* obuf = ALIGNP(obuffer.get(), kAlignment) + memoffset;   in TestScale()
    113  uint8_t* xbuf = ALIGNP(xbuffer.get(), kAlignment) + memoffset;   in TestScale()
    212  scoped_ptr<uint8_t[]> ibuffer(new uint8_t[I420_SIZE(iw, ih) + kAlignment]);   in TEST_F()
    213  scoped_ptr<uint8_t[]> obuffer(new uint8_t[I420_SIZE(ow, oh) + kAlignment]);   in TEST_F()
    215  uint8_t* ibuf = ALIGNP(ibuffer.get(), kAlignment);   in TEST_F()
    [all …]
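
The memoffset seen above is the interesting part: the buffer is first aligned to 16 bytes and then deliberately shifted by a small offset, so the same scaler runs both on aligned pointers (memoffset == 0) and on misaligned ones (memoffset == 1). A sketch with illustrative names (this is not the real TestScale() helper), assuming the same align-up behavior as ALIGNP:

      #include <cstddef>
      #include <cstdint>
      #include <memory>

      static const int kAlignment = 16;

      inline uint8_t* align_up(uint8_t* p, uintptr_t a) {
        uintptr_t v = reinterpret_cast<uintptr_t>(p);
        return reinterpret_cast<uint8_t*>((v + a - 1) & ~(a - 1));
      }

      // Returns a pointer exactly `memoffset` bytes past a 16-byte boundary.
      // The storage is over-allocated by kAlignment + memoffset so the payload
      // always fits after the shift.
      uint8_t* MakeTestBuffer(std::unique_ptr<uint8_t[]>* storage,
                              size_t payload_size, int memoffset) {
        storage->reset(new uint8_t[payload_size + kAlignment + memoffset]());
        return align_up(storage->get(), kAlignment) + memoffset;
      }

      int main() {
        std::unique_ptr<uint8_t[]> s1, s2;
        uint8_t* aligned    = MakeTestBuffer(&s1, 4096, /*memoffset=*/0);
        uint8_t* misaligned = MakeTestBuffer(&s2, 4096, /*memoffset=*/1);
        (void)aligned; (void)misaligned;  // both would be scaled and compared
        return 0;
      }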
|
/external/perfetto/src/profiling/memory/ |
D | shared_ring_buffer.cc |
    44   constexpr auto kAlignment = 8;  // 64 bits to use aligned memcpy().   variable
    45   constexpr auto kHeaderSize = kAlignment;
    198  base::AlignUp<kAlignment>(size + kHeaderSize);   in BeginWrite()
    235  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(wr_ptr) % kAlignment == 0);   in EndWrite()
    264  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(rd_ptr) % kAlignment == 0);   in BeginRead()
    272  const size_t size_with_header = base::AlignUp<kAlignment>(size + kHeaderSize);   in BeginRead()
    285  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(rd_ptr) % kAlignment == 0);   in BeginRead()
    292  size_t size_with_header = base::AlignUp<kAlignment>(buf.size + kHeaderSize);   in EndRead()
    299  pos.write_pos % kAlignment || pos.read_pos % kAlignment) {   in IsCorrupt()
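
What the ring buffer does with kAlignment: every record is an 8-byte header plus a payload, rounded up to the next multiple of 8, so the read and write cursors only ever move in 8-byte steps and the header can be copied with an aligned memcpy(). A minimal sketch of that arithmetic, with align_up() standing in for perfetto's base::AlignUp<kAlignment>() (assumed behavior):

      #include <cassert>
      #include <cstddef>
      #include <cstdint>

      constexpr size_t kAlignment = 8;   // 64 bits to use aligned memcpy().
      constexpr size_t kHeaderSize = kAlignment;

      constexpr size_t align_up(size_t v) {
        return (v + kAlignment - 1) & ~(kAlignment - 1);
      }

      int main() {
        uint64_t write_pos = 0;  // byte offset of the write cursor

        // "Write" records of odd payload sizes; the cursor stays 8-byte aligned,
        // which is exactly the invariant the PERFETTO_DCHECKs above verify.
        for (size_t payload : {1, 13, 64, 7}) {
          const size_t size_with_header = align_up(payload + kHeaderSize);
          write_pos += size_with_header;
          assert(write_pos % kAlignment == 0);
        }
        return 0;
      }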
|
/external/skia/src/gpu/ |
D | GrMemoryPool.cpp |
    23   static constexpr size_t kPoolSize = GrAlignTo(sizeof(GrMemoryPool), kAlignment);   in Make()
    71   size = GrAlignTo(size, kAlignment);   in allocate()
    157  SkASSERT(!(reinterpret_cast<intptr_t>(mem) % kAlignment));   in InitBlock()
    193  SkASSERT(!(b % kAlignment));   in validate()
    194  SkASSERT(!(totalSize % kAlignment));   in validate()
    195  SkASSERT(!(block->fCurrPtr % kAlignment));   in validate()
    223  static constexpr size_t kOpPoolSize = GrAlignTo(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
    230  GrAlignTo(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);   in Make()
    231  static constexpr size_t kPoolSize = GrAlignTo(sizeof(GrMemoryPool), GrMemoryPool::kAlignment);   in Make()
|
D | GrMemoryPool.h |
    35   static constexpr size_t kAlignment = 8;
    38   static constexpr size_t kAlignment = alignof(std::max_align_t);
    132  static constexpr size_t kHeaderSize = GrAlignTo(sizeof(BlockHeader), kAlignment);
    133  static constexpr size_t kPerAllocPad = GrAlignTo(sizeof(AllocHeader), kAlignment);
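
Two things are visible in this header: the pool's alignment is either a fixed 8 or alignof(std::max_align_t) depending on configuration, and both header sizes are padded up to a multiple of it at compile time, so the user bytes after each header stay aligned. A standalone sketch (the header structs are placeholders, not Skia's real BlockHeader/AllocHeader, and align_to() is an assumed equivalent of GrAlignTo):

      #include <cstddef>

      constexpr size_t align_to(size_t x, size_t alignment) {
        return ((x + alignment - 1) / alignment) * alignment;
      }

      constexpr size_t kAlignment = alignof(std::max_align_t);

      struct BlockHeader { void* prev; void* next; size_t size; size_t cursor; };
      struct AllocHeader { int sentinel; int block_offset; };

      constexpr size_t kHeaderSize  = align_to(sizeof(BlockHeader), kAlignment);
      constexpr size_t kPerAllocPad = align_to(sizeof(AllocHeader), kAlignment);

      static_assert(kHeaderSize % kAlignment == 0,
                    "payload after the block header starts aligned");
      static_assert(kPerAllocPad % kAlignment == 0,
                    "per-allocation padding preserves alignment");

      int main() { return 0; }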
|
/external/skqp/src/gpu/ |
D | GrMemoryPool.cpp |
    34   minAllocSize = SkTMax<size_t>(GrSizeAlignUp(minAllocSize, kAlignment), kSmallestMinAllocSize);   in GrMemoryPool()
    35   preallocSize = SkTMax<size_t>(GrSizeAlignUp(preallocSize, kAlignment), minAllocSize);   in GrMemoryPool()
    71   size = GrSizeAlignUp(size, kAlignment);   in allocate()
    156  SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));   in CreateBlock()
    191  SkASSERT(!(b % kAlignment));   in validate()
    192  SkASSERT(!(totalSize % kAlignment));   in validate()
    193  SkASSERT(!(block->fCurrPtr % kAlignment));   in validate()
|
D | GrMemoryPool.h |
    121  kAlignment = 8,   enumerator
    122  kHeaderSize = GR_CT_ALIGN_UP(sizeof(BlockHeader), kAlignment),
    123  kPerAllocPad = GR_CT_ALIGN_UP(sizeof(AllocHeader), kAlignment),
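
The skqp copy of the pool keeps the same constants in an enum and rounds them up with a compile-time macro. The macro below is an assumed equivalent of GR_CT_ALIGN_UP (and, like any mask-based round-up, it only works because kAlignment is a power of two); the header structs are placeholders:

      #include <cstddef>

      #define CT_ALIGN_UP(x, alignment) (((x) + ((alignment) - 1)) & ~((alignment) - 1))

      struct BlockHeader { void* next; size_t free_bytes; };  // placeholder layout
      struct AllocHeader { int sentinel; };                   // placeholder layout

      enum {
        kAlignment   = 8,
        kHeaderSize  = CT_ALIGN_UP(sizeof(BlockHeader), kAlignment),
        kPerAllocPad = CT_ALIGN_UP(sizeof(AllocHeader), kAlignment),
      };

      static_assert(kHeaderSize % kAlignment == 0, "header keeps payload aligned");
      static_assert(kPerAllocPad % kAlignment == 0, "pad keeps payload aligned");

      int main() { return 0; }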
|
/external/skia/bench/ |
D | GrMemoryPoolBench.cpp |
    18   using Aligned = std::aligned_storage<32, GrMemoryPool::kAlignment>::type;
    20   static_assert(sizeof(Aligned) % GrMemoryPool::kAlignment == 0);
    26   static_assert(sizeof(Unaligned) % GrMemoryPool::kAlignment != 0);
    29   static_assert(GrAlignTo(sizeof(Unaligned), GrMemoryPool::kAlignment) == sizeof(Aligned));
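
The benchmark defines one payload type whose size is an exact multiple of the pool's alignment and one that deliberately is not, then pins down the relationship between them with static_asserts. A standalone version of the same idea (kAlignment = 16 and the 32/20-byte sizes are assumptions standing in for the real GrMemoryPool::kAlignment and benchmark types):

      #include <cstddef>
      #include <type_traits>

      constexpr size_t kAlignment = 16;

      constexpr size_t align_to(size_t x, size_t a) { return ((x + a - 1) / a) * a; }

      using Aligned = std::aligned_storage<32, kAlignment>::type;  // sizeof == 32
      struct Unaligned { char data[20]; };                         // sizeof == 20

      static_assert(sizeof(Aligned) % kAlignment == 0,
                    "size is a whole number of alignment units");
      static_assert(sizeof(Unaligned) % kAlignment != 0,
                    "size deliberately is not");
      static_assert(align_to(sizeof(Unaligned), kAlignment) == sizeof(Aligned),
                    "the pool's padding rounds the odd size up to the even one");

      int main() { return 0; }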
|
/external/gemmlowp/internal/ |
D | allocator.h |
    89   static const std::size_t kAlignment = kDefaultCacheLineSize;
    101  storage_ = aligned_alloc(kAlignment, storage_size_);
    138  const std::size_t bytes = RoundUp<kAlignment>(n * sizeof(T));
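
Here kAlignment is a cache-line size rather than a SIMD width: every reservation is rounded up to whole cache lines and the backing store itself is obtained with an aligned allocation. A sketch of that pattern using the standard std::aligned_alloc (gemmlowp's own aligned_alloc wrapper and its kDefaultCacheLineSize are platform-dependent; 64 bytes is an assumption here):

      #include <cstdint>
      #include <cstdio>
      #include <cstdlib>

      constexpr std::size_t kCacheLineSize = 64;
      constexpr std::size_t kAlignment = kCacheLineSize;

      std::size_t RoundUp(std::size_t n) {
        return ((n + kAlignment - 1) / kAlignment) * kAlignment;
      }

      int main() {
        // Space for 1000 int32 values, rounded up to whole cache lines.
        const std::size_t bytes = RoundUp(1000 * sizeof(std::int32_t));

        // C11/C++17 aligned_alloc wants the size to be a multiple of the
        // alignment, which RoundUp guarantees.
        void* storage = std::aligned_alloc(kAlignment, bytes);
        if (storage == nullptr) return 1;
        std::printf("allocated %zu cache-line-aligned bytes at %p\n", bytes, storage);
        std::free(storage);
        return 0;
      }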
|
/external/gemmlowp/test/ |
D | test_allocator.cc |
    32   !(reinterpret_cast<std::uintptr_t>(int32_array) % Allocator::kAlignment));   in test_allocator()
    34   !(reinterpret_cast<std::uintptr_t>(int8_array) % Allocator::kAlignment));   in test_allocator()
|
/external/boringssl/src/tool/ |
D | speed.cc |
    355  static const unsigned kAlignment = 16;   in SpeedAEADChunk()   local
    367  std::unique_ptr<uint8_t[]> in_storage(new uint8_t[chunk_len + kAlignment]);   in SpeedAEADChunk()
    372  new uint8_t[chunk_len + overhead_len + kAlignment]);   in SpeedAEADChunk()
    374  new uint8_t[chunk_len + overhead_len + kAlignment]);   in SpeedAEADChunk()
    378  new uint8_t[overhead_len + kAlignment]);   in SpeedAEADChunk()
    381  uint8_t *const in = align(in_storage.get(), kAlignment);   in SpeedAEADChunk()
    383  uint8_t *const out = align(out_storage.get(), kAlignment);   in SpeedAEADChunk()
    385  uint8_t *const tag = align(tag_storage.get(), kAlignment);   in SpeedAEADChunk()
    387  uint8_t *const in2 = align(in2_storage.get(), kAlignment);   in SpeedAEADChunk()
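
Same over-allocate-and-align idea as the WebRTC tests, applied to the AEAD benchmark buffers. The standard library has a ready-made helper for the pointer bump, std::align(), so a sketch of an equivalent setup (not BoringSSL's actual align() helper) looks like this:

      #include <cstddef>
      #include <cstdint>
      #include <memory>

      static const unsigned kAlignment = 16;

      // Allocates len + kAlignment bytes and returns a 16-byte-aligned pointer
      // to len usable bytes inside that storage.
      uint8_t* AllocAligned(std::unique_ptr<uint8_t[]>* storage, size_t len) {
        storage->reset(new uint8_t[len + kAlignment]);
        void* p = storage->get();
        size_t space = len + kAlignment;
        // std::align bumps p to the next 16-byte boundary if len bytes still fit;
        // with kAlignment bytes of slack it always succeeds here.
        return static_cast<uint8_t*>(std::align(kAlignment, len, p, space));
      }

      int main() {
        const size_t chunk_len = 8192, overhead_len = 16;
        std::unique_ptr<uint8_t[]> in_storage, out_storage, tag_storage;
        uint8_t* in  = AllocAligned(&in_storage, chunk_len);
        uint8_t* out = AllocAligned(&out_storage, chunk_len + overhead_len);
        uint8_t* tag = AllocAligned(&tag_storage, overhead_len);
        (void)in; (void)out; (void)tag;  // these would feed the AEAD seal/open calls
        return 0;
      }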
|
/external/webrtc/talk/media/base/ |
D | videoframe_unittest.h |
    69   static const int kAlignment = 16;   variable
    576  rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);   in ConstructI422()
    577  uint8_t* y = ALIGNP(buf.get(), kAlignment);   in ConstructI422()
    597  rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);   in ConstructYuy2()
    598  uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment);   in ConstructYuy2()
    614  rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment + 1]);   in ConstructYuy2Unaligned()
    615  uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1;   in ConstructYuy2Unaligned()
    771  rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);   in ConstructRGB565()
    772  uint8_t* out = ALIGNP(outbuf.get(), kAlignment);   in ConstructRGB565()
    787  rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);   in ConstructARGB1555()
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | meta_support.cc |
    42   const int kAlignment = 32;   variable
    43   const int kScratchSize = 2048 * 1024 + kAlignment;
    51   scratch_.get() + kAlignment -   in Scratch()
    52   (reinterpret_cast<uintptr_t>(scratch_.get()) % kAlignment);   in Scratch()
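
This one aligns with plain arithmetic instead of a helper: advance the raw pointer by kAlignment - (address % kAlignment). Note that this variant always moves forward by 1 to kAlignment bytes (a whole kAlignment even when the pointer is already aligned), which is only safe because kScratchSize includes kAlignment bytes of slack. A minimal sketch of the same computation:

      #include <cassert>
      #include <cstdint>
      #include <memory>

      const int kAlignment = 32;
      const int kScratchSize = 2048 * 1024 + kAlignment;

      int main() {
        std::unique_ptr<uint8_t[]> scratch(new uint8_t[kScratchSize]);
        uint8_t* aligned =
            scratch.get() + kAlignment -
            (reinterpret_cast<uintptr_t>(scratch.get()) % kAlignment);
        assert(reinterpret_cast<uintptr_t>(aligned) % kAlignment == 0);
        assert(aligned > scratch.get() &&
               aligned <= scratch.get() + kAlignment);
        // 2 MiB of 32-byte-aligned scratch space is now available at `aligned`.
        return 0;
      }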
|
/external/tensorflow/tensorflow/lite/ |
D | model_test.cc |
    81   const uintptr_t kAlignment = 4;   in TEST()   local
    82   const uintptr_t kAlignmentBits = kAlignment - 1;   in TEST()
    92   reinterpret_cast<char*>(malloc(empty_model_data.size() + kAlignment)),   in TEST()
    97   (reinterpret_cast<uintptr_t>(buffer.get()) + kAlignment) &   in TEST()
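
The mask form of the round-up: with kAlignment a power of two, (address + kAlignment) & ~kAlignmentBits lands on the next multiple of kAlignment, moving forward by 1 to kAlignment bytes, so allocating size + kAlignment leaves enough room for the shifted copy. A sketch with illustrative data (not the actual TF Lite model test):

      #include <cassert>
      #include <cstdint>
      #include <cstdlib>
      #include <cstring>

      int main() {
        const uintptr_t kAlignment = 4;
        const uintptr_t kAlignmentBits = kAlignment - 1;  // low bits to clear

        const char model_data[] = "not really a flatbuffer";
        const size_t size = sizeof(model_data);

        char* buffer = static_cast<char*>(std::malloc(size + kAlignment));
        if (buffer == nullptr) return 1;
        char* aligned = reinterpret_cast<char*>(
            (reinterpret_cast<uintptr_t>(buffer) + kAlignment) & ~kAlignmentBits);
        std::memcpy(aligned, model_data, size);  // copy lands 4-byte aligned

        assert(reinterpret_cast<uintptr_t>(aligned) % kAlignment == 0);
        std::free(buffer);
        return 0;
      }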
|
/external/v8/src/heap/ |
D | memory-allocator.cc |
    116  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);   in InitializeCodePageAllocator()
    129  static_cast<size_t>(MemoryChunk::kAlignment));   in InitializeCodePageAllocator()
    371  AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);   in AllocateBasicChunk()
    414  AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,   in AllocateBasicChunk()
    436  AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,   in AllocateBasicChunk()
|
D | basic-memory-chunk.h |
    111  static const intptr_t kAlignment =   variable
    114  static const intptr_t kAlignmentMask = kAlignment - 1;
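
kAlignment and kAlignmentMask come as a pair: since every memory chunk starts on a kAlignment boundary, clearing the low bits of any address inside a chunk (addr & ~kAlignmentMask) recovers that chunk's base. A sketch of that trick with an illustrative 256 KiB alignment (an assumption, not necessarily V8's value on every configuration):

      #include <cassert>
      #include <cstdint>

      using Address = uintptr_t;

      static const intptr_t kAlignment = 256 * 1024;          // chunk size/alignment
      static const intptr_t kAlignmentMask = kAlignment - 1;  // offset-within-chunk bits

      Address ChunkBaseOf(Address addr) { return addr & ~kAlignmentMask; }

      int main() {
        const Address chunk_base = 16u * 256 * 1024;  // some aligned chunk start
        const Address object = chunk_base + 0x1234;   // an address inside it
        assert(chunk_base % kAlignment == 0);
        assert(ChunkBaseOf(object) == chunk_base);
        return 0;
      }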
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | mkl_cpu_allocator.h |
    280  return cpu_allocator()->AllocateRaw(kAlignment, size);   in MallocHook()
    309  static constexpr const size_t kAlignment = 64;   variable
|
/external/libchrome/mojo/public/js/lib/ |
D | codec.js |
    15   var kAlignment = 8;
    18   return size + (kAlignment - (size % kAlignment)) % kAlignment;
    22   return offset >= 0 && (offset % kAlignment) === 0;
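
The JavaScript codec rounds sizes up with size + (kAlignment - size % kAlignment) % kAlignment. Unlike the "+ kAlignment then mask" variants above, this form adds nothing when the size is already a multiple, and it does not require kAlignment to be a power of two. A C++ transcription of the same arithmetic for comparison:

      #include <cassert>
      #include <cstddef>

      constexpr size_t kAlignment = 8;

      constexpr size_t Align(size_t size) {
        return size + (kAlignment - (size % kAlignment)) % kAlignment;
      }

      constexpr bool IsAligned(size_t offset) { return offset % kAlignment == 0; }

      int main() {
        static_assert(Align(0) == 0, "already-aligned sizes are unchanged");
        static_assert(Align(8) == 8, "already-aligned sizes are unchanged");
        static_assert(Align(9) == 16, "odd sizes round up to the next multiple");
        assert(IsAligned(Align(13)));
        return 0;
      }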
|
/external/webrtc/webrtc/base/ |
D | stream.cc |
    642  buffer_alloc_ = new char[buffer_length_ + kAlignment];   in SetData()
    643  buffer_ = reinterpret_cast<char*>(ALIGNP(buffer_alloc_, kAlignment));   in SetData()
    652  if (char* new_buffer_alloc = new char[size + kAlignment]) {   in DoReserve()
    654  ALIGNP(new_buffer_alloc, kAlignment));   in DoReserve()
|
D | stream.h |
    478  static const int kAlignment = 16;   variable
|