
Searched refs:memory (Results 1 – 25 of 115) sorted by relevance


/packages/modules/NeuralNetworks/common/types/src/
SharedMemoryAndroid.cpp
81 GeneralResult<Mapping> map(const Memory::Ashmem& memory) { in map() argument
86 base::MappedFile::FromFd(memory.fd, offset, memory.size, prot); in map()
94 .size = memory.size, in map()
104 const CompatibilityLayerMemory& memory = loadCompatibilityLayerMemory(); in allocateSharedMemory() local
105 auto fd = base::unique_fd(memory.create(nullptr, size)); in allocateSharedMemory()
110 const size_t readSize = memory.getSize(fd.get()); in allocateSharedMemory()
124 size_t getSize(const Memory::Ashmem& memory) { in getSize() argument
125 return memory.size; in getSize()
128 size_t getSize(const Memory::Fd& memory) { in getSize() argument
129 return memory.size; in getSize()
[all …]
SharedMemory.cpp
125 auto memory = NN_TRY(mBuilder.finish()); in finish() local
128 const auto [pointer, size, context] = NN_TRY(map(memory)); in finish()
138 return memory; in finish()
170 auto memory = NN_TRY(memoryBuilder.finish()); in flushDataFromPointerToShared() local
171 modelInShared.pools.push_back(std::move(memory)); in flushDataFromPointerToShared()
233 auto memory = NN_TRY(inputBuilder.finish()); in convertRequestFromPointerToShared() local
234 requestInShared.pools.push_back(memory); in convertRequestFromPointerToShared()
236 InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory))); in convertRequestFromPointerToShared()
257 auto memory = NN_TRY(outputBuilder.finish()); in convertRequestFromPointerToShared() local
258 requestInShared.pools.push_back(memory); in convertRequestFromPointerToShared()
[all …]
TypeUtils.cpp
270 [](const SharedMemory& memory) { return getSize(memory); }); in getMemorySizes() argument
607 static std::ostream& operator<<(std::ostream& os, const Memory::Ashmem& memory) { in operator <<() argument
608 return os << "Ashmem{.fd=" << (memory.fd.ok() ? "<valid fd>" : "<invalid fd>") in operator <<()
609 << ", .size=" << memory.size << "}"; in operator <<()
612 static std::ostream& operator<<(std::ostream& os, const Memory::Fd& memory) { in operator <<() argument
613 return os << "Fd{.size=" << memory.size << ", .prot=" << memory.prot in operator <<()
614 << ", .fd=" << (memory.fd.ok() ? "<valid fd>" : "<invalid fd>") in operator <<()
615 << ", .offset=" << memory.offset << "}"; in operator <<()
618 static std::ostream& operator<<(std::ostream& os, const Memory::HardwareBuffer& memory) { in operator <<() argument
619 if (memory.handle.get() == nullptr) { in operator <<()
[all …]
/packages/modules/NeuralNetworks/runtime/
Memory.cpp
187 RuntimeMemory::RuntimeMemory(SharedMemory memory) : kMemory(std::move(memory)) { in RuntimeMemory() argument
192 RuntimeMemory::RuntimeMemory(SharedMemory memory, std::unique_ptr<MemoryValidatorBase> validator) in RuntimeMemory() argument
193 : kMemory(std::move(memory)), mValidator(std::move(validator)) { in RuntimeMemory()
266 const SharedMemory& memory = runtimeMemory->getMemory(); in copyIBuffers() local
267 if (!validate(memory).ok()) return ANEURALNETWORKS_OUT_OF_MEMORY; in copyIBuffers()
268 NN_RETURN_IF_ERROR(copyIBufferToMemory(src, memory)); in copyIBuffers()
269 NN_RETURN_IF_ERROR(copyMemoryToIBuffer(memory, dst, srcMetadata.dimensions)); in copyIBuffers()
491 std::unique_ptr<RuntimeMemory> memory; in allocate() local
496 std::tie(n, memory) = mAllocator->allocate(mDesc, mOperand->type); in allocate()
504 std::tie(n, memory) = MemoryRuntimeAHWB::create(size); in allocate()
[all …]
ExecutionBuilder.cpp
213 const RuntimeMemory* memory, size_t offset, in setInputFromMemory() argument
232 if (!memory->getValidator().validate(mCompilation, IOType::INPUT, index, type, offset, in setInputFromMemory()
241 if (validate(memory->getMemory()).ok() && offset == 0 && length == 0) { in setInputFromMemory()
242 length = memory->getSize(); in setInputFromMemory()
245 uint32_t poolIndex = mMemories.add(memory); in setInputFromMemory()
294 const RuntimeMemory* memory, size_t offset, in setOutputFromMemory() argument
313 if (!memory->getValidator().validate(mCompilation, IOType::OUTPUT, index, type, offset, in setOutputFromMemory()
322 if (validate(memory->getMemory()).ok() && offset == 0 && length == 0) { in setOutputFromMemory()
323 length = memory->getSize(); in setOutputFromMemory()
326 uint32_t poolIndex = mMemories.add(memory); in setOutputFromMemory()
[all …]
Memory.h
198 explicit RuntimeMemory(SharedMemory memory);
199 RuntimeMemory(SharedMemory memory, std::unique_ptr<MemoryValidatorBase> validator);
285 MemoryAshmem(SharedMemory memory, Mapping mapped);
302 explicit MemoryFd(SharedMemory memory);
315 MemoryAHWB(SharedMemory memory, std::unique_ptr<MemoryValidatorBase> validator) in MemoryAHWB() argument
316 : RuntimeMemory(std::move(memory), std::move(validator)) {} in MemoryAHWB()
339 MemoryRuntimeAHWB(SharedMemory memory, Mapping mapping);
NeuralNetworks.cpp
1009 ANeuralNetworksMemory** memory) { in ANeuralNetworksMemory_createFromDesc() argument
1011 if (memory != nullptr) { in ANeuralNetworksMemory_createFromDesc()
1012 *memory = nullptr; in ANeuralNetworksMemory_createFromDesc()
1014 if (!desc || !memory) { in ANeuralNetworksMemory_createFromDesc()
1023 *memory = reinterpret_cast<ANeuralNetworksMemory*>(m.release()); in ANeuralNetworksMemory_createFromDesc()
1039 ANeuralNetworksMemory** memory) { in ANeuralNetworksMemory_createFromFd() argument
1041 if (memory != nullptr) { in ANeuralNetworksMemory_createFromFd()
1042 *memory = nullptr; in ANeuralNetworksMemory_createFromFd()
1044 if (!memory) { in ANeuralNetworksMemory_createFromFd()
1054 *memory = reinterpret_cast<ANeuralNetworksMemory*>(m.release()); in ANeuralNetworksMemory_createFromFd()
[all …]
ExecutionBuilder.h
69 const RuntimeMemory* memory, size_t offset, size_t length);
73 const RuntimeMemory* memory, size_t offset, size_t length);
391 int setInputFromMemory(uint32_t inputIndex, const RuntimeMemory* memory, uint32_t offset,
393 return setInputOrOutputFromMemory(mModel->getInputOperand(inputIndex), memory, offset,
396 int setOutputFromMemory(uint32_t outputIndex, const RuntimeMemory* memory, uint32_t offset,
398 return setInputOrOutputFromMemory(mModel->getOutputOperand(outputIndex), memory, offset,
434 int setInputOrOutputFromMemory(const Operand& inputOrOutputOperand, const RuntimeMemory* memory,
/packages/modules/NeuralNetworks/runtime/test/
TestMemoryDomain.cpp
243 ANeuralNetworksMemory* memory; in allocateDeviceMemory() local
244 int n = ANeuralNetworksMemory_createFromDesc(desc, &memory); in allocateDeviceMemory()
246 return {n, test_wrapper::Memory(memory)}; in allocateDeviceMemory()
301 auto [n, memory] = allocateDeviceMemory(compilation, {0}, {0}); in TEST_P()
305 const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); in TEST_P()
315 const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); in TEST_P()
318 const auto& memory = m->getMemory(); in TEST_P() local
319 EXPECT_TRUE(validate(memory).ok()); in TEST_P()
321 EXPECT_FALSE(isAhwbBlob(memory)); in TEST_P()
323 EXPECT_EQ(isAhwbBlob(memory), kRunningOnAndroid); in TEST_P()
[all …]
TestValidation.cpp
252 for (auto* memory : mMemories) ANeuralNetworksMemory_free(memory); in TearDown() local
368 ANeuralNetworksMemory* memory, int expectedResult) { in executeWithMemoryAsInput() argument
373 ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, 0), in executeWithMemoryAsInput()
382 ANeuralNetworksMemory* memory, int expectedResult) { in executeWithMemoryAsOutput() argument
389 ASSERT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 0), in executeWithMemoryAsOutput()
634 ANeuralNetworksMemory* memory; in TEST_F() local
636 &memory), in TEST_F()
639 EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(nullptr, 0, memory, 0, sizeof(float)), in TEST_F()
645 EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, -1, memory, 0, sizeof(float)), in TEST_F()
649 EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, memorySize), in TEST_F()
[all …]
TestGenerated.cpp
162 ANeuralNetworksMemory* memory = nullptr; in createDeviceMemoryForInput() local
163 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR); in createDeviceMemoryForInput()
165 return memory; in createDeviceMemoryForInput()
175 ANeuralNetworksMemory* memory = nullptr; in createDeviceMemoryForOutput() local
176 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR); in createDeviceMemoryForOutput()
178 return memory; in createDeviceMemoryForOutput()
200 ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i); in createRequestWithDeviceMemories() local
201 ASSERT_NE(memory, nullptr); in createRequestWithDeviceMemories()
202 auto& wrapperMemory = inputMemories->emplace_back(memory); in createRequestWithDeviceMemories()
207 ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem->get()->get(), memory), in createRequestWithDeviceMemories()
[all …]
SupportLibraryTestUtils.h
35 sl_wrapper::Memory memory) in TestAshmem() argument
36 : mFd(std::move(fd)), mMapped(std::move(mapped)), mMemory(std::move(memory)) {} in TestAshmem()
59 sl_wrapper::Memory memory(nnapi, length, PROT_READ | PROT_WRITE, fd, 0); in createFrom()
60 if (!memory.isValid()) return nullptr; in createFrom()
64 std::move(memory)); in createFrom()
TestUtils.h
38 test_wrapper::Memory memory) in TestAshmem() argument
39 : mFd(std::move(fd)), mMapped(std::move(mapped)), mMemory(std::move(memory)) {} in TestAshmem()
66 test_wrapper::Memory memory(length, PROT_READ | PROT_WRITE, fd, 0); in createFrom()
67 if (!memory.isValid()) return nullptr; in createFrom()
71 std::move(memory)); in createFrom()
GeneratedTestUtils.cpp
137 const std::unique_ptr<MemoryWithPointer>& memory, argument
150 std::memcpy(memory->getPointer() + *memoryOffset, operand.data.get<void>(), length);
151 model->setOperandValueFromMemory(index, memory.get(), *memoryOffset, length);
165 memory, memoryOffset, refModel, refModels);
197 std::unique_ptr<MemoryWithPointer> memory = createConstantReferenceMemory(nnapi, testModel); local
199 std::unique_ptr<MemoryWithPointer> memory = createConstantReferenceMemory(testModel); local
211 createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, memory,
214 model->setConstantReferenceMemory(std::move(memory));
SupportLibraryTestGenerated.cpp
266 ANeuralNetworksMemory* memory = nullptr; in createDeviceMemoryForInput() local
267 mNnApi->getFL5()->ANeuralNetworksMemory_createFromDesc(desc, &memory); in createDeviceMemoryForInput()
269 return memory; in createDeviceMemoryForInput()
280 ANeuralNetworksMemory* memory = nullptr; in createDeviceMemoryForOutput() local
281 mNnApi->getFL5()->ANeuralNetworksMemory_createFromDesc(desc, &memory); in createDeviceMemoryForOutput()
283 return memory; in createDeviceMemoryForOutput()
310 ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i); in computeWithDeviceMemories() local
311 ASSERT_NE(memory, nullptr); in computeWithDeviceMemories()
312 auto& wrapperMemory = inputMemories.emplace_back(Memory(mNnApi.get(), memory)); in computeWithDeviceMemories()
317 ASSERT_EQ(mNnApi->getFL5()->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory), in computeWithDeviceMemories()
[all …]
/packages/services/Car/car-lib/src/com/android/car/internal/
LargeParcelableBase.java
106 SharedMemory memory = SharedMemory.CREATOR.createFromParcel(in); in LargeParcelableBase() local
107 deserializeSharedMemoryAndClose(memory); in LargeParcelableBase()
176 private void writeSharedMemoryCompatibleToParcel(Parcel dest, SharedMemory memory, int flags) { in writeSharedMemoryCompatibleToParcel() argument
178 if (memory == null) { in writeSharedMemoryCompatibleToParcel()
185 memory.writeToParcel(dest, flags); in writeSharedMemoryCompatibleToParcel()
218 SharedMemory memory = null; in serializeParcelToSharedMemory() local
222 memory = SharedMemory.create(LargeParcelableBase.class.getSimpleName(), size); in serializeParcelToSharedMemory()
223 buffer = memory.mapReadWrite(); in serializeParcelToSharedMemory()
245 if (!memory.setProtect(PROT_READ)) { in serializeParcelToSharedMemory()
246 memory.close(); in serializeParcelToSharedMemory()
[all …]
/packages/modules/NeuralNetworks/common/types/include/nnapi/
SharedMemory.h
97 size_t getSize(const SharedMemory& memory);
99 bool isAhwbBlob(const Memory::HardwareBuffer& memory);
102 bool isAhwbBlob(const SharedMemory& memory);
110 GeneralResult<Mapping> map(const SharedMemory& memory);
147 std::vector<RelocationInfoType> relocationInfos, SharedMemory memory) { in create() argument
148 auto mapping = NN_TRY(map(memory)); in create()
150 std::move(relocationInfos), std::move(memory), std::move(mapping)); in create()
153 RelocationTracker(std::vector<RelocationInfoType> relocationInfos, SharedMemory memory, in RelocationTracker() argument
156 kMemory(std::move(memory)), in RelocationTracker()
/packages/modules/NeuralNetworks/tools/api/
NeuralNetworksTypes.t
191 * Failure caused by not enough available memory.
219 * Failure caused by not being able to map a file into memory.
222 * Mitigate by reading its content into memory.
328 * ANeuralNetworksMemory is an opaque type that represents memory.
330 * This type is used to represent shared memory, memory mapped files,
333 * By using shared memory, a program can efficiently communicate to the
336 * should typically create one shared memory object that contains every constant tensor
338 * create shared memory from a file handle.
340 * create shared memory from an AHardwareBuffer handle.
349 * memory object must be aligned on a boundary of a byte size that is a multiple
[all …]
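The NeuralNetworksTypes.t excerpt above explains that ANeuralNetworksMemory is the opaque handle for shared memory and memory-mapped files, typically holding every constant tensor of a model and created from a file handle or an AHardwareBuffer handle. A minimal sketch of the file-descriptor path it describes, using ANeuralNetworksMemory_createFromFd from the NDK header; the helper name wrapModelFile, the read-only protection, and the error handling are illustrative assumptions, not code from this tree:

    #include <android/NeuralNetworks.h>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Wrap an on-disk model file in an ANeuralNetworksMemory so that constant
    // tensors can reference (memory, offset, length) instead of being copied.
    ANeuralNetworksMemory* wrapModelFile(const char* path) {
        int fd = open(path, O_RDONLY);
        if (fd < 0) return nullptr;

        struct stat st{};
        if (fstat(fd, &st) != 0) {
            close(fd);
            return nullptr;
        }

        ANeuralNetworksMemory* memory = nullptr;
        int status = ANeuralNetworksMemory_createFromFd(st.st_size, PROT_READ, fd,
                                                        /*offset=*/0, &memory);
        if (status != ANEURALNETWORKS_NO_ERROR) {
            close(fd);
            return nullptr;
        }
        // The fd's lifetime relative to the memory object is not addressed here;
        // check the NeuralNetworks.h documentation before closing it early.
        return memory;
    }

The resulting handle is released with ANeuralNetworksMemory_free once no model or execution still references it, matching the free declaration visible in the NeuralNetworks.h results further down.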
/packages/services/Car/car-lib/src/android/car/telemetry/
telemetry.proto
84 // Collects all the app start events with the initial used RSS/CACHE/SWAP memory.
86 // Collects memory state of processes in 5-minute buckets (1 memory measurement per bucket).
99 // Collects memory snapshot of processes in 5-minute buckets (1 memory measurement per bucket).
100 // It differs from PROCESS_MEMORY_STATE in that the snapshot can be used for leaked memory
140 // Publisher for device-wide memory statistics as well as process memory statistics.
146 // For reference, collecting process memory on 1 process takes ~70ms, and for 10 processes it takes
150 // The number of seconds in between each memory snapshot.
160 // The maximum number of memory snapshots to collect before terminating the MetricsConfig.
171 // The package names to get process memory statistics on. If specified, it will be published
172 // along with device memory.
[all …]
/packages/modules/NeuralNetworks/shim_and_sl/
ShimConverter.cpp
381 std::unique_ptr<::android::nn::sl_wrapper::Memory> memory = convertFromHAL(nnapi, pool); in convertFromHAL() local
382 if (!memory) { in convertFromHAL()
387 memoryPools.push_back(std::move(memory)); in convertFromHAL()
417 return ShimConvertedModel{.memory = std::move(memoryPools), .models = std::move(result)}; in convertFromHAL()
429 auto memory = std::make_unique<::android::nn::sl_wrapper::Memory>( in convertFromHAL() local
431 if (!memory->isValid()) { in convertFromHAL()
434 return memory; in convertFromHAL()
443 auto memory = std::make_unique<::android::nn::sl_wrapper::Memory>( in convertFromHAL() local
445 if (!memory->isValid()) { in convertFromHAL()
448 return memory; in convertFromHAL()
[all …]
ShimDevice.cpp
198 std::shared_ptr<::android::nn::sl_wrapper::Memory> memory, in ShimBuffer() argument
203 mMemory(std::move(memory)), in ShimBuffer()
253 auto memory = convertFromHAL(mNnApi, src); in copyFrom() local
255 if (!memory) { in copyFrom()
268 Result result = memory->copyTo(*mMemory.get()); in copyFrom()
276 if (memory->getSize() != mMemory->getSize()) { in copyFrom()
287 auto memory = convertFromHAL(mNnApi, dst); in copyTo() local
289 if (!memory) { in copyTo()
294 Result result = mMemory->copyTo(*memory); in copyTo()
301 if (memory->getSize() != mMemory->getSize()) { in copyTo()
[all …]
/packages/modules/NetworkStack/tests/unit/src/android/net/testutils/
HandlerUtilsTest.kt
49 var memory = StringBuilder() in <lambda>() variable
51 memory.append("b") in <lambda>()
55 handlerThread.threadHandler.post({ tempRunnable.memory.append("a"); }) in <lambda>()
58 assertEquals(tempRunnable.memory.toString(), "ab".repeat(i + 1)) in <lambda>()
/packages/modules/NeuralNetworks/runtime/include/
NeuralNetworks.h
298 ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(30);
764 ANeuralNetworksMemory** memory)
847 ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(27);
861 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __NNAPI_INTRODUCED_IN(27);
1081 const ANeuralNetworksMemory* memory,
1538 const ANeuralNetworksMemory* memory, size_t offset,
1678 const ANeuralNetworksMemory* memory, size_t offset,
/packages/apps/Test/connectivity/sl4n/rapidjson/doc/
internals.md
103 * To reduce memory consumption for 64-bit architecture, `SizeType` is typedef as `unsigned` instead…
127 This optimization can reduce memory usage for copy-string. It can also improve cache-coherence thus…
136 // Allocate a memory block.
137 // \param size of the memory block in bytes.
138 // \returns pointer to the memory block.
141 // Resize a memory block.
142 // \param originalPtr The pointer to current memory block. Null pointer is permitted.
143 …(Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
147 // Free a memory block.
148 // \param pointer to the memory block. Null pointer is permitted.
[all …]
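The internals.md excerpt above describes the allocator interface RapidJSON expects: a Malloc that takes a size, a Realloc that also receives the original size because the allocator is not required to book-keep it, and a Free that accepts null pointers. A minimal conforming allocator backed by the C heap, sketched under those assumptions (the class name MallocAllocator is illustrative):

    #include <cstddef>
    #include <cstdlib>

    class MallocAllocator {
    public:
        // Tells RapidJSON that Free() must be called for each allocation.
        static constexpr bool kNeedFree = true;

        // Allocate a memory block; zero-size requests return null.
        void* Malloc(std::size_t size) {
            return size != 0 ? std::malloc(size) : nullptr;
        }

        // Resize a memory block. originalSize is passed in explicitly, as the
        // excerpt notes, so the allocator does not have to track block sizes.
        void* Realloc(void* originalPtr, std::size_t originalSize, std::size_t newSize) {
            (void)originalSize;
            if (newSize == 0) {
                std::free(originalPtr);
                return nullptr;
            }
            return std::realloc(originalPtr, newSize);
        }

        // Free a memory block; a null pointer is permitted.
        static void Free(void* ptr) { std::free(ptr); }
    };

Such a type could serve, for example, as the base allocator of a MemoryPoolAllocator, which then hands out memory sequentially from larger chunks as the DOM results below describe.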
dom.md
3 Document Object Model (DOM) is an in-memory representation of JSON for query and manipulation. The b…
34 The `Encoding` parameter specifies the encoding of JSON String value in memory. Possible options ar…
36 …ding. No matter what encoding was used in JSON files, we can store the strings in UTF-16 in memory.
61 …memory for `Document`/`Value`. `Document` owns, or references to an `Allocator` instance. On the o…
63 …ericDocument` is `MemoryPoolAllocator`. This allocator actually allocate memory sequentially, and …
174 …o be an in situ algorithm, or in-place algorithm, if the extra amount of memory required to execut…
213 In situ parsing minimizes allocation overheads and memory copying. Generally this improves cache co…
217 1. The whole JSON is in memory.
220 …ter parsing, and there are few JSON strings in the DOM, retaining the buffer may be a memory waste.
222 …term JSON that only need to be processed once, and then be released from memory. In practice, thes…
[all …]
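The dom.md excerpt above covers in situ parsing: string values are decoded in place inside the caller's writable JSON buffer instead of being copied into the document's allocator, which pays off only while the whole JSON stays in memory and the buffer outlives the DOM. A small sketch under those assumptions (the helper name parseInSitu and the usage comment are illustrative):

    #include <vector>
    #include "rapidjson/document.h"

    // jsonText must be writable, NUL-terminated, and must outlive *doc: after
    // ParseInsitu() the DOM's string values point directly into this buffer.
    bool parseInSitu(std::vector<char>& jsonText, rapidjson::Document* doc) {
        doc->ParseInsitu(jsonText.data());
        return !doc->HasParseError();
    }

    // Usage sketch:
    //   std::vector<char> buf = readWholeFile("data.json");  // hypothetical helper
    //   buf.push_back('\0');
    //   rapidjson::Document d;
    //   if (parseInSitu(buf, &d)) { /* query d; keep buf alive while d is used */ }

When retaining the source buffer would be wasteful, the ordinary Parse() path copies strings into the document's allocator instead, which is the trade-off the excerpt notes for buffers that cannot be kept around.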
