/packages/modules/NeuralNetworks/common/operations/ |
D | QLSTM.cpp |
    183   const uint32_t batchSize = getSizeOfDimension(inputShape, 0);   in prepare() local
    300   NN_RET_CHECK_EQ(getSizeOfDimension(outputStateShape, 0), batchSize);   in prepare()
    304   NN_RET_CHECK_EQ(getSizeOfDimension(cellStateShape, 0), batchSize);   in prepare()
    392   const uint32_t batchSize = inputShape.dimensions[0];   in execute() local
    657   std::vector<int16_t> inputGateBuffer(batchSize * numUnits);   in execute()
    658   std::vector<int16_t> forgetGateBuffer(batchSize * numUnits);   in execute()
    659   std::vector<int16_t> cellGateBuffer(batchSize * numUnits);   in execute()
    660   std::vector<int16_t> outputGateBuffer(batchSize * numUnits);   in execute()
    661   std::vector<int8_t> buffer8(batchSize * numUnits);   in execute()
    676   inputToForgetEffectiveScaleB, batchSize, inputSize,   in execute()
    [all …]
|
D | BidirectionalSequenceRNN.cpp |
    218   const uint32_t batchSize = getSizeOfDimension(inputShape, 1);   in executeTyped() local
    250   tempHiddenState.resize(std::max(batchSize * fwNumUnits, batchSize * bwNumUnits));   in executeTyped()
    257   const T* inputBatchPtr = input + i * batchSize * inputSize;   in executeTyped()
    260   auxInputBatchPtr = auxInput + i * batchSize * auxInputSize;   in executeTyped()
    263   T* fwOutputBatchPtr = fwOutput + i * batchSize * fwOutputBatchStride;   in executeTyped()
    276   const T* inputBatchPtr = bwInput + i * batchSize * inputSize;   in executeTyped()
    279   auxInputBatchPtr = auxInput + i * batchSize * auxInputSize;   in executeTyped()
    287   bwOutputBatchPtr = fwOutput + i * batchSize * bwOutputBatchStride;   in executeTyped()
    290   bwOutputBatchPtr = bwOutput + i * batchSize * bwOutputBatchStride;   in executeTyped()
    385   const uint32_t batchSize =   in prepare() local
    [all …]
|
D | UnidirectionalSequenceRNN.cpp |
    97    const uint32_t batchSize = getSizeOfDimension(inputShape, 1);   in executeTyped() local
    110   input += batchSize * inputSize;   in executeTyped()
    112   output += batchSize * numUnits;   in executeTyped()
    123   std::copy(hiddenState, hiddenState + batchSize * numUnits, stateOutput);   in executeTyped()
    162   const uint32_t batchSize =   in prepare() local
    179   NN_RET_CHECK_EQ(batchSize, getSizeOfDimension(hiddenState, 0));   in prepare()
    184   output.dimensions[0] = timeMajor ? maxTime : batchSize;   in prepare()
    185   output.dimensions[1] = timeMajor ? batchSize : maxTime;   in prepare()
    192   outputStateShape.dimensions[0] = batchSize;   in prepare()
|
D | LSTM.cpp |
    434   const uint32_t batchSize = (inputRank == 3) ? getSizeOfDimension(input_shape, timeMajor ? 1 : 0)   in LSTMEvalFloat32() local
    441   batchInputShape.dimensions = {batchSize, inputSize};   in LSTMEvalFloat32()
    442   const uint32_t batchInputSize = batchSize * inputSize;   in LSTMEvalFloat32()
    443   const uint32_t batchOutputSize = batchSize * outputSize;   in LSTMEvalFloat32()
    470   output_state_in_buffer, output_state_in_buffer + batchSize * outputSize);   in LSTMEvalFloat32()
    472   cell_state_in_buffer + batchSize * numCells);   in LSTMEvalFloat32()
    507   output_state_out_buffer + batchSize * outputSize);   in LSTMEvalFloat32()
    509   cell_state_out_buffer + batchSize * numCells);   in LSTMEvalFloat32()
    554   const uint32_t batchSize = (inputRank == 3) ? getSizeOfDimension(input_shape, timeMajor ? 1 : 0)   in LSTMEvalFloat16() local
    561   batchInputShape.dimensions = {batchSize, inputSize};   in LSTMEvalFloat16()
    [all …]
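The pattern shared by the sequence ops above is that batchSize is read from dimension 1 of a time-major [maxTime, batchSize, inputSize] input (dimension 0 when batch-major), and each time step is then processed as a contiguous block of batchSize * inputSize values. A minimal Java sketch of that per-step slicing, using hypothetical names (stepSlice, dims) rather than the actual NNAPI helpers:

    // Illustrative per-step slicing for a contiguous time-major
    // [maxTime, batchSize, inputSize] buffer; not the real LSTM.cpp code.
    final class TimeMajorSlicing {
        static float[] stepSlice(float[] input, int[] dims, int t) {
            int maxTime = dims[0], batchSize = dims[1], inputSize = dims[2];
            if (t < 0 || t >= maxTime) throw new IllegalArgumentException("bad step " + t);
            int stride = batchSize * inputSize;       // elements consumed per time step
            float[] slice = new float[stride];
            System.arraycopy(input, t * stride, slice, 0, stride);
            return slice;                             // logical shape [batchSize, inputSize]
        }

        public static void main(String[] args) {
            int maxTime = 4, batchSize = 2, inputSize = 3;
            float[] input = new float[maxTime * batchSize * inputSize];
            for (int i = 0; i < input.length; i++) input[i] = i;
            float[] step2 = stepSlice(input, new int[] {maxTime, batchSize, inputSize}, 2);
            System.out.println(step2.length + " elements, first = " + step2[0]); // 6 elements, first = 12.0
        }
    }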
|
D | UnidirectionalSequenceLSTM.cpp |
    205   const uint32_t batchSize = getSizeOfDimension(inputShape, isTimeMajor(context) ? 1 : 0);   in prepare() local
    322   NN_RET_CHECK_EQ(getSizeOfDimension(outputStateShape, 0), batchSize);   in prepare()
    326   NN_RET_CHECK_EQ(getSizeOfDimension(cellStateShape, 0), batchSize);   in prepare()
    386   outputStateOutTensor.dimensions[0] = batchSize;   in prepare()
    392   cellStateOutTensor.dimensions[0] = batchSize;   in prepare()
|
D | ResizeImageOps.cpp |
    70    const int batchSize = getSizeOfDimension(inputShape, 0);   in resizeNearestNeighbor() local
    83    for (int b = 0; b < batchSize; ++b) {   in resizeNearestNeighbor()
|
D | GenerateProposals.cpp |
    975    uint32_t batchSize = height * width * numAnchors;   in generateProposalsNhwcFloat32Compute() local
    976    uint32_t roiBufferSize = batchSize * kRoiDim;   in generateProposalsNhwcFloat32Compute()
    1004   tempRoiShape.dimensions = {batchSize, kRoiDim};   in generateProposalsNhwcFloat32Compute()
    1006   tempBBoxDeltasShape.dimensions = {batchSize, kRoiDim};   in generateProposalsNhwcFloat32Compute()
    1007   std::vector<int32_t> tempBatchSplitData(batchSize, 0);   in generateProposalsNhwcFloat32Compute()
    1008   Shape tempbatchSplitShape = {.dimensions = {batchSize}};   in generateProposalsNhwcFloat32Compute()
    1024   std::vector<uint32_t> select(batchSize);   in generateProposalsNhwcFloat32Compute()
    1054   scoresBase += batchSize;   in generateProposalsNhwcFloat32Compute()
|
/packages/apps/Calendar/src/com/android/calendar/alerts/ |
D | AlarmScheduler.java |
    111   int batchSize, long currentMillis) {   in scheduleNextAlarm() argument
    118   context.getContentResolver(), alarmManager, batchSize, currentMillis);   in scheduleNextAlarm()
    178   int batchSize, long currentMillis) {   in queryNextReminderAndSchedule() argument
    200   while (index++ < batchSize && instancesCursor.moveToNext()) {   in queryNextReminderAndSchedule()
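The AlarmScheduler hits show the common "consume at most batchSize rows" loop over a query cursor. A hedged, plain-Java sketch of the same bound, with an Iterator standing in for the ContentResolver cursor (all names below are illustrative):

    import java.util.Iterator;
    import java.util.List;

    final class BoundedScan {
        // Consume at most batchSize rows; returns how many were actually handled.
        static <T> int scheduleUpTo(Iterator<T> rows, int batchSize) {
            int index = 0;
            while (index < batchSize && rows.hasNext()) {
                T row = rows.next();
                System.out.println("would schedule: " + row);
                index++;
            }
            return index;
        }

        public static void main(String[] args) {
            List<String> pending = List.of("a", "b", "c", "d", "e");
            System.out.println(scheduleUpTo(pending.iterator(), 3)); // 3
        }
    }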
|
/packages/apps/RemoteProvisioner/src/com/android/remoteprovisioner/ |
D | PeriodicProvisioner.java |
    158   int batchSize = min(keysToCertify, SAFE_CSR_BATCH_SIZE);   in run() local
    159   Provisioner.provisionCerts(batchSize,   in run()
    165   keysToCertify -= batchSize;   in run()
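The provisioner caps each batch at a safety limit and decrements the remaining count until everything is certified. A minimal sketch of that loop, assuming hypothetical names (SAFE_BATCH, provisionChunk) rather than the real RemoteProvisioner API:

    final class CappedBatches {
        static final int SAFE_BATCH = 20; // illustrative cap, not the real SAFE_CSR_BATCH_SIZE

        static void provisionAll(int keysToCertify) {
            while (keysToCertify > 0) {
                int batchSize = Math.min(keysToCertify, SAFE_BATCH);
                provisionChunk(batchSize);
                keysToCertify -= batchSize;
            }
        }

        static void provisionChunk(int batchSize) {
            System.out.println("provisioning " + batchSize + " keys");
        }

        public static void main(String[] args) {
            provisionAll(45); // prints 20, 20, 5
        }
    }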
|
/packages/apps/QuickSearchBox/src/com/android/quicksearchbox/util/ |
D | BatchingNamedTaskExecutor.java |
    64    public void executeNextBatch(int batchSize) {   in executeNextBatch() argument
    67    int count = Math.min(mQueuedTasks.size(), batchSize);   in executeNextBatch()
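executeNextBatch runs at most batchSize of the queued tasks and leaves the rest for a later call. A hedged stand-in for the QuickSearchBox executor, using a plain Deque of Runnables:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class BatchingExecutor {
        private final Deque<Runnable> queued = new ArrayDeque<>();

        void submit(Runnable task) { queued.add(task); }

        // Run at most batchSize queued tasks, in submission order.
        void executeNextBatch(int batchSize) {
            int count = Math.min(queued.size(), batchSize);
            for (int i = 0; i < count; i++) {
                queued.poll().run();
            }
        }

        public static void main(String[] args) {
            BatchingExecutor e = new BatchingExecutor();
            for (int i = 0; i < 5; i++) { int n = i; e.submit(() -> System.out.println("task " + n)); }
            e.executeNextBatch(3); // runs tasks 0..2; tasks 3 and 4 stay queued
        }
    }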
|
/packages/apps/Contacts/src/com/android/contacts/ |
D | ContactSaveService.java |
    1278   final int batchSize = MAX_CONTACTS_PROVIDER_BATCH_SIZE;   in splitContact() local
    1280   final ArrayList<ContentProviderOperation> operations = new ArrayList<>(batchSize);   in splitContact()
    1323   final int batchSize = MAX_CONTACTS_PROVIDER_BATCH_SIZE;   in buildSplitTwoContacts() local
    1328   if (operations.size() > 0 && operations.size() % batchSize == 0) {   in buildSplitTwoContacts()
    1426   final int batchSize = MAX_CONTACTS_PROVIDER_BATCH_SIZE;   in joinSeveralContacts() local
    1427   final ArrayList<ContentProviderOperation> operations = new ArrayList<>(batchSize);   in joinSeveralContacts()
    1434   if (operations.size() > 0 && operations.size() % batchSize == 0) {   in joinSeveralContacts()
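These hits show the "flush whenever the pending list reaches a multiple of batchSize" check. A hedged sketch of that pattern, with a List of strings standing in for ArrayList<ContentProviderOperation> and a print standing in for applying the batch; the trailing flush of the final partial batch is the usual companion to the modulo test:

    import java.util.ArrayList;
    import java.util.List;

    final class BatchedApply {
        static void applyInBatches(List<String> pendingOps, int batchSize) {
            List<String> operations = new ArrayList<>(batchSize);
            for (String op : pendingOps) {
                operations.add(op);
                if (operations.size() % batchSize == 0) {
                    flush(operations); // full batch reached
                }
            }
            if (!operations.isEmpty()) {
                flush(operations); // remainder smaller than batchSize
            }
        }

        static void flush(List<String> operations) {
            System.out.println("applying " + operations.size() + " operations");
            operations.clear();
        }

        public static void main(String[] args) {
            List<String> ops = new ArrayList<>();
            for (int i = 0; i < 7; i++) ops.add("op" + i);
            applyInBatches(ops, 3); // applies 3, 3, then 1
        }
    }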
|
/packages/apps/Bluetooth/src/com/android/bluetooth/opp/ |
D | BluetoothOppReceiver.java |
    83    int batchSize = mOppManager.getBatchSize();   in onReceive() local
    85    toastMsg = context.getString(R.string.bt_toast_5, Integer.toString(batchSize),   in onReceive()
|
/packages/modules/NeuralNetworks/runtime/test/ |
D | TestValidateOperations.cpp |
    3925   const uint32_t batchSize = 2;   in bidirectionlSequenceRNNTest() local
    3930   uint32_t inputDims[3] = {maxTime, batchSize, inputSize};   in bidirectionlSequenceRNNTest()
    3934   uint32_t hiddenStateDims[2] = {batchSize, numUnits};   in bidirectionlSequenceRNNTest()
    3935   uint32_t outputDims[2] = {batchSize, numUnits};   in bidirectionlSequenceRNNTest()
    4001   const uint32_t batchSize = 2;   in unidirectionlSequenceRNNTest() local
    4006   uint32_t inputDims[3] = {maxTime, batchSize, inputSize};   in unidirectionlSequenceRNNTest()
    4010   uint32_t hiddenStateDims[2] = {batchSize, numUnits};   in unidirectionlSequenceRNNTest()
    4011   uint32_t outputDims[2] = {batchSize, numUnits};   in unidirectionlSequenceRNNTest()
    4067   const uint32_t batchSize = 3;   in unidirectionalSequenceLSTMTest() local
    4072   uint32_t inputDims[3] = {maxTime, batchSize, inputSize};   in unidirectionalSequenceLSTMTest()
    [all …]
|
/packages/apps/DocumentsUI/src/com/android/documentsui/services/ |
D | CopyJob.java |
    275   final boolean verifySpaceAvailable(long batchSize) {   in verifySpaceAvailable() argument
    279   if (batchSize >= 0) {   in verifySpaceAvailable()
    287   available = (batchSize <= root.availableBytes);   in verifySpaceAvailable()
|
/packages/apps/TV/src/com/android/tv/data/epg/ |
D | EpgFetcherImpl.java |
    458   int batchSize = (int) Math.max(1, mBackendKnobsFlags.epgFetcherChannelsPerProgramFetch());   in batchFetchEpg() local
    459   for (Iterable<EpgChannel> batch : Iterables.partition(epgChannels, batchSize)) {   in batchFetchEpg()
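Iterables.partition here is Guava, splitting the channel list into chunks of batchSize. A dependency-free sketch of the same batching over a List using subList, with illustrative names (channels, fetchPrograms):

    import java.util.List;

    final class PartitionedFetch {
        static void batchFetch(List<String> channels, int batchSize) {
            for (int start = 0; start < channels.size(); start += batchSize) {
                int end = Math.min(start + batchSize, channels.size());
                fetchPrograms(channels.subList(start, end)); // one batch per request
            }
        }

        static void fetchPrograms(List<String> batch) {
            System.out.println("fetching programs for " + batch);
        }

        public static void main(String[] args) {
            batchFetch(List.of("ch1", "ch2", "ch3", "ch4", "ch5"), 2); // [ch1, ch2], [ch3, ch4], [ch5]
        }
    }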
|
/packages/modules/NeuralNetworks/tools/api/ |
D | types.spec |
    3599   * it is set to true, then the input has a shape [maxTime, batchSize,
    3600   * inputSize], otherwise the input has a shape [batchSize, maxTime,
    3609   * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
    3618   * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
    3623   * it is set to true, then the input has a shape [maxTime, batchSize,
    3624   * auxInputSize], otherwise the input has a shape [batchSize, maxTime,
    3656   * two dimensions are [maxTime, batchSize], otherwise they are set to
    3657   * [batchSize, maxTime]. If mergeOutputs is set to true, then the third
    3664   * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
    3665   * [batchSize, maxTime, bwNumUnits].
    [all …]
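The spec text above describes how the first two axes swap with the timeMajor flag while the feature axis stays last. A small illustrative Java helper (not part of any NNAPI surface) that builds the dimensions accordingly:

    import java.util.Arrays;

    final class SequenceShapes {
        // Returns [maxTime, batchSize, featureSize] when timeMajor,
        // otherwise [batchSize, maxTime, featureSize].
        static int[] sequenceDims(boolean timeMajor, int maxTime, int batchSize, int featureSize) {
            return timeMajor
                    ? new int[] {maxTime, batchSize, featureSize}
                    : new int[] {batchSize, maxTime, featureSize};
        }

        public static void main(String[] args) {
            System.out.println(Arrays.toString(sequenceDims(true, 10, 2, 8)));  // [10, 2, 8]
            System.out.println(Arrays.toString(sequenceDims(false, 10, 2, 8))); // [2, 10, 8]
        }
    }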
|