/packages/modules/NeuralNetworks/common/ |
D | CpuExecutor.cpp |
    85  bool setOutputShape(uint32_t index, const Shape& shape) override;
    127  return getInputInfo(index)->shape(); in getInputShape()
    143  return getOutputInfo(index)->shape(); in getOutputShape()
    168  bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape, int* result) { in setInfoAndAllocateIfNeeded() argument
    172  if (info->type != shape.type) { in setInfoAndAllocateIfNeeded()
    177  if (info->scale != shape.scale) { in setInfoAndAllocateIfNeeded()
    182  if (info->zeroPoint != shape.offset) { in setInfoAndAllocateIfNeeded()
    187  if (info->extraParams != shape.extraParams) { in setInfoAndAllocateIfNeeded()
    194  auto combined = combineDimensions(shape.dimensions, info->dimensions); in setInfoAndAllocateIfNeeded()
    201  info->type = shape.type; in setInfoAndAllocateIfNeeded()
    [all …]
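The setInfoAndAllocateIfNeeded() matches above show the output-shape reconciliation pattern: the operand's type, scale, zero point and extra params must agree with the computed Shape, and the dimension vectors are merged via combineDimensions(). A hedged sketch of what such a merge could look like, assuming 0 marks an unknown dimension; the signature and behavior here are a reconstruction for illustration, not the upstream code:

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Merge two dimension vectors of the same rank, where 0 means "unknown".
    // Returns std::nullopt on a hard mismatch (different rank or conflicting known sizes).
    std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
                                                           const std::vector<uint32_t>& rhs) {
        if (lhs.empty()) return rhs;  // one side fully unspecified: take the other
        if (rhs.empty()) return lhs;
        if (lhs.size() != rhs.size()) return std::nullopt;
        std::vector<uint32_t> combined(lhs.size());
        for (size_t i = 0; i < lhs.size(); ++i) {
            if (lhs[i] != 0 && rhs[i] != 0 && lhs[i] != rhs[i]) return std::nullopt;
            combined[i] = lhs[i] != 0 ? lhs[i] : rhs[i];
        }
        return combined;
    }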
|
D | IndexedShapeWrapper.cpp |
    28  IndexedShapeWrapper::IndexedShapeWrapper(const Shape& wrapped_shape) : shape(&wrapped_shape) { in IndexedShapeWrapper()
    29  strides.resize(shape->dimensions.size()); in IndexedShapeWrapper()
    32  strides[i] = shape->dimensions[i + 1] * strides[i + 1]; in IndexedShapeWrapper()
    41  if (index->at(i) < shape->dimensions[i] - 1) { in nextIndexInplace()
    52  if (index->at(i) == shape->dimensions[i]) { in nextIndexInplace()
    79  uint32_t currentDimSize = shape->dimensions[shape->dimensions.size() - i]; in broadcastedIndexToFlatIndex()
    89  if (index.size() != shape->dimensions.size()) { in isValid()
    92  << toString(shape->dimensions); in isValid()
    96  if (index[i] >= shape->dimensions[i]) { in isValid()
    98  << " is out of range for shape: " << toString(shape->dimensions); in isValid()
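Lines 28-32 above reveal the stride setup: strides are built back to front, with the innermost stride equal to 1. A small standalone sketch of that row-major stride computation (the helper name computeStrides is made up for illustration):

    #include <cstdint>
    #include <vector>

    // Row-major strides: strides[last] = 1, strides[i] = dims[i + 1] * strides[i + 1].
    std::vector<uint32_t> computeStrides(const std::vector<uint32_t>& dims) {
        std::vector<uint32_t> strides(dims.size());
        if (strides.empty()) return strides;
        strides.back() = 1;  // innermost dimension is contiguous
        for (int i = static_cast<int>(dims.size()) - 2; i >= 0; --i) {
            strides[i] = dims[i + 1] * strides[i + 1];
        }
        return strides;
    }

A multi-dimensional index then maps to a flat offset as the dot product of the index with these strides, which is what the wrapper's flat-index helpers rely on.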
|
D | OperationsUtils.cpp |
    134  uint32_t getNumberOfElements(const Shape& shape) { in getNumberOfElements() argument
    136  for (size_t i = 0; i < shape.dimensions.size(); i++) { in getNumberOfElements()
    137  count *= shape.dimensions[i]; in getNumberOfElements()
    142  uint32_t getNumberOfElements(const Shape& shape, size_t firstAxisInclusive, in getNumberOfElements() argument
    146  nnAssert(lastAxisExclusive <= shape.dimensions.size()); in getNumberOfElements()
    149  count *= shape.dimensions[i]; in getNumberOfElements()
    154  uint32_t getNumberOfDimensions(const Shape& shape) { in getNumberOfDimensions() argument
    155  return shape.dimensions.size(); in getNumberOfDimensions()
    158  uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) { in getSizeOfDimension() argument
    159  nnAssert(0 <= dimensionIdx && dimensionIdx < shape.dimensions.size()); in getSizeOfDimension()
    [all …]
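The matched lines are enough to see what these shape helpers compute: element counts are plain products over the dimension vector. A minimal sketch, assuming Shape exposes a std::vector<uint32_t> dimensions member (the real struct also carries type, scale and zero-point information):

    #include <cstdint>
    #include <vector>

    struct Shape {
        std::vector<uint32_t> dimensions;  // simplified stand-in for the NNAPI Shape
    };

    // Total element count: product of all dimensions.
    uint32_t getNumberOfElements(const Shape& shape) {
        uint32_t count = 1;
        for (size_t i = 0; i < shape.dimensions.size(); i++) {
            count *= shape.dimensions[i];
        }
        return count;
    }

    // Element count over the half-open axis range [firstAxisInclusive, lastAxisExclusive).
    uint32_t getNumberOfElements(const Shape& shape, size_t firstAxisInclusive,
                                 size_t lastAxisExclusive) {
        uint32_t count = 1;
        for (size_t i = firstAxisInclusive; i < lastAxisExclusive; i++) {
            count *= shape.dimensions[i];
        }
        return count;
    }

    uint32_t getNumberOfDimensions(const Shape& shape) {
        return shape.dimensions.size();
    }

    uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
        return shape.dimensions[dimensionIdx];  // caller must pass a valid index
    }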
|
/packages/modules/NeuralNetworks/common/operations/ |
D | BidirectionalSequenceLSTM.cpp |
    336  NN_CHECK_EQ(aux_input_->shape().dimensions[0], input_->shape().dimensions[0]); in Prepare()
    337  NN_CHECK_EQ(aux_input_->shape().dimensions[1], input_->shape().dimensions[1]); in Prepare()
    382  const Shape& inputShape = input_->shape(); in Prepare()
    419  *fwOutputActivationState = fw_activation_state_->shape(); in Prepare()
    420  *fwOutputCellState = fw_cell_state_->shape(); in Prepare()
    421  *bwOutputActivationState = bw_activation_state_->shape(); in Prepare()
    422  *bwOutputCellState = bw_cell_state_->shape(); in Prepare()
    442  std::vector<uint32_t> fw_output_dims = input_->shape().dimensions; in Eval()
    459  Shape bwInputShape = input_->shape(); in Eval()
    463  bwInputShape = aux_input_->shape(); in Eval()
    [all …]
|
D | RNN.cpp |
    68  const Shape& inputShape = input->shape(); in Prepare()
    84  RNNStep<_Float16>(reinterpret_cast<_Float16*>(input_->buffer), input_->shape(), in Eval()
    87  reinterpret_cast<_Float16*>(weights_->buffer), weights_->shape(), in Eval()
    89  recurrent_weights_->shape(), activation_, in Eval()
    92  sizeof(_Float16) * getNumberOfElements(output_->shape())); in Eval()
    96  RNNStep<float>(reinterpret_cast<float*>(input_->buffer), input_->shape(), in Eval()
    99  reinterpret_cast<float*>(weights_->buffer), weights_->shape(), in Eval()
    101  recurrent_weights_->shape(), activation_, in Eval()
    104  sizeof(float) * getNumberOfElements(output_->shape())); in Eval()
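RNNStep<T>() above is called once per data type with raw buffers plus their shapes. For orientation, a basic (vanilla) RNN cell computes output = activation(input * W^T + state * R^T + bias); the following is a hypothetical float-only sketch of that step, not the NNAPI implementation (which also handles FP16 and the requested activation kind):

    #include <cmath>
    #include <cstdint>

    // One step of a vanilla RNN over a batch; tanh is assumed as the activation here.
    void rnnStepSketch(const float* input, uint32_t batchSize, uint32_t inputSize,
                       const float* weights,           // [numUnits, inputSize]
                       const float* recurrentWeights,  // [numUnits, numUnits]
                       const float* bias,              // [numUnits]
                       const float* hiddenStateIn,     // [batchSize, numUnits]
                       uint32_t numUnits, float* hiddenStateOut) {
        for (uint32_t b = 0; b < batchSize; ++b) {
            for (uint32_t u = 0; u < numUnits; ++u) {
                float acc = bias[u];
                for (uint32_t i = 0; i < inputSize; ++i) {
                    acc += input[b * inputSize + i] * weights[u * inputSize + i];
                }
                for (uint32_t r = 0; r < numUnits; ++r) {
                    acc += hiddenStateIn[b * numUnits + r] * recurrentWeights[u * numUnits + r];
                }
                hiddenStateOut[b * numUnits + u] = std::tanh(acc);
            }
        }
    }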
|
D | SVDF.cpp |
    95  const Shape& inputShape = input->shape(); in Prepare()
    114  std::vector<float> inputDataFloat32(getNumberOfElements(input_->shape())); in Eval()
    116  std::vector<float> inputStateDataFloat32(getNumberOfElements(state_in_->shape())); in Eval()
    119  std::vector<float> biasDataFloat32(getNumberOfElements(bias_->shape())); in Eval()
    125  getNumberOfElements(weights_feature_->shape())); in Eval()
    128  std::vector<float> weightsTimeDataFloat32(getNumberOfElements(weights_time_->shape())); in Eval()
    131  std::vector<float> outputDataFloat32(getNumberOfElements(output_->shape())); in Eval()
    132  std::vector<float> outputStateDataFloat32(getNumberOfElements(state_out_->shape())); in Eval()
|
D | QuantizedLSTMTest.cpp |
    34  std::vector<uint32_t> shape; member
    38  OperandTypeParams(Type type, std::vector<uint32_t> shape, float scale, int32_t zeroPoint) in OperandTypeParams()
    39  : type(type), shape(shape), scale(scale), zeroPoint(zeroPoint) {} in OperandTypeParams()
    56  OperandType curType(curOTP.type, curOTP.shape, curOTP.scale, curOTP.zeroPoint); in QuantizedLSTMOpModel()
    60  const uint32_t numBatches = inputOperandTypeParams[0].shape[0]; in QuantizedLSTMOpModel()
    61  inputSize_ = inputOperandTypeParams[0].shape[0]; in QuantizedLSTMOpModel()
    63  inputOperandTypeParams[QuantizedLSTMCell::kPrevCellStateTensor].shape[1]; in QuantizedLSTMOpModel()
    188  for (int d : params.shape) { in initializeInputData()
|
D | LogSoftmax.cpp |
    42  inline bool compute(const T* input, const Shape& shape, T beta, uint32_t axis, T* output) { in compute() argument
    43  const uint32_t outerSize = getNumberOfElements(shape, 0, axis); in compute()
    44  const uint32_t axisSize = getSizeOfDimension(shape, axis); in compute()
    45  const uint32_t innerSize = getNumberOfElements(shape, axis + 1, getNumberOfDimensions(shape)); in compute()
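Lines 43-45 show the usual reduction layout: everything before `axis` is folded into outerSize, everything after it into innerSize, and the elements of one slice along `axis` sit innerSize apart in memory. A hypothetical traversal sketch built on that decomposition (not the LogSoftmax kernel itself; the helper name is invented):

    #include <cstdint>

    // Visit every slice that runs along `axis`; a slice's elements are strided by innerSize.
    template <typename T, typename Fn>
    void forEachAxisSlice(const T* input, uint32_t outerSize, uint32_t axisSize,
                          uint32_t innerSize, Fn&& visit) {
        for (uint32_t outer = 0; outer < outerSize; ++outer) {
            for (uint32_t inner = 0; inner < innerSize; ++inner) {
                const T* slice = input + (outer * axisSize * innerSize) + inner;
                visit(slice, axisSize, innerSize);  // e.g. max, sum of exp(beta * x), ...
            }
        }
    }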
|
D | QuantizedLSTM.cpp |
    210  const std::vector<uint32_t> submatrixDims = submatrix->shape().dimensions; in assignWeightsSubmatrix()
    332  *cellStateOutShape = prevCellState->shape(); in prepare()
    333  *outputShape = prevOutput->shape(); in prepare()
    412  GetBuffer<const uint8_t>(input_), convertShapeToDims(input_->shape()), in eval()
    413  GetBuffer<const uint8_t>(prevOutput_), convertShapeToDims(prevOutput_->shape()), in eval()
    416  convertShapeToDims(prevCellState_->shape()), in eval()
    418  GetBuffer<int16_t>(cellStateOut_), convertShapeToDims(cellStateOut_->shape()), in eval()
    419  GetBuffer<uint8_t>(output_), convertShapeToDims(output_->shape()), concatTemp.data(), in eval()
|
D | EmbeddingLookup.cpp |
    37  const int row_size = value_->shape().dimensions[0]; in Eval()
    41  for (uint32_t i = 0; i < lookup_->shape().dimensions[0]; i++) { in Eval()
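The two matches above describe the whole operation: value_ is treated as a table whose first dimension is the row count, and each entry of lookup_ selects one row to copy to the output. A hypothetical flat-buffer sketch of that loop (names and the fail-on-out-of-range behavior are assumptions):

    #include <cstdint>
    #include <cstring>

    // Copy one table row per lookup key: output row i = values row lookups[i].
    bool embeddingLookupSketch(const int32_t* lookups, uint32_t numLookups,
                               const float* values, uint32_t numRows, uint32_t rowElements,
                               float* output) {
        for (uint32_t i = 0; i < numLookups; ++i) {
            const int32_t row = lookups[i];
            if (row < 0 || static_cast<uint32_t>(row) >= numRows) return false;  // bad key
            std::memcpy(output + i * rowElements,
                        values + static_cast<uint32_t>(row) * rowElements,
                        rowElements * sizeof(float));
        }
        return true;
    }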
|
D | HashtableLookup.cpp |
    47  const int num_rows = value_->shape().dimensions[0]; in Eval()
    52  for (int i = 0; i < static_cast<int>(lookup_->shape().dimensions[0]); i++) { in Eval()
|
/packages/modules/NeuralNetworks/common/include/ |
D | CpuOperationUtils.h |
    35  inline tflite::Dims<4> convertShapeToDims(const Shape& shape) { in convertShapeToDims() argument
    36  CHECK_LE(shape.dimensions.size(), 4u); in convertShapeToDims()
    41  int src = static_cast<int>(shape.dimensions.size()) - i - 1; in convertShapeToDims()
    43  dims.sizes[i] = static_cast<int>(getSizeOfDimension(shape, src)); in convertShapeToDims()
    56  inline tflite::RuntimeShape convertShapeToTflshape(const Shape& shape) { in convertShapeToTflshape() argument
    57  std::vector<int32_t> tflShapeDim(shape.dimensions.begin(), shape.dimensions.end()); in convertShapeToTflshape()
    162  bool initialize(const T* data, const Shape& shape) { in initialize() argument
    164  mShape = shape; in initialize()
    166  return convertNchwToNhwc(mDataOriginal, shape, &mDataNhwc, &mShape); in initialize()
    186  bool initialize(T* data, const Shape& shape) { in initialize() argument
    [all …]
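convertShapeToDims() above adapts an NNAPI Shape (outermost dimension first) to TFLite's legacy Dims<4> layout (innermost dimension first), padding missing dimensions with 1. A self-contained sketch of that reversal; Dims4 below is a simplified stand-in for tflite::Dims<4>, used only to keep the example compilable:

    #include <cstdint>
    #include <vector>

    struct Dims4 {
        int sizes[4];  // stand-in: real tflite::Dims<4> also carries strides
    };

    Dims4 convertShapeToDims(const std::vector<uint32_t>& dimensions) {
        Dims4 dims;
        // Reverse the dimension order (NHWC outermost-first -> innermost-first)
        // and pad unused entries with 1.
        for (int i = 0; i < 4; ++i) {
            int src = static_cast<int>(dimensions.size()) - i - 1;
            dims.sizes[i] = (src >= 0) ? static_cast<int>(dimensions[src]) : 1;
        }
        return dims;
    }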
|
D | OperationsUtils.h |
    89  virtual bool setOutputShape(uint32_t index, const Shape& shape) = 0;
    131  uint32_t getNumberOfElements(const Shape& shape);
    132  uint32_t getNumberOfElements(const Shape& shape, size_t firstAxisInclusive,
    135  uint32_t getNumberOfDimensions(const Shape& shape);
    137  uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
    139  uint32_t hasKnownRank(const Shape& shape);
    144  inline bool handleNegativeAxis(const Shape& shape, int32_t* axis) { in handleNegativeAxis() argument
    145  return handleNegativeAxis(getNumberOfDimensions(shape), axis); in handleNegativeAxis()
    326  inline bool transposeFirstTwoDimensions(const T* buffer, const Shape& shape, T* transposedBuffer) { in transposeFirstTwoDimensions() argument
    327  const int numDims = getNumberOfDimensions(shape); in transposeFirstTwoDimensions()
    [all …]
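Line 145 shows that the Shape overload of handleNegativeAxis() simply forwards the tensor rank to a rank-based overload. A hedged sketch of what that normalization does, i.e. mapping an axis in [-rank, rank) onto [0, rank); the exact error handling in the real header may differ:

    #include <cstdint>

    // Normalize a possibly negative axis; returns false if it is out of range.
    inline bool handleNegativeAxis(int32_t numberOfDimensions, int32_t* axis) {
        if (*axis < -numberOfDimensions || *axis >= numberOfDimensions) {
            return false;
        }
        if (*axis < 0) {
            *axis += numberOfDimensions;  // e.g. axis -1 becomes rank - 1
        }
        return true;
    }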
|
D | CpuExecutor.h |
    76  Shape shape() const { in shape() function
    276  return operand->shape().dimensions.size(); in NumDimensions()
    280  return operand->shape().dimensions[i]; in SizeOfDimension()
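The shape() accessor at line 76 is what the operations above call to obtain a Shape for a runtime operand; presumably it just repackages the operand's metadata. A hypothetical sketch under that assumption (all type and field names below are simplified stand-ins mirroring the checks seen in CpuExecutor.cpp earlier):

    #include <cstdint>
    #include <vector>

    enum class OperandType { TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM /* ... */ };

    struct Shape {
        OperandType type;
        std::vector<uint32_t> dimensions;
        float scale;
        int32_t offset;
    };

    struct RunTimeOperandInfo {
        OperandType type;
        std::vector<uint32_t> dimensions;
        float scale;
        int32_t zeroPoint;

        // Bundle the operand's metadata into a Shape for the operation kernels.
        Shape shape() const { return {type, dimensions, scale, zeroPoint}; }
    };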
|
/packages/modules/NeuralNetworks/tools/api/ |
D | types.spec |
    425  * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
    454  * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
    478  * * 0: The output 4-D tensor, of shape
    514  * * 0 ~ n-1: The list of n input tensors, of shape
    536  * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
    1119  * * 1: A 2-D tensor, specifying the weights, of shape
    1122  * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
    1138  * * 0: The output tensor, of shape [batch_size, num_units]. %{BeforeNNAPILevel3For}
    1163  * For example, if Values has shape of [40, 200, 300],
    1164  * Keys should have a shape of [40]. If Lookups tensor has shape
    [all …]
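The CONCATENATION rule quoted at line 536 is the one non-obvious shape formula here: all inputs agree on every dimension except the concatenation axis, whose sizes add up. A small illustrative helper computing that output shape (not part of the spec; it assumes at least one input and matching non-axis dimensions):

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> concatOutputShape(const std::vector<std::vector<uint32_t>>& inputs,
                                            uint32_t axis) {
        std::vector<uint32_t> out = inputs[0];
        out[axis] = 0;
        for (const auto& in : inputs) {
            out[axis] += in[axis];  // sum(Daxis(i)); all other dimensions stay as-is
        }
        return out;
    }
    // e.g. {2, 3, 4} and {2, 5, 4} concatenated along axis 1 give {2, 8, 4}.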
|
/packages/apps/ThemePicker/src/com/android/customization/model/theme/custom/ |
D | ColorOptionsProvider.java |
    92  Drawable shape = loadShape(shapePackage); in loadOptions() local
    93  addDefault(previewIcons, shape); in loadOptions()
    107  option.setShapeDrawable(shape); in loadOptions()
    116  private void addDefault(List<Drawable> previewIcons, Drawable shape) { in addDefault() argument
    139  option.setShapeDrawable(shape); in addDefault()
    162  PathShape shape = new PathShape(PathParser.createPathFromPathData(path), in loadShape() local
    164  shapeDrawable = new ShapeDrawable(shape); in loadShape()
|
D | ShapeOptionsProvider.java |
    99  PathShape shape = new PathShape(path, PATH_SIZE, PATH_SIZE); in createShapeDrawable() local
    100  ShapeDrawable shapeDrawable = new ShapeDrawable(shape); in createShapeDrawable()
    135  String shape = overlayRes.getString(overlayRes.getIdentifier(CONFIG_ICON_MASK, "string", in loadPath() local
    138  if (!TextUtils.isEmpty(shape)) { in loadPath()
    139  return PathParser.createPathFromPathData(shape); in loadPath()
|
/packages/apps/Test/connectivity/sl4n/rapidjson/doc/diagram/ |
D | move2.dot |
    19  node [shape=Mrecord, style=filled, colorscheme=spectral7]
    24  c13 [shape="none", label="...", style="solid"]
    42  node [shape=Mrecord, style=filled, colorscheme=spectral7]
    48  c23 [shape=none, label="...", style="solid"]
    53  c33 [shape="none", label="...", style="solid"]
|
D | move3.dot |
    20  node [shape=Mrecord, style=filled, colorscheme=spectral7]
    25  c13 [shape=none, label="...", style="solid"]
    43  node [shape=Mrecord, style=filled, colorscheme=spectral7]
    49  c23 [shape="none", label="...", style="solid"]
|
D | simpledom.dot |
    13  node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
    19  node [shape="box", style="filled", fillcolor="gray95"]
    30  node [shape=Mrecord, style=filled, colorscheme=spectral7]
|
D | insituparsing.dot |
    13  node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
    16  newjson [shape=plaintext, label=<
    37  node [shape=Mrecord, style=filled, colorscheme=spectral7]
|
D | move1.dot |
    19  node [shape=Mrecord, style=filled, colorscheme=spectral7]
    37  node [shape=Mrecord, style=filled, colorscheme=spectral7]
|
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/ |
D | sub_v1_2.mod.py |
    37  shape = "{2, 4, 16, 2}, 0.5, 0" variable
    38  input0 = Input("input0", "TENSOR_QUANT8_ASYMM", shape)
    39  input1 = Input("input1", "TENSOR_QUANT8_ASYMM", shape)
    41  output0 = Output("output0", "TENSOR_QUANT8_ASYMM", shape)
|
/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/ |
D | sub_quant8_signed.mod.py |
    86  shape = "{2, 4, 16, 2}, 0.5, -128" variable
    87  input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", shape)
    88  input1 = Input("input1", "TENSOR_QUANT8_ASYMM_SIGNED", shape)
    90  output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", shape)
|
/packages/apps/Launcher3/src/com/android/launcher3/graphics/ |
D | IconShape.java |
    419  IconShape shape = getShapeDefinition(parser.getName(), a.getFloat(0, 1));
    422  result.add(shape);
    449  for (IconShape shape : getAllShapes(context)) {
    451  shape.addToPath(shapePath, 0, 0, size / 2f);
    458  closestShape = shape;
|