Lines matching refs:input (cross-reference hits for the symbol input; each entry is prefixed with its source line number, and lines between hits are elided)

346 bool floorPrepare(const Shape& input, Shape* output) {
347     return SetShape(input, output);

350 bool depthwiseConvPrepare(const Shape& input, const Shape& filter, const Shape& bias,
356     NN_OPS_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM);
358     NN_OPS_CHECK(input.type == filter.type);
360     if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
363     NN_OPS_CHECK(input.type == bias.type);
365     NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
372     uint32_t channels_in = getSizeOfDimension(input, 3);
373     uint32_t width = getSizeOfDimension(input, 2);
374     uint32_t height = getSizeOfDimension(input, 1);
377     uint32_t batches = getSizeOfDimension(input, 0);
392     output->type = input.type;
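
The lines elided between the checks and the final shape assignment size the output spatially. For reference, a minimal self-contained sketch of the explicit-padding output-size arithmetic that conv-style prepare functions of this kind rely on; the helper name computeOutSize follows the NNAPI convention, but this standalone body is an assumption, not the file's own code:

    #include <cstdint>

    // Explicit-padding output-size arithmetic: pad the image, slide the
    // filter with the given stride, and count how many positions fit.
    // Equivalent to (imageSize + paddingHead + paddingTail - filterSize)
    // / stride + 1, written to stay in unsigned arithmetic.
    static uint32_t computeOutSize(uint32_t imageSize, uint32_t filterSize,
                                   uint32_t stride, uint32_t paddingHead,
                                   uint32_t paddingTail) {
        // e.g. imageSize 224, filterSize 3, stride 2, padding {0, 1} -> 112
        return (imageSize - filterSize + stride + paddingHead + paddingTail) / stride;
    }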

397 bool genericActivationPrepare(const Shape& input,
399     NN_OPS_CHECK(getNumberOfDimensions(input) <= 4);
400     return SetShape(input, output);

403 bool genericNormalizationPrepare(const Shape& input, Shape* output) {
404     return SetShape(input, output);

407 bool reshapePrepare(const Shape& input,
415     int32_t numInputElements = (int32_t) getNumberOfElements(input);
438     output->type = input.type;
440     output->offset = input.offset;
441     output->scale = input.scale;
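
The omitted middle of reshapePrepare resolves an optional -1 wildcard in the requested shape against numInputElements. A sketch of that resolution with illustrative names (resolveReshapeDims is not the file's own helper):

    #include <cstdint>
    #include <vector>

    // Resolve a reshape target that may contain one -1 wildcard: that
    // dimension becomes whatever makes the element counts match.
    // Returns false when they cannot.
    static bool resolveReshapeDims(int32_t numInputElements,
                                   std::vector<int32_t>* targetDims) {
        int32_t numOutputElements = 1;
        int32_t stretchDim = -1;  // index of the -1 wildcard, if any
        for (size_t i = 0; i < targetDims->size(); ++i) {
            if ((*targetDims)[i] == -1) {
                if (stretchDim != -1) return false;  // at most one wildcard
                stretchDim = static_cast<int32_t>(i);
            } else {
                numOutputElements *= (*targetDims)[i];
            }
        }
        if (stretchDim != -1) {
            if (numOutputElements == 0) return false;  // avoid dividing by zero
            (*targetDims)[stretchDim] = numInputElements / numOutputElements;
            numOutputElements *= (*targetDims)[stretchDim];
        }
        return numOutputElements == numInputElements;
    }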

446 bool depthToSpacePrepare(const Shape& input,
449     NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
452     uint32_t batches = getSizeOfDimension(input, 0);
453     uint32_t height = getSizeOfDimension(input, 1);
454     uint32_t width = getSizeOfDimension(input, 2);
455     uint32_t channels = getSizeOfDimension(input, 3);
458     output->type = input.type;
463     output->offset = input.offset;
464     output->scale = input.scale;

469 bool spaceToDepthPrepare(const Shape& input,
472     NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
475     uint32_t batches = getSizeOfDimension(input, 0);
476     uint32_t height = getSizeOfDimension(input, 1);
477     uint32_t width = getSizeOfDimension(input, 2);
478     uint32_t channels = getSizeOfDimension(input, 3);
483     output->type = input.type;
488     output->offset = input.offset;
489     output->scale = input.scale;
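
depthToSpacePrepare and spaceToDepthPrepare assign shapes that are exact inverses of each other. A sketch of the DEPTH_TO_SPACE case, assuming the usual NHWC layout; the divisibility checks in the elided lines are taken on faith here:

    #include <cstdint>
    #include <vector>

    // DEPTH_TO_SPACE moves blockSize x blockSize blocks out of the
    // channel dimension into the spatial ones; SPACE_TO_DEPTH swaps
    // the multiplications and divisions.
    static std::vector<uint32_t> depthToSpaceShape(uint32_t batches, uint32_t height,
                                                   uint32_t width, uint32_t channels,
                                                   uint32_t blockSize) {
        return {batches, height * blockSize, width * blockSize,
                channels / (blockSize * blockSize)};
    }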

544 bool padPrepare(const Shape& input,
548     uint32_t numInputDims = getNumberOfDimensions(input);
562         outDims[i] = beforePadding + getSizeOfDimension(input, i) + afterPadding;
564     output->type = input.type;
566     output->offset = input.offset;
567     output->scale = input.scale;

572 bool batchToSpacePrepare(const Shape& input,
577     NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
585     uint32_t batches = getSizeOfDimension(input, 0);
586     uint32_t height = getSizeOfDimension(input, 1);
587     uint32_t width = getSizeOfDimension(input, 2);
588     uint32_t channels = getSizeOfDimension(input, 3);
591     output->type = input.type;
596     output->offset = input.offset;
597     output->scale = input.scale;

602 bool spaceToBatchPrepare(const Shape& input,
609     NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
623     uint32_t batches = getSizeOfDimension(input, 0);
624     uint32_t height = getSizeOfDimension(input, 1);
625     uint32_t width = getSizeOfDimension(input, 2);
626     uint32_t channels = getSizeOfDimension(input, 3);
634     output->type = input.type;
639     output->offset = input.offset;
640     output->scale = input.scale;
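
spaceToBatchPrepare additionally consumes block-size and paddings tensors, both elided from this listing. A sketch of the shape it assigns, under the assumed NHWC layout and a {blockHeight, blockWidth} block; batchToSpacePrepare inverts it:

    #include <cstdint>
    #include <vector>

    // SPACE_TO_BATCH pads the spatial dimensions and folds block
    // rows/columns into the batch dimension.
    static std::vector<uint32_t> spaceToBatchShape(
            uint32_t batches, uint32_t height, uint32_t width, uint32_t channels,
            uint32_t blockHeight, uint32_t blockWidth,
            uint32_t padTop, uint32_t padBottom,
            uint32_t padLeft, uint32_t padRight) {
        return {batches * blockHeight * blockWidth,
                (height + padTop + padBottom) / blockHeight,
                (width + padLeft + padRight) / blockWidth,
                channels};
    }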

645 bool squeezePrepare(const Shape& input,
649     int32_t numInputDims = static_cast<int32_t>(getNumberOfDimensions(input));
662         if (getSizeOfDimension(input, idx) == 1) {
672                  getSizeOfDimension(input, current) == 1);
682         outDims[outIdx++] = getSizeOfDimension(input, inIdx);
686     output->type = input.type;
688     output->offset = input.offset;
689     output->scale = input.scale;
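
squeezePrepare keeps every dimension except the requested size-1 ones (all size-1 dimensions when the request list is empty). A behavioral sketch with illustrative names; where the real function fails on a listed dimension whose size is not 1 (the check continued on line 672), this sketch simply keeps it:

    #include <cstdint>
    #include <set>
    #include <vector>

    // Squeeze shape logic in miniature: drop the listed size-1
    // dimensions, or every size-1 dimension when the list is empty.
    static std::vector<uint32_t> squeezeShape(const std::vector<uint32_t>& dims,
                                              const std::set<int32_t>& squeezeDims) {
        std::vector<uint32_t> out;
        for (int32_t i = 0; i < static_cast<int32_t>(dims.size()); ++i) {
            const bool drop = squeezeDims.empty()
                    ? dims[i] == 1
                    : squeezeDims.count(i) > 0 && dims[i] == 1;
            if (!drop) out.push_back(dims[i]);
        }
        return out;  // e.g. {1, 24, 1, 3} with an empty list -> {24, 3}
    }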

694 bool meanPrepare(const Shape& input,
704     int32_t numInputDims = static_cast<int32_t>(getNumberOfDimensions(input));
721         outDims[idx] = getSizeOfDimension(input, idx);
758         outDims[idx - numSkipAxis] = getSizeOfDimension(input, idx);
764     output->type = input.type;
765     output->offset = input.offset;
766     output->scale = input.scale;

771 bool stridedSlicePrepare(const Shape& input,
777     uint32_t numInputDims = getNumberOfDimensions(input);
796         int32_t dim = static_cast<int32_t>(getSizeOfDimension(input, idx));
822     output->type = input.type;
824     output->offset = input.offset;
825     output->scale = input.scale;
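
The elided body of stridedSlicePrepare clamps begin/end per axis (honoring the begin/end/shrink-axis masks, which do not appear in this listing) before sizing each output dimension. A sketch of just the per-axis size arithmetic, which also covers negative strides:

    #include <cmath>
    #include <cstdint>

    // Per-axis output size for STRIDED_SLICE once begin/end have been
    // clamped into the dimension's range: the number of stride steps
    // in [begin, end).
    static int32_t stridedSliceOutDim(int32_t begin, int32_t end, int32_t stride) {
        // e.g. begin 0, end 4, stride 1 -> 4; begin 3, end -1, stride -1 -> 4
        return static_cast<int32_t>(
                std::ceil((end - begin) / static_cast<double>(stride)));
    }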

830 bool argMinMaxPrepare(const Shape& input, int32_t axis, Shape* output) {
831     NN_CHECK(handleNegativeAxis(input, &axis));
837     output->dimensions.reserve(getNumberOfDimensions(input) - 1);
839         input.dimensions.begin(),
840         input.dimensions.begin() + axis);
842         input.dimensions.begin() + axis + 1,
843         input.dimensions.end());

848 bool splitPrepare(const Shape& input, int32_t axis, int32_t numOutputs,
850     NN_CHECK(handleNegativeAxis(input, &axis));
852     const int32_t sizeOfAxisToSplit = input.dimensions[axis];
857         output->at(i).type = input.type;
858         output->at(i).dimensions = input.dimensions;
860         output->at(i).offset = input.offset;
861         output->at(i).scale = input.scale;
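
The elided lines can be assumed to check that sizeOfAxisToSplit divides evenly by numOutputs; each output then copies the input shape with that axis divided. In miniature:

    #include <cstdint>
    #include <vector>

    // SPLIT shape logic: each of the numOutputs outputs is the input
    // shape with the split axis divided evenly. (Sketch; divisibility
    // validation omitted.)
    static std::vector<std::vector<uint32_t>> splitShapes(
            std::vector<uint32_t> dims, int32_t axis, int32_t numOutputs) {
        dims[axis] /= static_cast<uint32_t>(numOutputs);
        return std::vector<std::vector<uint32_t>>(numOutputs, dims);
    }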

866 bool groupedConvPrepare(const Shape& input, const Shape& filter, const Shape& bias,
871     NN_OPS_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM);
873     NN_OPS_CHECK(input.type == filter.type);
875     if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
878     NN_OPS_CHECK(input.type == bias.type);
880     NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
886     NN_OPS_CHECK(getSizeOfDimension(filter, 3) * numGroups == getSizeOfDimension(input, 3));
890     uint32_t width = getSizeOfDimension(input, 2);
891     uint32_t height = getSizeOfDimension(input, 1);
894     uint32_t batches = getSizeOfDimension(input, 0);
906     output->type = input.type;
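
groupedConvPrepare mirrors depthwiseConvPrepare, with line 886 tying the filter's per-group channel count to the input's channel dimension. A sketch of the output shape under the assumed [outChannels, filterHeight, filterWidth, inChannels / numGroups] filter layout, reusing the same explicit-padding formula sketched above:

    #include <cstdint>
    #include <vector>

    // Grouped-convolution output shape for an NHWC input: the spatial
    // dims follow the explicit-padding size formula, and the channel
    // dim comes from the filter's output-channel count.
    static std::vector<uint32_t> groupedConvOutShape(
            uint32_t batches, uint32_t height, uint32_t width,
            uint32_t filterHeight, uint32_t filterWidth, uint32_t outChannels,
            uint32_t strideH, uint32_t strideW,
            uint32_t padTop, uint32_t padBottom,
            uint32_t padLeft, uint32_t padRight) {
        uint32_t outHeight = (height - filterHeight + strideH + padTop + padBottom) / strideH;
        uint32_t outWidth = (width - filterWidth + strideW + padLeft + padRight) / strideW;
        return {batches, outHeight, outWidth, outChannels};
    }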