Lines Matching refs:input
197 bool floorPrepare(const Shape& input, Shape* output) { in floorPrepare() argument
198 return SetShape(input, output); in floorPrepare()
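
These first matches are floorPrepare(), which only forwards the input shape to the output via SetShape(). The listing reads like the *Prepare() shape-validation helpers from the Android NNAPI operation utilities; every later function follows the same pattern of validating operand types and ranks and then filling in output->type and output->dimensions. As a minimal, self-contained sketch of the Shape type and the SetShape() helper these lines rely on (names beyond the type, dimensions, scale, and offset fields visible in this listing are assumptions):

    #include <cstdint>
    #include <vector>

    // Assumed minimal operand-type enum; only the values named in the
    // listing are included here.
    enum class OperandType { TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };

    // Assumed minimal shape descriptor carrying the four fields the
    // prepare functions below read and write.
    struct Shape {
        OperandType type;
        std::vector<uint32_t> dimensions;
        float scale;
        int32_t offset;
    };

    // Sketch of SetShape(): propagate type and dimensions from input to
    // output, which is all floorPrepare() needs.
    static bool SetShape(const Shape& input, Shape* output) {
        if (output == nullptr) {
            return false;
        }
        output->type = input.type;
        output->dimensions = input.dimensions;
        return true;
    }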
201 bool dequantizePrepare(const Shape& input, Shape* output) { in dequantizePrepare() argument
202 if (input.type != OperandType::TENSOR_QUANT8_ASYMM || in dequantizePrepare()
207 if (input.dimensions.size() != output->dimensions.size()) { in dequantizePrepare()
211 output->dimensions = input.dimensions; in dequantizePrepare()
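
dequantizePrepare() accepts only a TENSOR_QUANT8_ASYMM input whose rank matches the output, then copies the dimensions across. A sketch with the elided lines filled in; the float32 output-type check is an assumption based on what a dequantize step produces:

    // Sketch: reject anything but quant8 -> float32 of matching rank,
    // then propagate the dimensions.
    bool dequantizePrepare(const Shape& input, Shape* output) {
        if (input.type != OperandType::TENSOR_QUANT8_ASYMM ||
            output->type != OperandType::TENSOR_FLOAT32) {  // output check assumed
            return false;
        }
        if (input.dimensions.size() != output->dimensions.size()) {
            return false;
        }
        output->dimensions = input.dimensions;
        return true;
    }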
215 bool convPrepare(const Shape& input, in convPrepare() argument
222 NN_OPS_CHECK(input.type == filter.type); in convPrepare()
223 if (input.type == OperandType::TENSOR_QUANT8_ASYMM) { in convPrepare()
226 NN_OPS_CHECK(input.type == bias.type); in convPrepare()
228 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in convPrepare()
233 NN_OPS_CHECK(getSizeOfDimension(filter, 3) == getSizeOfDimension(input, 3)); in convPrepare()
236 uint32_t width = getSizeOfDimension(input, 2); in convPrepare()
237 uint32_t height = getSizeOfDimension(input, 1); in convPrepare()
240 uint32_t batches = getSizeOfDimension(input, 0); in convPrepare()
247 output->type = input.type; in convPrepare()
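
convPrepare() checks that input and filter share a type (with an int32 bias when the input is quantized), that the tensors are 4-D, and that the filter depth matches the input depth (line 233), before deriving the output NHWC shape from the input batches, height, and width. The explicit padding/stride parameters and the computeOutSize() helper below are assumptions standing in for the elided lines; the check macro and dimension getters are the ones visible in the listing:

    // Assumed helper: output extent of one spatial axis with explicit padding.
    static uint32_t computeOutSize(uint32_t imageSize, uint32_t filterSize,
                                   uint32_t stride, uint32_t paddingHead,
                                   uint32_t paddingTail) {
        return (imageSize - filterSize + stride + paddingHead + paddingTail) / stride;
    }

    // Sketch of convPrepare() with the elided parameters filled in as
    // assumptions (explicit padding and strides); input is NHWC.
    bool convPrepare(const Shape& input, const Shape& filter, const Shape& bias,
                     int32_t padding_left, int32_t padding_right,
                     int32_t padding_top, int32_t padding_bottom,
                     int32_t stride_width, int32_t stride_height,
                     Shape* output) {
        NN_OPS_CHECK(input.type == filter.type);
        if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
            NN_OPS_CHECK(bias.type == OperandType::TENSOR_INT32);  // assumed
        } else {
            NN_OPS_CHECK(input.type == bias.type);
        }
        NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
        NN_OPS_CHECK(getNumberOfDimensions(filter) == 4);
        NN_OPS_CHECK(getNumberOfDimensions(bias) == 1);
        // Filter depth must match input depth (line 233 above).
        NN_OPS_CHECK(getSizeOfDimension(filter, 3) == getSizeOfDimension(input, 3));

        uint32_t channels_out = getSizeOfDimension(filter, 0);
        uint32_t width        = getSizeOfDimension(input, 2);
        uint32_t height       = getSizeOfDimension(input, 1);
        uint32_t filterWidth  = getSizeOfDimension(filter, 2);
        uint32_t filterHeight = getSizeOfDimension(filter, 1);
        uint32_t batches      = getSizeOfDimension(input, 0);

        output->type = input.type;
        output->dimensions = {batches,
                              computeOutSize(height, filterHeight, stride_height,
                                             padding_top, padding_bottom),
                              computeOutSize(width, filterWidth, stride_width,
                                             padding_left, padding_right),
                              channels_out};
        return true;
    }

Note that, as with all the prepare functions in this listing, only shape metadata is computed here: the function fills in output->type and output->dimensions and never touches tensor data.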
252 bool depthwiseConvPrepare(const Shape& input, in depthwiseConvPrepare() argument
259 NN_OPS_CHECK(input.type == filter.type); in depthwiseConvPrepare()
260 if (input.type == OperandType::TENSOR_QUANT8_ASYMM) { in depthwiseConvPrepare()
263 NN_OPS_CHECK(input.type == bias.type); in depthwiseConvPrepare()
265 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in depthwiseConvPrepare()
272 uint32_t width = getSizeOfDimension(input, 2); in depthwiseConvPrepare()
273 uint32_t height = getSizeOfDimension(input, 1); in depthwiseConvPrepare()
276 uint32_t batches = getSizeOfDimension(input, 0); in depthwiseConvPrepare()
283 output->type = input.type; in depthwiseConvPrepare()
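
depthwiseConvPrepare() repeats the same type and rank checks; the difference is where the output depth comes from. A sketch of the tail of the function, reusing the assumed padding/stride parameters and computeOutSize() helper from the convPrepare sketch above:

    // Sketch of the tail of depthwiseConvPrepare(): the output depth is
    // taken from the filter's last dimension rather than from filter
    // dimension 0 as in convPrepare().
    uint32_t channels_out = getSizeOfDimension(filter, 3);

    output->type = input.type;
    output->dimensions = {batches,
                          computeOutSize(height, filterHeight, stride_height,
                                         padding_top, padding_bottom),
                          computeOutSize(width, filterWidth, stride_width,
                                         padding_left, padding_right),
                          channels_out};
    return true;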
289 bool genericPoolingPrepare(const Shape& input, in genericPoolingPrepare() argument
295 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in genericPoolingPrepare()
297 uint32_t batches = getSizeOfDimension(input, 0); in genericPoolingPrepare()
298 uint32_t width = getSizeOfDimension(input, 2); in genericPoolingPrepare()
299 uint32_t height = getSizeOfDimension(input, 1); in genericPoolingPrepare()
300 uint32_t channels_out = getSizeOfDimension(input, 3); in genericPoolingPrepare()
307 output->type = input.type; in genericPoolingPrepare()
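
genericPoolingPrepare() has no filter tensor, so the output depth is simply the input depth (line 300) and only the spatial extents change. A sketch of its tail, under assumed filter-size, stride, and padding parameters and the same computeOutSize() helper:

    // Sketch of the tail of genericPoolingPrepare(): depth is unchanged,
    // height and width shrink according to the pooling window.
    output->type = input.type;
    output->dimensions = {batches,
                          computeOutSize(height, filter_height, stride_height,
                                         padding_top, padding_bottom),
                          computeOutSize(width, filter_width, stride_width,
                                         padding_left, padding_right),
                          channels_out};
    return true;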
313 bool genericActivationPrepare(const Shape& input, in genericActivationPrepare() argument
315 NN_OPS_CHECK(getNumberOfDimensions(input) <= 4); in genericActivationPrepare()
316 return SetShape(input, output); in genericActivationPrepare()
319 bool fullyConnectedPrepare(const Shape& input, in fullyConnectedPrepare() argument
325 NN_OPS_CHECK(input.type == weights.type); in fullyConnectedPrepare()
326 if (input.type == OperandType::TENSOR_QUANT8_ASYMM) { in fullyConnectedPrepare()
329 NN_OPS_CHECK(input.type == bias.type); in fullyConnectedPrepare()
331 NN_OPS_CHECK(getNumberOfDimensions(input) >= 2); in fullyConnectedPrepare()
332 uint32_t input_size = getNumberOfElements(input); in fullyConnectedPrepare()
340 output->type = input.type; in fullyConnectedPrepare()
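
fullyConnectedPrepare() treats the input as a flattened [batch, features] matrix: the total element count (line 332) divided by the weights' second dimension gives the batch size, while the weights' first dimension gives the number of output units. A sketch with the elided consistency checks filled in as assumptions:

    // Sketch of fullyConnectedPrepare(): weights are assumed to be a 2-D
    // tensor of shape [num_units, input_size_per_batch].
    bool fullyConnectedPrepare(const Shape& input, const Shape& weights,
                               const Shape& bias, Shape* output) {
        NN_OPS_CHECK(input.type == weights.type);
        if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
            NN_OPS_CHECK(bias.type == OperandType::TENSOR_INT32);  // assumed
        } else {
            NN_OPS_CHECK(input.type == bias.type);
        }
        NN_OPS_CHECK(getNumberOfDimensions(input) >= 2);
        NN_OPS_CHECK(getNumberOfDimensions(weights) == 2);

        uint32_t input_size = getNumberOfElements(input);   // line 332 above
        uint32_t num_units  = getSizeOfDimension(weights, 0);
        uint32_t batch_size = input_size / getSizeOfDimension(weights, 1);

        // Every input element must land in exactly one batch row,
        // and the bias must have one entry per output unit.
        NN_OPS_CHECK(getSizeOfDimension(weights, 1) * batch_size == input_size);
        NN_OPS_CHECK(getSizeOfDimension(bias, 0) == num_units);

        output->type = input.type;
        output->dimensions = {batch_size, num_units};
        return true;
    }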
388 bool genericNormalizationPrepare(const Shape& input, Shape* output) { in genericNormalizationPrepare() argument
389 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in genericNormalizationPrepare()
390 return SetShape(input, output); in genericNormalizationPrepare()
393 bool reshapePrepare(const Shape& input, in reshapePrepare() argument
401 int32_t numInputElements = (int32_t) getNumberOfElements(input); in reshapePrepare()
424 output->type = input.type; in reshapePrepare()
426 output->offset = input.offset; in reshapePrepare()
427 output->scale = input.scale; in reshapePrepare()
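
reshapePrepare() only has to confirm that the requested shape accounts for every input element (line 401) and then pass the type and quantization parameters through (lines 424-427). A sketch assuming the target shape arrives as an int32 array in which at most one entry may be -1, meaning "infer this dimension from the rest":

    // Sketch of reshapePrepare(); the targetDims parameters are assumptions.
    bool reshapePrepare(const Shape& input, const int32_t* targetDims,
                        int32_t targetDimsSize, Shape* output) {
        int32_t numOutputElements = 1;
        int32_t inferredDim = -1;
        std::vector<uint32_t> outDims(targetDimsSize);
        for (int32_t i = 0; i < targetDimsSize; ++i) {
            if (targetDims[i] == -1) {
                NN_OPS_CHECK(inferredDim == -1);  // at most one wildcard
                inferredDim = i;
            } else {
                numOutputElements *= targetDims[i];
                outDims[i] = static_cast<uint32_t>(targetDims[i]);
            }
        }
        int32_t numInputElements = (int32_t) getNumberOfElements(input);  // line 401
        if (inferredDim != -1) {
            outDims[inferredDim] =
                    static_cast<uint32_t>(numInputElements / numOutputElements);
            numOutputElements *= outDims[inferredDim];
        }
        NN_OPS_CHECK(numInputElements == numOutputElements);

        // Lines 424-427 above: type and quantization parameters pass through.
        output->type = input.type;
        output->dimensions = outDims;
        output->offset = input.offset;
        output->scale = input.scale;
        return true;
    }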
432 bool resizeBilinearPrepare(const Shape& input, in resizeBilinearPrepare() argument
436 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in resizeBilinearPrepare()
437 uint32_t batches = getSizeOfDimension(input, 0); in resizeBilinearPrepare()
438 uint32_t channels = getSizeOfDimension(input, 3); in resizeBilinearPrepare()
440 output->type = input.type; in resizeBilinearPrepare()
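
resizeBilinearPrepare() keeps batches and channels and replaces the spatial extents with the requested ones. A sketch assuming the new width and height arrive as int32 parameters:

    // Sketch of resizeBilinearPrepare(): only height and width change.
    bool resizeBilinearPrepare(const Shape& input, int32_t width, int32_t height,
                               Shape* output) {
        NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
        uint32_t batches  = getSizeOfDimension(input, 0);
        uint32_t channels = getSizeOfDimension(input, 3);

        output->type = input.type;
        output->dimensions = {batches, static_cast<uint32_t>(height),
                              static_cast<uint32_t>(width), channels};
        return true;
    }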
446 bool depthToSpacePrepare(const Shape& input, in depthToSpacePrepare() argument
449 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in depthToSpacePrepare()
452 uint32_t batches = getSizeOfDimension(input, 0); in depthToSpacePrepare()
453 uint32_t height = getSizeOfDimension(input, 1); in depthToSpacePrepare()
454 uint32_t width = getSizeOfDimension(input, 2); in depthToSpacePrepare()
455 uint32_t channels = getSizeOfDimension(input, 3); in depthToSpacePrepare()
458 output->type = input.type; in depthToSpacePrepare()
463 output->offset = input.offset; in depthToSpacePrepare()
464 output->scale = input.scale; in depthToSpacePrepare()
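
depthToSpacePrepare() rearranges depth into blockSize x blockSize spatial tiles, so the input depth must divide evenly by the squared block size, and the quantization parameters pass through unchanged (lines 463-464). A sketch assuming a single int32 blockSize parameter and an assumed divisibility check for the elided lines:

    // Sketch of depthToSpacePrepare(): depth shrinks by blockSize^2 while
    // height and width grow by blockSize.
    bool depthToSpacePrepare(const Shape& input, int32_t blockSize, Shape* output) {
        NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
        NN_OPS_CHECK(blockSize > 0);
        uint32_t batches  = getSizeOfDimension(input, 0);
        uint32_t height   = getSizeOfDimension(input, 1);
        uint32_t width    = getSizeOfDimension(input, 2);
        uint32_t channels = getSizeOfDimension(input, 3);
        NN_OPS_CHECK(channels % (blockSize * blockSize) == 0);  // assumed check

        output->type = input.type;
        output->dimensions = {batches,
                              height * blockSize,
                              width * blockSize,
                              channels / (blockSize * blockSize)};
        // Lines 463-464 above: quantization parameters are unchanged.
        output->offset = input.offset;
        output->scale = input.scale;
        return true;
    }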
469 bool spaceToDepthPrepare(const Shape& input, in spaceToDepthPrepare() argument
472 NN_OPS_CHECK(getNumberOfDimensions(input) == 4); in spaceToDepthPrepare()
475 uint32_t batches = getSizeOfDimension(input, 0); in spaceToDepthPrepare()
476 uint32_t height = getSizeOfDimension(input, 1); in spaceToDepthPrepare()
477 uint32_t width = getSizeOfDimension(input, 2); in spaceToDepthPrepare()
478 uint32_t channels = getSizeOfDimension(input, 3); in spaceToDepthPrepare()
483 output->type = input.type; in spaceToDepthPrepare()
488 output->offset = input.offset; in spaceToDepthPrepare()
489 output->scale = input.scale; in spaceToDepthPrepare()
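
spaceToDepthPrepare() is the inverse transformation: the spatial extents must divide evenly by the block size and the depth grows by blockSize^2, again with scale and offset carried over (lines 488-489). A sketch under the same assumed blockSize parameter:

    // Sketch of spaceToDepthPrepare(): height and width shrink by blockSize
    // while depth grows by blockSize^2.
    bool spaceToDepthPrepare(const Shape& input, int32_t blockSize, Shape* output) {
        NN_OPS_CHECK(getNumberOfDimensions(input) == 4);
        NN_OPS_CHECK(blockSize > 0);
        uint32_t batches  = getSizeOfDimension(input, 0);
        uint32_t height   = getSizeOfDimension(input, 1);
        uint32_t width    = getSizeOfDimension(input, 2);
        uint32_t channels = getSizeOfDimension(input, 3);
        NN_OPS_CHECK(height % blockSize == 0);  // assumed checks, mirroring
        NN_OPS_CHECK(width % blockSize == 0);   // depthToSpacePrepare()

        output->type = input.type;
        output->dimensions = {batches,
                              height / blockSize,
                              width / blockSize,
                              channels * blockSize * blockSize};
        output->offset = input.offset;
        output->scale = input.scale;
        return true;
    }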