Lines Matching refs:input_tmp (all hits fall inside executeOperation())
798 RunTimeOperandInfo input_tmp, output_tmp; in executeOperation() local
800 if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) { in executeOperation()
809 Shape inputShape = input_tmp.shape(); in executeOperation()
823 if (!depthwiseConvPrepare(input_tmp.shape(), filter.shape(), bias.shape(), padding_left, in executeOperation()
832 if (input_tmp.type == OperandType::TENSOR_FLOAT32) { in executeOperation()
834 reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
840 } else if (input_tmp.type == OperandType::TENSOR_FLOAT16) { in executeOperation()
842 reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
848 } else if (input_tmp.type == OperandType::TENSOR_QUANT8_ASYMM) { in executeOperation()
851 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
861 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
943 RunTimeOperandInfo input_tmp, output_tmp; in executeOperation() local
945 if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) { in executeOperation()
952 if (!depthToSpacePrepare(input_tmp.shape(), blockSize, &outShape) || in executeOperation()
957 switch (input_tmp.type) { in executeOperation()
960 reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
966 reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
972 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1001 RunTimeOperandInfo input_tmp, output_tmp; in executeOperation() local
1003 if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) { in executeOperation()
1011 if (!spaceToDepthPrepare(input_tmp.shape(), blockSize, &outShape) || in executeOperation()
1016 switch (input_tmp.type) { in executeOperation()
1019 reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1025 reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1031 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1180 RunTimeOperandInfo input_tmp, output_tmp; in executeOperation() local
1182 if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) { in executeOperation()
1190 if (!batchToSpacePrepare(input_tmp.shape(), in executeOperation()
1197 switch (input_tmp.type) { in executeOperation()
1200 reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1207 reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1214 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1245 RunTimeOperandInfo input_tmp, output_tmp; in executeOperation() local
1247 if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) { in executeOperation()
1256 input_tmp.shape(), reinterpret_cast<const int32_t*>(blockSize.buffer), in executeOperation()
1263 switch (input_tmp.type) { in executeOperation()
1266 reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1274 reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1282 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1583 RunTimeOperandInfo input_tmp, output_tmp; in executeOperation() local
1585 if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) { in executeOperation()
1594 Shape inputShape = input_tmp.shape(); in executeOperation()
1606 if (!groupedConvPrepare(input_tmp.shape(), filter.shape(), bias.shape(), padding_left, in executeOperation()
1615 if (input_tmp.type == OperandType::TENSOR_FLOAT32) { in executeOperation()
1617 reinterpret_cast<const float*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1623 } else if (input_tmp.type == OperandType::TENSOR_FLOAT16) { in executeOperation()
1625 reinterpret_cast<const _Float16*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1631 } else if (input_tmp.type == OperandType::TENSOR_QUANT8_ASYMM) { in executeOperation()
1634 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
1643 reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), in executeOperation()
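Taken together, these hits all show the same temporary-operand pattern inside executeOperation(): each handler (depthwiseConvPrepare at 823, depthToSpacePrepare at 952, spaceToDepthPrepare at 1011, batchToSpacePrepare at 1190, the blockSize-buffer variant at 1256, and groupedConvPrepare at 1606) declares a local RunTimeOperandInfo input_tmp, fills it with convertToNhwc(input_tmp, input, input_tmp_guard, data_layout), runs the operation's prepare function on input_tmp.shape(), and then dispatches on input_tmp.type, reinterpret_cast-ing input_tmp.buffer to const float*, const _Float16*, or const uint8_t* for the TENSOR_FLOAT32, TENSOR_FLOAT16, and TENSOR_QUANT8_ASYMM cases. Below is a minimal, self-contained sketch of that pattern, not the actual NNAPI runtime code: the RunTimeOperandInfo and Shape definitions are simplified stand-ins, this convertToNhwc body keeps only a float32 NCHW-to-NHWC transpose, and the sumElements kernel and main() driver are hypothetical placeholders for the real per-type kernels.

// Sketch only: simplified stand-ins for the NNAPI runtime types used above.
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

enum class OperandType { TENSOR_FLOAT32, TENSOR_FLOAT16, TENSOR_QUANT8_ASYMM };

struct Shape {
    OperandType type;
    std::vector<uint32_t> dimensions;
};

struct RunTimeOperandInfo {
    OperandType type;
    std::vector<uint32_t> dimensions;
    uint8_t* buffer;
    Shape shape() const { return {type, dimensions}; }
};

// Mirrors the convertToNhwc(input_tmp, input, input_tmp_guard, data_layout) calls in
// the matches: when data_layout is true (the operand is stored as NCHW), allocate a
// guard-owned temporary and transpose into NHWC; otherwise alias the original buffer.
// Float32 only in this sketch; the real helper also covers float16 and quant8.
static bool convertToNhwc(RunTimeOperandInfo& to, const RunTimeOperandInfo& from,
                          std::unique_ptr<uint8_t[]>& guard, bool data_layout) {
    if (from.type != OperandType::TENSOR_FLOAT32) return false;
    if (!data_layout) {
        to = from;
        return true;
    }
    const uint32_t n = from.dimensions[0], c = from.dimensions[1],
                   h = from.dimensions[2], w = from.dimensions[3];
    to.type = from.type;
    to.dimensions = {n, h, w, c};
    guard = std::make_unique<uint8_t[]>(static_cast<size_t>(n) * h * w * c * sizeof(float));
    to.buffer = guard.get();
    const float* src = reinterpret_cast<const float*>(from.buffer);
    float* dst = reinterpret_cast<float*>(to.buffer);
    for (uint32_t in = 0; in < n; ++in)
        for (uint32_t ih = 0; ih < h; ++ih)
            for (uint32_t iw = 0; iw < w; ++iw)
                for (uint32_t ic = 0; ic < c; ++ic)
                    dst[((in * h + ih) * w + iw) * c + ic] =
                        src[((in * c + ic) * h + ih) * w + iw];
    return true;
}

// Hypothetical placeholder for the per-type kernels (depthToSpaceGeneric, the conv
// kernels, and so on) that the real code calls after the reinterpret_cast.
static float sumElements(const float* data, const Shape& shape) {
    size_t count = 1;
    for (uint32_t d : shape.dimensions) count *= d;
    float sum = 0.0f;
    for (size_t i = 0; i < count; ++i) sum += data[i];
    return sum;
}

int main() {
    std::vector<float> values = {1, 2, 3, 4, 5, 6, 7, 8};  // 1x2x2x2 tensor stored NCHW
    RunTimeOperandInfo input{OperandType::TENSOR_FLOAT32,
                             {1, 2, 2, 2},
                             reinterpret_cast<uint8_t*>(values.data())};

    // The shared pattern from the matches: local temporary, guarded conversion,
    // then a dispatch on input_tmp.type over the reinterpret_cast buffer.
    RunTimeOperandInfo input_tmp;
    std::unique_ptr<uint8_t[]> input_tmp_guard;
    const bool data_layout = true;  // NCHW input, so the NHWC temporary is materialized
    if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) return 1;

    switch (input_tmp.type) {
        case OperandType::TENSOR_FLOAT32:
            std::printf("sum over NHWC temporary: %g\n",
                        sumElements(reinterpret_cast<const float*>(input_tmp.buffer),
                                    input_tmp.shape()));
            break;
        default:
            break;  // TENSOR_FLOAT16 / TENSOR_QUANT8_ASYMM branches omitted here
    }
    return 0;
}

Built on its own with C++14 or later, the sketch prints a value computed over the transposed buffer; the point is the control flow the matches share, i.e. a guard-owned NHWC temporary, a shape derived from input_tmp.shape(), and a per-type dispatch on input_tmp.type.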