/external/pytorch/benchmarks/operator_benchmark/pt/ |
D | interpolate_test.py |
    18   input_image = torch.randint(
    27   if input_image.ndim == 4:
    28   input_image = input_image.contiguous(memory_format=torch.channels_last)
    29   elif input_image.ndim == 5:
    30   input_image = input_image.contiguous(
    45   }[input_image.ndim]
    48   "input_image": input_image,
    56   def forward(self, input_image, output_size, mode, align_corners):  argument
    58   input_image, size=output_size, mode=mode, align_corners=align_corners
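This benchmark drives torch.nn.functional.interpolate over channels_last inputs. A minimal LibTorch sketch of the same call, assuming the C++ distribution of PyTorch; the shape, target size, and mode below are illustrative, not the benchmark's actual configurations:

#include <torch/torch.h>

#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // 4-D NCHW tensor laid out channels-last, mirroring what the
  // benchmark does for 4-D inputs.
  auto input_image = torch::rand({1, 3, 64, 64})
                         .contiguous(torch::MemoryFormat::ChannelsLast);
  auto output = F::interpolate(
      input_image, F::InterpolateFuncOptions()
                       .size(std::vector<int64_t>{128, 128})
                       .mode(torch::kBilinear)
                       .align_corners(false));
  std::cout << output.sizes() << "\n";  // [1, 3, 128, 128]
}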
|
/external/webrtc/sdk/android/src/jni/ |
D | video_decoder_wrapper.cc |
    101  EncodedImage input_image(image_param);  in Decode() local
    103  input_image.capture_time_ms_ =  in Decode()
    104  input_image.Timestamp() / kNumRtpTicksPerMillisec;  in Decode()
    108  input_image.capture_time_ms_ * rtc::kNumNanosecsPerMillisec;  in Decode()
    109  frame_extra_info.timestamp_rtp = input_image.Timestamp();  in Decode()
    110  frame_extra_info.timestamp_ntp = input_image.ntp_time_ms_;  in Decode()
    112  qp_parsing_enabled_ ? ParseQP(input_image) : absl::nullopt;  in Decode()
    120  NativeToJavaEncodedImage(env, input_image);  in Decode()
    226  const EncodedImage& input_image) {  in ParseQP() argument
    227  if (input_image.qp_ != -1) {  in ParseQP()
    [all …]
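The wrapper derives a capture time from the RTP timestamp. A self-contained sketch of that arithmetic, assuming the usual 90 kHz RTP video clock (90 ticks per millisecond); the names mirror the snippet but nothing here is the actual WebRTC code:

#include <cstdint>
#include <iostream>

int main() {
  constexpr int64_t kNumRtpTicksPerMillisec = 90;  // 90 kHz RTP video clock
  uint32_t rtp_timestamp = 900000;                 // 10 s worth of ticks
  int64_t capture_time_ms = rtp_timestamp / kNumRtpTicksPerMillisec;
  std::cout << capture_time_ms << " ms\n";         // prints 10000
}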
|
D | video_decoder_wrapper.h |
    37   int32_t Decode(const EncodedImage& input_image,
    79   absl::optional<uint8_t> ParseQP(const EncodedImage& input_image)
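Many entries below forward this same three-argument Decode(). A sketch of a pass-through decoder against the older webrtc::VideoDecoder interface these files use (with InitDecode rather than the newer Configure); it assumes a WebRTC checkout for the headers and elides error handling:

#include <memory>
#include <utility>

#include "api/video_codecs/video_decoder.h"

// Forwards every call to an owned decoder, in the spirit of the proxy and
// wrapper entries below (video_decoder_proxy_factory.h, frame_dumping_decoder.cc).
class ForwardingDecoder : public webrtc::VideoDecoder {
 public:
  explicit ForwardingDecoder(std::unique_ptr<webrtc::VideoDecoder> inner)
      : inner_(std::move(inner)) {}

  int32_t InitDecode(const webrtc::VideoCodec* codec_settings,
                     int32_t number_of_cores) override {
    return inner_->InitDecode(codec_settings, number_of_cores);
  }
  int32_t Decode(const webrtc::EncodedImage& input_image,
                 bool missing_frames,
                 int64_t render_time_ms) override {
    return inner_->Decode(input_image, missing_frames, render_time_ms);
  }
  int32_t RegisterDecodeCompleteCallback(
      webrtc::DecodedImageCallback* callback) override {
    return inner_->RegisterDecodeCompleteCallback(callback);
  }
  int32_t Release() override { return inner_->Release(); }

 private:
  std::unique_ptr<webrtc::VideoDecoder> inner_;
};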
|
/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/ |
D | lrn_op_test.py |
    36   def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,  argument
    39   output = copy.deepcopy(input_image)
    40   batch_size = input_image.shape[0]
    41   rows = input_image.shape[1]
    42   cols = input_image.shape[2]
    43   depth = input_image.shape[3]
    50   patch = input_image[b, r, c, begin:end]
    128  input_image = random_ops.random_uniform(
    144  input_image=input_image,
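Both LRN tests (here and in compiler/tests below) recompute the op with the textbook formula: each value is divided by (bias + alpha * sum of squares over a window of 2*radius+1 entries along depth)^beta. A standalone C++ restatement of that reference loop over an NHWC buffer; a sketch of the formula, not TensorFlow's kernel:

#include <algorithm>
#include <cmath>
#include <vector>

// Local response normalization over the depth axis of an NHWC tensor.
// Defaults match tf.nn.lrn: depth_radius=5, bias=1, alpha=1, beta=0.5.
std::vector<float> Lrn(const std::vector<float>& in, int batch, int rows,
                       int cols, int depth, int radius = 5, float bias = 1.0f,
                       float alpha = 1.0f, float beta = 0.5f) {
  std::vector<float> out(in.size());
  auto at = [&](int b, int r, int c, int d) {
    return in[((b * rows + r) * cols + c) * depth + d];
  };
  for (int b = 0; b < batch; ++b)
    for (int r = 0; r < rows; ++r)
      for (int c = 0; c < cols; ++c)
        for (int d = 0; d < depth; ++d) {
          int begin = std::max(0, d - radius);     // clamp window to [0, depth)
          int end = std::min(depth, d + radius + 1);
          float sum_sq = 0.0f;                     // sum of squares over window
          for (int i = begin; i < end; ++i)
            sum_sq += at(b, r, c, i) * at(b, r, c, i);
          out[((b * rows + r) * cols + c) * depth + d] =
              at(b, r, c, d) / std::pow(bias + alpha * sum_sq, beta);
        }
  return out;
}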
|
/external/webrtc/modules/video_coding/codecs/vp8/ |
D | libvpx_vp8_decoder.cc |
    172  int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,  in Decode() argument
    181  if (input_image.data() == NULL && input_image.size() > 0) {  in Decode()
    233  if (input_image._frameType != VideoFrameType::kVideoFrameKey)  in Decode()
    239  if (input_image._frameType == VideoFrameType::kVideoFrameKey) {  in Decode()
    266  const uint8_t* buffer = input_image.data();  in Decode()
    267  if (input_image.size() == 0) {  in Decode()
    270  if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,  in Decode()
    284  ret = ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());  in Decode()
|
/external/webrtc/modules/video_coding/codecs/vp9/ |
D | libvpx_vp9_decoder.cc |
    190  int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,  in Decode() argument
    200  if (input_image._frameType == VideoFrameType::kVideoFrameKey) {  in Decode()
    203  rtc::MakeArrayView(input_image.data(), input_image.size()));  in Decode()
    224  if (input_image._frameType != VideoFrameType::kVideoFrameKey)  in Decode()
    230  const uint8_t* buffer = input_image.data();  in Decode()
    231  if (input_image.size() == 0) {  in Decode()
    238  static_cast<unsigned int>(input_image.size()), 0,  in Decode()
    251  ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());  in Decode()
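Both libvpx-backed decoders feed the payload to vpx_codec_decode and then drain decoded pictures with vpx_codec_get_frame. A minimal sketch of that libvpx call sequence, assuming libvpx headers are available and the context was initialized with vpx_codec_dec_init(decoder, vpx_codec_vp9_dx(), nullptr, 0); buffer/size stand in for input_image.data()/input_image.size():

#include <cstddef>
#include <cstdint>

#include <vpx/vp8dx.h>       // vpx_codec_vp9_dx()
#include <vpx/vpx_decoder.h>

bool DecodeOnePayload(vpx_codec_ctx_t* decoder, const uint8_t* buffer,
                      size_t size) {
  // Hand one encoded payload to the decoder (user_priv and deadline unused).
  if (vpx_codec_decode(decoder, buffer, static_cast<unsigned int>(size),
                       /*user_priv=*/nullptr, /*deadline=*/0) != VPX_CODEC_OK) {
    return false;
  }
  // Drain every decoded picture the payload produced.
  vpx_codec_iter_t iter = nullptr;
  while (const vpx_image_t* img = vpx_codec_get_frame(decoder, &iter)) {
    // img->planes / img->stride hold the decoded I420 frame here.
    (void)img;
  }
  return true;
}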
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | lrn_ops_test.py |
    37   def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,  argument
    40   output = copy.deepcopy(input_image)
    41   batch_size = input_image.shape[0]
    42   rows = input_image.shape[1]
    43   cols = input_image.shape[2]
    44   depth = input_image.shape[3]
    51   patch = input_image[b, r, c, begin:end]
|
/external/armnn/samples/ImageClassification/ |
D | run_classifier.py |
    27   input_image_p = args.input_image
    116  def run_inference(interpreter, input_image):  argument
    131  interpreter.set_tensor(input_details[0]["index"], input_image)
    195  input_image = load_image(args.input_image, input_shape, False)
    198  output_tensor = run_inference(interpreter, input_image)
|
/external/tensorflow/tensorflow/python/ops/ |
D | image_grad_d9m_test.py |
    77   input_image = array_ops.zeros((1, 2, 2, 1), dtype=data_type)
    79   tape.watch(input_image)
    81   input_image, (3, 3),
    88   gradient = tape.gradient(output_image, input_image)
    142  input_image = self._randomDataOp(input_shape, data_type)
    150  tape.watch(input_image)
    152  input_image,
    157  return tape.gradient(gradient_injector_output, input_image)
    168  input_image,
    179  input_image,
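This test differentiates through an image resize under a GradientTape. A rough LibTorch analogue of the same check, assuming NCHW layout (the TF test watches a 1x2x2x1 NHWC image) and bilinear upsampling to 3x3; it only illustrates taking the gradient with respect to the input image:

#include <torch/torch.h>

#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  // Leaf tensor we want gradients for, analogous to tape.watch(input_image).
  auto input_image = torch::zeros({1, 1, 2, 2}, torch::requires_grad());
  auto output_image = F::interpolate(
      input_image, F::InterpolateFuncOptions()
                       .size(std::vector<int64_t>{3, 3})
                       .mode(torch::kBilinear)
                       .align_corners(false));
  output_image.sum().backward();
  std::cout << input_image.grad() << "\n";  // d(sum of resized)/d(input)
}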
|
/external/tensorflow/tensorflow/lite/tutorials/ |
D | mnist_tflite.py |
    43   def run_eval(interpreter, input_image):  argument
    59   input_image = np.reshape(input_image, input_details[0]['shape'])
    60   interpreter.set_tensor(input_details[0]['index'], input_image)
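This tutorial and the ArmNN sample above follow the same interpreter flow: reshape the input to the model's expected shape, set the input tensor, invoke, read the output. A compact sketch with the TensorFlow Lite C++ API, assuming a model.tflite on disk with one float input and one float output:

#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

int main() {
  auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(*model, resolver)(&interpreter);
  interpreter->AllocateTensors();  // sizes buffers from the model's shapes

  // Equivalent of interpreter.set_tensor(...): write into the input buffer.
  float* input_image = interpreter->typed_input_tensor<float>(0);
  input_image[0] = 0.0f;  // ... fill the rest of the image here

  interpreter->Invoke();

  const float* output = interpreter->typed_output_tensor<float>(0);
  (void)output;  // argmax over this buffer gives the predicted class
  return 0;
}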
|
/external/webrtc/video/ |
D | frame_dumping_decoder.cc |
    28   int32_t Decode(const EncodedImage& input_image,
    56   int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image,  in Decode() argument
    59   int32_t ret = decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
    60   writer_->WriteFrame(input_image, codec_type_);  in Decode()
|
D | video_stream_decoder_impl_unittest.cc |
    48   int32_t Decode(const EncodedImage& input_image,  in Decode() argument
    51   int32_t ret_code = DecodeCall(input_image, missing_frames, render_time_ms);  in Decode()
    64   (const EncodedImage& input_image,
    88   int32_t Decode(const EncodedImage& input_image,  in Decode() argument
    91   return decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
|
/external/webrtc/test/pc/e2e/analyzer/video/ |
D | quality_analyzing_video_decoder.cc |
    53   int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image,  in Decode() argument
    61   EncodedImageExtractionResult out = extractor_->ExtractData(input_image);  in Decode()
    79   out.id.value_or(VideoFrame::kNotSetId), input_image.Timestamp());  in Decode()
    86   timestamp_to_frame_id_.insert({input_image.Timestamp(), out.id});  in Decode()
    90   decoding_images_.insert({input_image.Timestamp(), std::move(out.image)})  in Decode()
    106  timestamp_to_frame_id_.erase(input_image.Timestamp());  in Decode()
    107  decoding_images_.erase(input_image.Timestamp());  in Decode()
|
/external/webrtc/test/ |
D | fake_encoder.cc |
    92   int32_t FakeEncoder::Encode(const VideoFrame& input_image,  in Encode() argument
    139  encoded.SetTimestamp(input_image.timestamp());  in Encode()
    372  int32_t DelayedEncoder::Encode(const VideoFrame& input_image,  in Encode() argument
    378  return FakeEncoder::Encode(input_image, frame_types);  in Encode()
    407  const VideoFrame& input_image,  in Encode() argument
    418  queue->PostTask([this, input_image, frame_types = *frame_types] {  in Encode()
    419  EncodeCallback(input_image, &frame_types);  in Encode()
    426  const VideoFrame& input_image,  in EncodeCallback() argument
    428  return FakeH264Encoder::Encode(input_image, frame_types);  in EncodeCallback()
|
D | fake_encoder.h |
    50   int32_t Encode(const VideoFrame& input_image,
    137  int32_t Encode(const VideoFrame& input_image,
    158  int32_t Encode(const VideoFrame& input_image,
    161  int32_t EncodeCallback(const VideoFrame& input_image,
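fake_encoder.h declares this Encode override on FakeEncoder and its subclasses; DelayedEncoder above forwards to the base after a delay. A sketch of such a subclass, assuming a WebRTC checkout and that rtc::Thread::SleepMs is available as in this tree; the 5 ms figure is made up:

#include <vector>

#include "rtc_base/thread.h"
#include "test/fake_encoder.h"

// Sleeps briefly before handing the frame to FakeEncoder, in the spirit of
// DelayedEncoder above. Illustrative only, not the test helper itself.
class SlowFakeEncoder : public webrtc::test::FakeEncoder {
 public:
  using FakeEncoder::FakeEncoder;  // inherit the Clock* constructor

  int32_t Encode(
      const webrtc::VideoFrame& input_image,
      const std::vector<webrtc::VideoFrameType>* frame_types) override {
    rtc::Thread::SleepMs(5);  // hypothetical artificial encode delay
    return FakeEncoder::Encode(input_image, frame_types);
  }
};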
|
D | video_decoder_proxy_factory.h |
    50   int32_t Decode(const EncodedImage& input_image,  in Decode() argument
    53   return decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
|
/external/webrtc/api/video_codecs/ |
D | video_decoder_software_fallback_wrapper.cc |
    43   int32_t Decode(const EncodedImage& input_image,
    178  const EncodedImage& input_image,  in Decode() argument
    187  ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
    194  if (input_image._frameType == VideoFrameType::kVideoFrameKey) {  in Decode()
    215  return fallback_decoder_->Decode(input_image, missing_frames,  in Decode()
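The wrapper tries the hardware decoder first and switches to the software decoder when the hardware path requests a fallback. A condensed sketch of that control flow using WebRTC's error-code convention (WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE from video_error_codes.h); the parameter and flag names are illustrative, and the real wrapper additionally re-registers callbacks and replays the last keyframe:

#include "api/video_codecs/video_decoder.h"
#include "modules/video_coding/include/video_error_codes.h"

int32_t DecodeWithFallback(webrtc::VideoDecoder* hw_decoder,
                           webrtc::VideoDecoder* fallback_decoder,
                           bool& using_fallback,
                           const webrtc::EncodedImage& input_image,
                           bool missing_frames, int64_t render_time_ms) {
  if (!using_fallback) {
    int32_t ret =
        hw_decoder->Decode(input_image, missing_frames, render_time_ms);
    if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) return ret;
    using_fallback = true;  // hardware asked to fall back; stay on software
  }
  return fallback_decoder->Decode(input_image, missing_frames, render_time_ms);
}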
|
/external/webrtc/modules/video_coding/codecs/h264/ |
D | h264_decoder_impl.cc |
    330  int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,  in Decode() argument
    344  if (!input_image.data() || !input_image.size()) {  in Decode()
    356  packet->data = const_cast<uint8_t*>(input_image.data());  in Decode()
    357  if (input_image.size() >  in Decode()
    362  packet->size = static_cast<int>(input_image.size());  in Decode()
    363  int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs  in Decode()
    386  h264_bitstream_parser_.ParseBitstream(input_image);  in Decode()
    575  input_image.ColorSpace() ? *input_image.ColorSpace()  in Decode()
    580  .set_timestamp_rtp(input_image.Timestamp())  in Decode()
|
/external/webrtc/modules/video_coding/codecs/multiplex/ |
D | multiplex_decoder_adapter.cc |
    127  int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,  in Decode() argument
    130  MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);  in Decode()
    133  RTC_DCHECK(decoded_augmenting_data_.find(input_image.Timestamp()) ==  in Decode()
    137  std::forward_as_tuple(input_image.Timestamp()),  in Decode()
    143  RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==  in Decode()
    146  std::forward_as_tuple(input_image.Timestamp()),  in Decode()
|
/external/tensorflow/tensorflow/security/advisory/ |
D | tfsa-2022-116.md |
    15   input_image = tf.random.uniform(shape=[4, 4, 4, 4], minval=-10000, maxval=10000, dtype=tf.float32, …
    17   tf.raw_ops.LRNGrad(input_grads=input_grads, input_image=input_image, output_image=output_image, dep…
|
/external/webrtc/video/end_to_end_tests/ |
D | network_state_tests.cc |
    294  int32_t Encode(const VideoFrame& input_image,  in TEST_F() argument
    308  return test::FakeEncoder::Encode(input_image, frame_types);  in TEST_F()
    388  int32_t Encode(const VideoFrame& input_image,  in TEST_F() argument
    391  return test::FakeEncoder::Encode(input_image, frame_types);  in TEST_F()
    411  int32_t Encode(const VideoFrame& input_image,  in TEST_F() argument
    414  return test::FakeEncoder::Encode(input_image, frame_types);  in TEST_F()
|
/external/webrtc/media/engine/ |
D | simulcast_encoder_adapter.cc |
    431  const VideoFrame& input_image,  in Encode() argument
    445  if (input_image.width() % alignment != 0 ||  in Encode()
    446  input_image.height() % alignment != 0) {  in Encode()
    447  RTC_LOG(LS_WARNING) << "Frame " << input_image.width() << "x"  in Encode()
    448  << input_image.height() << " not divisible by "  in Encode()
    477  int src_width = input_image.width();  in Encode()
    478  int src_height = input_image.height();  in Encode()
    488  Timestamp::Micros((1000 * input_image.timestamp()) / 90);  in Encode()
    539  (input_image.video_frame_buffer()->type() ==  in Encode()
    542  int ret = layer.encoder().Encode(input_image, &stream_frame_types);  in Encode()
    [all …]
|
D | encoder_simulcast_proxy.cc |
    51   const VideoFrame& input_image,  in Encode() argument
    53   return encoder_->Encode(input_image, frame_types);  in Encode()
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/ |
D | LRNGrad.pbtxt |
    8    name: "input_image"
    68   name: "input_image"
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/ |
D | LRNGrad.pbtxt |
    8    name: "input_image"
    68   name: "input_image"
|