/external/webrtc/sdk/android/src/jni/
video_decoder_wrapper.cc
    99: EncodedImage input_image(image_param);  in Decode() [local]
   101: input_image.capture_time_ms_ =  in Decode()
   102: input_image.Timestamp() / kNumRtpTicksPerMillisec;  in Decode()
   106: input_image.capture_time_ms_ * rtc::kNumNanosecsPerMillisec;  in Decode()
   107: frame_extra_info.timestamp_rtp = input_image.Timestamp();  in Decode()
   108: frame_extra_info.timestamp_ntp = input_image.ntp_time_ms_;  in Decode()
   110: qp_parsing_enabled_ ? ParseQP(input_image) : absl::nullopt;  in Decode()
   118: NativeToJavaEncodedImage(env, input_image);  in Decode()
   230: const EncodedImage& input_image) {  in ParseQP() [argument]
   231: if (input_image.qp_ != -1) {  in ParseQP()
   [all …]
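The division at lines 101-102 converts the RTP timestamp into a capture time in milliseconds. Video RTP streams use a 90 kHz clock, so kNumRtpTicksPerMillisec is 90; a minimal Python sketch of that arithmetic (the function name is illustrative, not WebRTC's API):

    K_NUM_RTP_TICKS_PER_MILLISEC = 90  # RTP video clock runs at 90 kHz

    def rtp_timestamp_to_capture_time_ms(rtp_timestamp):
        # Mirrors input_image.Timestamp() / kNumRtpTicksPerMillisec above.
        return rtp_timestamp // K_NUM_RTP_TICKS_PER_MILLISEC

    assert rtp_timestamp_to_capture_time_ms(180000) == 2000  # 2 s of 90 kHz ticks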
video_decoder_wrapper.h
    38: int32_t Decode(const EncodedImage& input_image,
    85: absl::optional<uint8_t> ParseQP(const EncodedImage& input_image)
/external/webrtc/modules/video_coding/codecs/vp8/
libvpx_vp8_decoder.cc
   177: int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,  in Decode() [argument]
   186: if (input_image.data() == NULL && input_image.size() > 0) {  in Decode()
   238: if (input_image._frameType != VideoFrameType::kVideoFrameKey)  in Decode()
   241: if (input_image._completeFrame) {  in Decode()
   249: if (input_image._frameType == VideoFrameType::kVideoFrameKey &&  in Decode()
   250: input_image._completeFrame) {  in Decode()
   253: } else if ((!input_image._completeFrame || missing_frames) &&  in Decode()
   278: const uint8_t* buffer = input_image.data();  in Decode()
   279: if (input_image.size() == 0) {  in Decode()
   282: if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,  in Decode()
   [all …]
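Before handing the buffer to vpx_codec_decode() (line 282), the decoder validates it: a null payload with a non-zero size is rejected, and after a reset only a complete key frame is accepted as a starting point. A loose Python sketch of those guards, with illustrative names rather than the WebRTC types:

    from types import SimpleNamespace

    def should_decode(input_image, key_frame_required):
        # Null payload with a non-zero size is malformed (cf. line 186).
        if input_image.data is None and input_image.size > 0:
            return False
        # Only a complete key frame is a safe restart point (cf. lines 238-250).
        if key_frame_required and not (
                input_image.frame_type == 'key' and input_image.complete_frame):
            return False
        return True

    frame = SimpleNamespace(data=b'\x00', size=1, frame_type='key', complete_frame=True)
    assert should_decode(frame, key_frame_required=True)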
libvpx_vp8_encoder.cc
   997: rtc::scoped_refptr<I420BufferInterface> input_image =  in Encode() [local]
  1001: RTC_DCHECK_EQ(input_image->width(), raw_images_[0].d_w);  in Encode()
  1002: RTC_DCHECK_EQ(input_image->height(), raw_images_[0].d_h);  in Encode()
  1007: const_cast<uint8_t*>(input_image->DataY());  in Encode()
  1009: const_cast<uint8_t*>(input_image->DataU());  in Encode()
  1011: const_cast<uint8_t*>(input_image->DataV());  in Encode()
  1013: raw_images_[0].stride[VPX_PLANE_Y] = input_image->StrideY();  in Encode()
  1014: raw_images_[0].stride[VPX_PLANE_U] = input_image->StrideU();  in Encode()
  1015: raw_images_[0].stride[VPX_PLANE_V] = input_image->StrideV();  in Encode()
  1130: int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,  in GetEncodedPartitions() [argument]
  [all …]
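Lines 1007-1015 point the vpx_image planes straight at the I420 buffer's Y/U/V data and strides, avoiding a copy. For orientation, the plane geometry of I420 (planar 4:2:0) sketched with numpy; this stands in for the buffer layout only, not WebRTC's buffer classes:

    import numpy as np

    def alloc_i420(width, height):
        # Full-resolution luma, quarter-resolution chroma; stride == width here,
        # though real buffers may pad strides for alignment.
        y = np.zeros((height, width), dtype=np.uint8)
        u = np.zeros((height // 2, width // 2), dtype=np.uint8)
        v = np.zeros((height // 2, width // 2), dtype=np.uint8)
        return y, u, v

    y, u, v = alloc_i420(640, 480)
    assert y.nbytes + u.nbytes + v.nbytes == 640 * 480 * 3 // 2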
/external/webrtc/modules/video_coding/codecs/multiplex/
multiplex_encoder_adapter.cc
   152: const VideoFrame& input_image,  in Encode() [argument]
   164: const bool has_alpha = input_image.video_frame_buffer()->type() ==  in Encode()
   171: input_image.video_frame_buffer().get());  in Encode()
   186: std::forward_as_tuple(input_image.timestamp()),  in Encode()
   195: int rv = encoders_[kYUVStream]->Encode(input_image, &adjusted_frame_types);  in Encode()
   207: : input_image.video_frame_buffer()->GetI420A();  in Encode()
   209: WrapI420Buffer(input_image.width(), input_image.height(),  in Encode()
   213: rtc::KeepRefUntilDone(input_image.video_frame_buffer()));  in Encode()
   216: .set_timestamp_rtp(input_image.timestamp())  in Encode()
   217: .set_timestamp_ms(input_image.render_time_ms())  in Encode()
   [all …]
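The adapter feeds the plain YUV planes to one encoder (line 195) and, when the input has alpha, wraps the alpha plane as the luma of a second I420 frame (lines 207-213) so a stock encoder can compress it; both frames carry the same RTP timestamp. A rough numpy sketch of that split; the neutral-gray chroma fill is an assumption, and the real code wraps existing planes without copying:

    import numpy as np

    def split_i420a(y, u, v, a):
        # Stream 0: the ordinary YUV planes. Stream 1: alpha reused as luma,
        # with flat dummy chroma planes at half resolution.
        dummy = np.full((a.shape[0] // 2, a.shape[1] // 2), 128, dtype=np.uint8)
        return (y, u, v), (a, dummy, dummy)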
multiplex_decoder_adapter.cc
   130: int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,  in Decode() [argument]
   133: MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);  in Decode()
   136: RTC_DCHECK(decoded_augmenting_data_.find(input_image.Timestamp()) ==  in Decode()
   140: std::forward_as_tuple(input_image.Timestamp()),  in Decode()
   146: RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==  in Decode()
   149: std::forward_as_tuple(input_image.Timestamp()),  in Decode()
/external/webrtc/modules/video_coding/codecs/h264/
h264_decoder_impl.cc
   241: int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,  in Decode() [argument]
   255: if (!input_image.data() || !input_image.size()) {  in Decode()
   264: packet.data = const_cast<uint8_t*>(input_image.data());  in Decode()
   265: if (input_image.size() >  in Decode()
   270: packet.size = static_cast<int>(input_image.size());  in Decode()
   271: int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs  in Decode()
   294: h264_bitstream_parser_.ParseBitstream(input_image.data(), input_image.size());  in Decode()
   336: input_image.ColorSpace() ? *input_image.ColorSpace()  in Decode()
   341: .set_timestamp_rtp(input_image.Timestamp())  in Decode()
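The decoder aliases the encoded buffer into an FFmpeg AVPacket (line 264) after two guards: the input must be non-empty (line 255) and must fit AVPacket's int-typed size field (lines 265-270). A hedged Python rendering of those checks, with a plain tuple standing in for the packet:

    from types import SimpleNamespace

    INT_MAX = 2**31 - 1

    def build_packet(input_image):
        if not input_image.data or not input_image.size:
            raise ValueError('empty encoded frame')
        if input_image.size > INT_MAX:
            raise ValueError('frame too large for AVPacket.size')
        return input_image.data, int(input_image.size)

    pkt = build_packet(SimpleNamespace(data=b'\x01\x02', size=2))  # (b'\x01\x02', 2)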
/external/tensorflow/tensorflow/python/ops/
image_grad_deterministic_test.py
    83: input_image = self._randomDataOp(input_shape, data_type)
    91: tape.watch(input_image)
    93: input_image,
    98: return tape.gradient(gradient_injector_output, input_image)
   109: input_image,
   120: input_image,
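The test uses a gradient-injector pattern: it multiplies the op output by a fixed upstream-gradient tensor inside the tape, so tape.gradient() returns the backprop of that specific gradient rather than of implicit all-ones. A self-contained sketch, with tf.image.resize standing in for the op under test (an assumption; the real test parametrizes the op and shapes):

    import tensorflow as tf

    input_image = tf.random.normal([1, 4, 4, 1], seed=1)
    upstream_gradients = tf.random.normal([1, 8, 8, 1], seed=2)
    with tf.GradientTape() as tape:
        tape.watch(input_image)  # cf. line 91
        output_image = tf.image.resize(input_image, [8, 8])
        gradient_injector_output = output_image * upstream_gradients
    input_grad = tape.gradient(gradient_injector_output, input_image)  # cf. line 98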
/external/tensorflow/tensorflow/compiler/tests/
lrn_ops_test.py
    41: def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,  [argument]
    44: output = copy.deepcopy(input_image)
    45: batch_size = input_image.shape[0]
    46: rows = input_image.shape[1]
    47: cols = input_image.shape[2]
    48: depth = input_image.shape[3]
    55: patch = input_image[b, r, c, begin:end]
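The _LRN helper is a looped reference implementation of local response normalization over the depth axis: each output element is the input divided by (bias + alpha * sum of squares over a depth window) ** beta. A standalone numpy version of the same loop (defaults match tf.nn.lrn; NHWC layout assumed):

    import numpy as np

    def lrn_reference(input_image, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5):
        output = np.empty_like(input_image)
        batch, rows, cols, depth = input_image.shape
        for b in range(batch):
            for r in range(rows):
                for c in range(cols):
                    for d in range(depth):
                        begin = max(0, d - depth_radius)
                        end = min(depth, d + depth_radius + 1)
                        patch = input_image[b, r, c, begin:end]  # cf. line 55
                        norm = bias + alpha * np.sum(patch * patch)
                        output[b, r, c, d] = input_image[b, r, c, d] / norm ** beta
        return output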
/external/tensorflow/tensorflow/python/kernel_tests/
lrn_op_test.py
    38: def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,  [argument]
    41: output = copy.deepcopy(input_image)
    42: batch_size = input_image.shape[0]
    43: rows = input_image.shape[1]
    44: cols = input_image.shape[2]
    45: depth = input_image.shape[3]
    52: patch = input_image[b, r, c, begin:end]
/external/tensorflow/tensorflow/lite/tutorials/
mnist_tflite.py
    47: def run_eval(interpreter, input_image):  [argument]
    63: input_image = np.reshape(input_image, input_details[0]['shape'])
    64: interpreter.set_tensor(input_details[0]['index'], input_image)
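run_eval() follows the standard TFLite Interpreter flow: reshape the input to the model's expected shape, set the input tensor, invoke, and read the output. A runnable sketch of that flow; the model path and the 28x28 float input are assumptions for illustration:

    import numpy as np
    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path='mnist.tflite')  # assumed path
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_image = np.zeros((28, 28), dtype=np.float32)  # stand-in MNIST digit
    input_image = np.reshape(input_image, input_details[0]['shape'])  # cf. line 63
    interpreter.set_tensor(input_details[0]['index'], input_image)    # cf. line 64
    interpreter.invoke()
    prediction = interpreter.get_tensor(output_details[0]['index'])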
/external/webrtc/video/
frame_dumping_decoder.cc
    29: int32_t Decode(const EncodedImage& input_image,
    58: int32_t FrameDumpingDecoder::Decode(const EncodedImage& input_image,  in Decode() [argument]
    61: int32_t ret = decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
    62: writer_->WriteFrame(input_image, codec_type_);  in Decode()
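FrameDumpingDecoder is a decorator: Decode() forwards to the wrapped decoder and then writes the same encoded frame to a file writer (lines 61-62). The shape of that pattern in Python, with hypothetical decoder and writer interfaces:

    class FrameDumpingDecoder:
        """Taps encoded frames on their way into a real decoder."""

        def __init__(self, decoder, writer):
            self._decoder = decoder  # wrapped decoder (hypothetical API)
            self._writer = writer    # e.g. an IVF file writer

        def decode(self, input_image, missing_frames, render_time_ms):
            ret = self._decoder.decode(input_image, missing_frames, render_time_ms)
            self._writer.write_frame(input_image)
            return ret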
video_stream_decoder_impl_unittest.cc
    50: int32_t Decode(const EncodedImage& input_image,  in Decode() [argument]
    53: int32_t ret_code = DecodeCall(input_image, missing_frames, render_time_ms);  in Decode()
    66: (const EncodedImage& input_image,
    91: int32_t Decode(const EncodedImage& input_image,  in Decode() [argument]
    94: return decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
/external/tensorflow/tensorflow/python/keras/layers/preprocessing/
image_preprocessing_test.py
    96: input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(dtype)
    99: output_image = layer(input_image)
   112: input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(dtype)
   115: output_image = layer(input_image)
   533: input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
   537: output_image = layer(input_image)
   553: input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
   557: output_image = layer(input_image)
   573: input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
   577: output_image = layer(input_image)
   [all …]
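Each test builds a small integer-ramp image and pushes it through a preprocessing layer (lines 96-99). A concrete version of that pattern; Resizing is an assumed example of the layer under test, and older TF releases expose it as tf.keras.layers.experimental.preprocessing.Resizing:

    import numpy as np
    import tensorflow as tf

    dtype = np.float32
    input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(dtype)  # cf. line 96
    layer = tf.keras.layers.Resizing(2, 2)
    output_image = layer(input_image)  # shape (1, 2, 2, 1)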
/external/webrtc/test/
fake_encoder.cc
    91: int32_t FakeEncoder::Encode(const VideoFrame& input_image,  in Encode() [argument]
   139: encoded.SetTimestamp(input_image.timestamp());  in Encode()
   362: int32_t DelayedEncoder::Encode(const VideoFrame& input_image,  in Encode() [argument]
   368: return FakeEncoder::Encode(input_image, frame_types);  in Encode()
   399: const VideoFrame& input_image,  in EncodeTask() [argument]
   402: input_image_(input_image),  in EncodeTask()
   417: const VideoFrame& input_image,  in Encode() [argument]
   428: queue->PostTask(std::make_unique<EncodeTask>(this, input_image, frame_types));  in Encode()
   434: const VideoFrame& input_image,  in EncodeCallback() [argument]
   436: return FakeH264Encoder::Encode(input_image, frame_types);  in EncodeCallback()
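The multithreaded encoder matches show Encode() packaging the frame into an EncodeTask and posting it to a task queue (line 428) instead of encoding inline. A minimal Python sketch of that post-to-worker pattern, assuming nothing about WebRTC's TaskQueue beyond FIFO execution:

    import queue
    import threading

    class ThreadedEncoder:
        def __init__(self, inner_encoder):
            self.inner = inner_encoder
            self.tasks = queue.Queue()
            threading.Thread(target=self._run, daemon=True).start()

        def encode(self, input_image, frame_types):
            self.tasks.put((input_image, frame_types))  # non-blocking post
            return 0  # success, cf. WEBRTC_VIDEO_CODEC_OK

        def _run(self):
            while True:
                input_image, frame_types = self.tasks.get()
                self.inner.encode(input_image, frame_types)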
fake_encoder.h
    51: int32_t Encode(const VideoFrame& input_image,
   134: int32_t Encode(const VideoFrame& input_image,
   155: int32_t Encode(const VideoFrame& input_image,
   158: int32_t EncodeCallback(const VideoFrame& input_image,
video_decoder_proxy_factory.h
    50: int32_t Decode(const EncodedImage& input_image,  in Decode() [argument]
    53: return decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
/external/webrtc/api/video_codecs/
video_decoder_software_fallback_wrapper.cc
    45: int32_t Decode(const EncodedImage& input_image,
   192: const EncodedImage& input_image,  in Decode() [argument]
   201: ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);  in Decode()
   208: if (input_image._frameType == VideoFrameType::kVideoFrameKey) {  in Decode()
   229: return fallback_decoder_->Decode(input_image, missing_frames,  in Decode()
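The wrapper tries the hardware decoder first and only arms the software fallback when the hardware decoder requests it, deferring the actual switch until a key frame (line 208) so the software decoder starts from a decodable point. A loose Python sketch of that control flow; the return codes and the exact switching rule are inferred from the matches, not a faithful port:

    OK, ERROR, FALLBACK_SOFTWARE = 0, -1, -2  # illustrative codes

    class SoftwareFallbackDecoder:
        def __init__(self, hw_decoder, fallback_decoder):
            self._hw = hw_decoder
            self._sw = fallback_decoder
            self._use_fallback = False

        def decode(self, input_image, missing_frames, render_time_ms):
            if not self._use_fallback:
                ret = self._hw.decode(input_image, missing_frames, render_time_ms)
                if ret != FALLBACK_SOFTWARE:
                    return ret
                if input_image.frame_type != 'key':
                    return ERROR  # wait for a key frame before switching
                self._use_fallback = True
            return self._sw.decode(input_image, missing_frames, render_time_ms)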
/external/webrtc/test/pc/e2e/analyzer/video/
quality_analyzing_video_decoder.cc
    51: int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image,  in Decode() [argument]
    59: EncodedImageExtractionResult out = extractor_->ExtractData(input_image, id_);  in Decode()
    77: out.id, input_image.Timestamp());  in Decode()
    84: timestamp_to_frame_id_.insert({input_image.Timestamp(), out.id});  in Decode()
   100: timestamp_to_frame_id_.erase(input_image.Timestamp());  in Decode()
/external/webrtc/video/end_to_end_tests/
network_state_tests.cc
   278: int32_t Encode(const VideoFrame& input_image,  in TEST_F() [argument]
   292: return test::FakeEncoder::Encode(input_image, frame_types);  in TEST_F()
   371: int32_t Encode(const VideoFrame& input_image,  in TEST_F() [argument]
   374: return test::FakeEncoder::Encode(input_image, frame_types);  in TEST_F()
   394: int32_t Encode(const VideoFrame& input_image,  in TEST_F() [argument]
   397: return test::FakeEncoder::Encode(input_image, frame_types);  in TEST_F()
/external/webrtc/modules/video_coding/codecs/vp9/
vp9_impl.cc
   866: int VP9EncoderImpl::Encode(const VideoFrame& input_image,  in Encode() [argument]
   898: 1000 * input_image.timestamp() / kVideoPayloadTypeFrequency;  in Encode()
   905: input_image.update_rect().IsEmpty() &&  in Encode()
   977: RTC_DCHECK_EQ(input_image.width(), raw_->d_w);  in Encode()
   978: RTC_DCHECK_EQ(input_image.height(), raw_->d_h);  in Encode()
   984: input_image_ = &input_image;  in Encode()
   992: i420_buffer = input_image.video_frame_buffer()->ToI420();  in Encode()
  1010: switch (input_image.video_frame_buffer()->type()) {  in Encode()
  1012: i010_buffer = input_image.video_frame_buffer()->GetI010();  in Encode()
  1017: I010Buffer::Copy(*input_image.video_frame_buffer()->ToI420());  in Encode()
   [all …]
/external/webrtc/media/engine/
simulcast_encoder_adapter.cc
   350: const VideoFrame& input_image,  in Encode() [argument]
   382: int src_width = input_image.width();  in Encode()
   383: int src_height = input_image.height();  in Encode()
   391: 1000 * input_image.timestamp() / 90000;  // kVideoPayloadTypeFrequency  in Encode()
   425: (input_image.video_frame_buffer()->type() ==  in Encode()
   430: int ret = streaminfos_[stream_idx].encoder->Encode(input_image,  in Encode()
   437: src_buffer = input_image.video_frame_buffer()->ToI420();  in Encode()
   446: VideoFrame frame(input_image);  in Encode()
encoder_simulcast_proxy.cc
    54: const VideoFrame& input_image,  in Encode() [argument]
    56: return encoder_->Encode(input_image, frame_types);  in Encode()
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
LRNGrad.pbtxt
     8: name: "input_image"
    68: name: "input_image"
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
LRNGrad.pbtxt
     8: name: "input_image"
    68: name: "input_image"