
Searched refs:input_image (Results 1 – 25 of 40) sorted by relevance

/external/libxcam/tests/
test-image-deblurring.cpp
50 blind_deblurring (cv::Mat &input_image, cv::Mat &output_image) in blind_deblurring() argument
54 image_deblurring->blind_deblurring (input_image, output_image, kernel, -1, -1, false); in blind_deblurring()
58 non_blind_deblurring (cv::Mat &input_image, cv::Mat &output_image) in non_blind_deblurring() argument
61 cv::cvtColor (input_image, input_image, CV_BGR2GRAY); in non_blind_deblurring()
74 cv::filter2D (input_image, blurred, CV_32FC1, conv_kernel, cv::Point(-1, -1), 0, cv::BORDER_CONSTANT); in non_blind_deblurring()
147 cv::Mat input_image = cv::imread (file_in_name, CV_LOAD_IMAGE_COLOR); in main() local
149 if (input_image.empty ()) in main()
156 blind_deblurring (input_image, output_image); in main()
160 non_blind_deblurring (input_image, output_image); in main()
162 float input_sharp = sharp->measure_sharp (input_image); in main()
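The test harness above converts the input to grayscale and convolves it with a known kernel to synthesize a blurred image before deblurring and measuring sharpness. A minimal sketch of the same setup in Python/OpenCV (the file path and 5x5 box kernel are illustrative, and cv2's modern constants stand in for the legacy CV_BGR2GRAY / CV_LOAD_IMAGE_COLOR names):

    import cv2
    import numpy as np

    # Load in color, then reduce to grayscale as non_blind_deblurring() does.
    input_image = cv2.imread('input.jpg', cv2.IMREAD_COLOR)  # illustrative path
    gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)

    # Convolve with a known kernel to produce the blurred test input.
    conv_kernel = np.ones((5, 5), np.float32) / 25.0  # assumed kernel
    blurred = cv2.filter2D(gray.astype(np.float32), cv2.CV_32F, conv_kernel,
                           anchor=(-1, -1), delta=0,
                           borderType=cv2.BORDER_CONSTANT)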
/external/webrtc/webrtc/modules/video_coding/codecs/vp8/
vp8_impl.cc
724 const VideoFrame& input_image = in Encode() local
727 if (quality_scaler_enabled_ && (input_image.width() != codec_.width || in Encode()
728 input_image.height() != codec_.height)) { in Encode()
729 int ret = UpdateCodecFrameSize(input_image); in Encode()
738 RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w)); in Encode()
739 RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h)); in Encode()
744 const_cast<uint8_t*>(input_image.buffer(kYPlane)); in Encode()
746 const_cast<uint8_t*>(input_image.buffer(kUPlane)); in Encode()
748 const_cast<uint8_t*>(input_image.buffer(kVPlane)); in Encode()
750 raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane); in Encode()
[all …]
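Rather than copying pixels, the VP8 encoder points the libvpx raw image at the frame's Y/U/V plane buffers and strides (lines 744-750). A rough numpy illustration of the I420 plane/stride layout this relies on; dimensions and strides are assumed, and none of these names are WebRTC's API:

    import numpy as np

    width, height = 640, 480
    y_stride, uv_stride = 640, 320  # assumed: no row padding

    # I420: full-resolution luma, chroma subsampled 2x in each dimension.
    y_plane = np.zeros((height, y_stride), np.uint8)
    u_plane = np.zeros((height // 2, uv_stride), np.uint8)
    v_plane = np.zeros((height // 2, uv_stride), np.uint8)

    def visible(plane, w, h):
        # A view of the visible pixels: the numpy analogue of handing
        # libvpx a plane pointer plus a stride without copying.
        return plane[:h, :w]

    assert visible(y_plane, width, height).shape == (height, width)

The RTC_DCHECK_EQ calls at lines 738-739 are the encoder asserting that the frame dimensions still match the raw image it wired up.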
simulcast_encoder_adapter.cc
232 const VideoFrame& input_image, in Encode() argument
261 int src_width = input_image.width(); in Encode()
262 int src_height = input_image.height(); in Encode()
284 input_image.IsZeroSize()) { in Encode()
285 streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info, in Encode()
294 input_image.buffer(kYPlane), input_image.stride(kYPlane), in Encode()
295 input_image.buffer(kUPlane), input_image.stride(kUPlane), in Encode()
296 input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width, in Encode()
301 dst_frame.set_timestamp(input_image.timestamp()); in Encode()
302 dst_frame.set_render_time_ms(input_image.render_time_ms()); in Encode()
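Each simulcast stream receives the input frame scaled to its own resolution, with the original timestamp and render time copied onto the scaled frame (lines 301-302) to keep the streams in sync. A hedged sketch of per-plane I420 downscaling in Python/OpenCV; the function name and interpolation choice are mine, and WebRTC itself delegates this to a dedicated I420 scaler:

    import cv2

    def scale_i420(y, u, v, dst_w, dst_h):
        # Scale luma and the half-resolution chroma planes independently.
        y2 = cv2.resize(y, (dst_w, dst_h), interpolation=cv2.INTER_AREA)
        u2 = cv2.resize(u, (dst_w // 2, dst_h // 2), interpolation=cv2.INTER_AREA)
        v2 = cv2.resize(v, (dst_w // 2, dst_h // 2), interpolation=cv2.INTER_AREA)
        return y2, u2, v2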
vp8_impl.h
49 virtual int Encode(const VideoFrame& input_image,
78 int UpdateCodecFrameSize(const VideoFrame& input_image);
86 int GetEncodedPartitions(const VideoFrame& input_image,
131 int Decode(const EncodedImage& input_image,
149 int DecodePartitions(const EncodedImage& input_image,
simulcast_encoder_adapter.h
43 int Encode(const VideoFrame& input_image,
/external/tensorflow/tensorflow/compiler/tests/
lrn_ops_test.py
41 def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0, argument
44 output = copy.deepcopy(input_image)
45 batch_size = input_image.shape[0]
46 rows = input_image.shape[1]
47 cols = input_image.shape[2]
48 depth = input_image.shape[3]
55 patch = input_image[b, r, c, begin:end]
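_LRN walks every (batch, row, col) position and normalizes each depth value by a power of the summed squares of its depth neighbors. A self-contained numpy version of that reference, following the standard LRN formula; beta is not visible in the snippet and is assumed to default to 0.5, as in tf.nn.local_response_normalization:

    import copy
    import numpy as np

    def lrn_reference(input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
                      beta=0.5):
        # output[b,r,c,d] = input[b,r,c,d] /
        #   (bias + alpha * sum(input[b,r,c,d-radius:d+radius+1] ** 2)) ** beta
        # input_image is assumed to be a float ndarray of shape (N, H, W, C).
        output = copy.deepcopy(input_image)
        batch_size, rows, cols, depth = input_image.shape
        for b in range(batch_size):
            for r in range(rows):
                for c in range(cols):
                    for d in range(depth):
                        begin = max(0, d - lrn_depth_radius)
                        end = min(depth, d + lrn_depth_radius + 1)
                        patch = input_image[b, r, c, begin:end]
                        output[b, r, c, d] /= (
                            bias + alpha * np.sum(patch * patch)) ** beta
        return output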
/external/tensorflow/tensorflow/python/kernel_tests/
lrn_op_test.py
38 def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0, argument
41 output = copy.deepcopy(input_image)
42 batch_size = input_image.shape[0]
43 rows = input_image.shape[1]
44 cols = input_image.shape[2]
45 depth = input_image.shape[3]
52 patch = input_image[b, r, c, begin:end]
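The non-XLA kernel test carries the same reference helper, which can be checked directly against TensorFlow's built-in op. A hedged TF 1.x-style usage sketch, reusing the lrn_reference function from the sketch above:

    import numpy as np
    import tensorflow as tf

    x = np.random.rand(1, 4, 4, 16).astype(np.float32)
    y = tf.nn.local_response_normalization(
        x, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5)
    with tf.Session() as sess:
        np.testing.assert_allclose(sess.run(y), lrn_reference(x), rtol=1e-4)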
/external/webrtc/webrtc/modules/video_coding/codecs/vp9/
vp9_impl.cc
475 int VP9EncoderImpl::Encode(const VideoFrame& input_image, in Encode() argument
481 if (input_image.IsZeroSize()) { in Encode()
492 RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w)); in Encode()
493 RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h)); in Encode()
499 input_image_ = &input_image; in Encode()
503 raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane)); in Encode()
504 raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane)); in Encode()
505 raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane)); in Encode()
506 raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane); in Encode()
507 raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane); in Encode()
[all …]
vp9_impl.h
40 int Encode(const VideoFrame& input_image,
142 int Decode(const EncodedImage& input_image,
/external/tensorflow/tensorflow/lite/tutorials/
mnist_tflite.py
46 def run_eval(interpreter, input_image): argument
62 input_image = np.reshape(input_image, input_details[0]['shape'])
63 interpreter.set_tensor(input_details[0]['index'], input_image)
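run_eval reshapes the MNIST image to the interpreter's expected input shape, sets the input tensor, invokes, and reads the result back. A minimal standalone sketch of that flow with the TFLite Python API; the model path is a placeholder:

    import numpy as np
    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path='mnist.tflite')  # placeholder
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    def run_eval(interpreter, input_image):
        # Match the model's input shape and dtype, run one inference,
        # and return the predicted digit.
        input_image = np.reshape(
            input_image, input_details[0]['shape']).astype(
                input_details[0]['dtype'])
        interpreter.set_tensor(input_details[0]['index'], input_image)
        interpreter.invoke()
        output = interpreter.get_tensor(output_details[0]['index'])
        return int(np.argmax(output))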
/external/webrtc/webrtc/video/
video_decoder.cc
86 const EncodedImage& input_image, in Decode() argument
93 if (!fallback_decoder_ || input_image._frameType == kVideoFrameKey) { in Decode()
94 int32_t ret = decoder_->Decode(input_image, missing_frames, fragmentation, in Decode()
112 return fallback_decoder_->Decode(input_image, missing_frames, fragmentation, in Decode()
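The wrapper tries the primary decoder first and only routes frames to (or brings up) the fallback at a keyframe, since a decoder cannot start cleanly mid-GOP. An illustrative Python sketch of that gating; the class and method names are invented, not WebRTC's:

    class DecoderWithFallback:
        # Illustrative keyframe-gated fallback pattern.
        def __init__(self, primary, make_fallback):
            self.primary = primary
            self.make_fallback = make_fallback
            self.fallback = None

        def decode(self, frame, is_keyframe):
            # Use the primary until it fails; switch decoders only at a
            # keyframe so the new decoder starts from a clean state.
            if self.fallback is None or is_keyframe:
                if self.primary.decode(frame):
                    return True
                if not is_keyframe:
                    return False
                self.fallback = self.fallback or self.make_fallback()
            return self.fallback.decode(frame)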
/external/tensorflow/tensorflow/contrib/resampler/xla/
resampler_ops_xla_test.py
34 input_image = array_ops.placeholder(image_np.dtype)
36 resampled = resampler.resampler(input_image, warp, name='resampler')
37 out = sess.run(resampled, {input_image: image_np, warp: warp_np})
45 input_image = array_ops.placeholder(input_np.dtype)
50 input_image, warp, grad_output)
53 input_image: input_np,
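The test builds placeholders for the image and warp field, applies the contrib resampler op, and runs it under a session. A hedged TF 1.x-style sketch of the forward pass; the shapes are illustrative (one 4x4 single-channel image, a single (x, y) sample point):

    import numpy as np
    import tensorflow as tf
    from tensorflow.contrib import resampler

    image_np = np.random.rand(1, 4, 4, 1).astype(np.float32)
    warp_np = np.array([[[1.5, 1.5]]], np.float32)  # shape (1, 1, 2)

    input_image = tf.placeholder(image_np.dtype, shape=image_np.shape)
    warp = tf.placeholder(warp_np.dtype, shape=warp_np.shape)
    resampled = resampler.resampler(input_image, warp, name='resampler')

    with tf.Session() as sess:
        out = sess.run(resampled, {input_image: image_np, warp: warp_np})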
/external/libxcam/modules/ocl/
cl_demo_handler.cpp
68 SmartPtr<CLImage> input_image = convert_to_climage (context, input, desc); in prepare_parameters() local
71 XCAM_ASSERT (input_image.ptr () && output_image.ptr ()); in prepare_parameters()
72 XCAM_ASSERT (input_image->is_valid () && output_image->is_valid ()); in prepare_parameters()
73 args.push_back (new CLMemArgument (input_image)); in prepare_parameters()
cl_wavelet_denoise_handler.cpp
60 SmartPtr<CLMemory> input_image = convert_to_clbuffer (context, input); in prepare_arguments() local
78 input_image->is_valid () && reconstruct_image->is_valid (), in prepare_arguments()
88 args.push_back (new CLMemArgument (input_image)); in prepare_arguments()
92 args.push_back (new CLMemArgument (input_image)); in prepare_arguments()
/external/tensorflow/tensorflow/contrib/image/python/kernel_tests/
sparse_image_warp_test.py
111 warped_image, input_image, _ = sess.run(
114 self.assertAllClose(warped_image, input_image)
153 warped_image, input_image, flow = sess.run(
158 input_image[0, 4, 4, :],
180 input_image = self.load_image(input_file, sess)
191 float_image = np.expand_dims(np.float32(input_image) / 255, 0)
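The identity tests assert that warping with identical source and destination control points leaves the image (including a sampled pixel such as [0, 4, 4, :]) essentially unchanged. A hedged sketch of that identity check; the control-point coordinates are illustrative:

    import numpy as np
    import tensorflow as tf
    from tensorflow.contrib.image import sparse_image_warp

    image_np = np.random.rand(1, 8, 8, 1).astype(np.float32)
    image = tf.constant(image_np)
    # Identical source and destination points => ~zero flow everywhere.
    points = tf.constant([[[1.0, 1.0], [1.0, 6.0], [6.0, 1.0], [6.0, 6.0]]],
                         tf.float32)

    warped_image, flow = sparse_image_warp(image, points, points)

    with tf.Session() as sess:
        warped, input_image = sess.run([warped_image, image])
        np.testing.assert_allclose(warped, input_image, atol=1e-5)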
/external/webrtc/webrtc/modules/video_coding/codecs/h264/
h264_video_toolbox_decoder.cc
122 const EncodedImage& input_image, in Decode() argument
127 RTC_DCHECK(input_image._buffer); in Decode()
130 if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer, in Decode()
131 input_image._length, video_format_, in Decode()
147 new internal::FrameDecodeParams(callback_, input_image._timeStamp)); in Decode()
h264_video_toolbox_encoder.cc
236 const VideoFrame& input_image, in Encode() argument
239 if (input_image.IsZeroSize()) { in Encode()
263 if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) { in Encode()
281 CMTimeMake(input_image.render_time_ms(), 1000); in Encode()
291 input_image.render_time_ms(), input_image.timestamp())); in Encode()
h264_video_toolbox_decoder.h
36 int Decode(const EncodedImage& input_image,
h264_video_toolbox_encoder.h
39 int Encode(const VideoFrame& input_image,
/external/webrtc/webrtc/test/
fake_encoder.cc
48 int32_t FakeEncoder::Encode(const VideoFrame& input_image, in Encode() argument
102 encoded._timeStamp = input_image.timestamp(); in Encode()
103 encoded.capture_time_ms_ = input_image.render_time_ms(); in Encode()
200 int32_t DelayedEncoder::Encode(const VideoFrame& input_image, in Encode() argument
204 return FakeEncoder::Encode(input_image, codec_specific_info, frame_types); in Encode()
fake_encoder.h
34 int32_t Encode(const VideoFrame& input_image,
78 int32_t Encode(const VideoFrame& input_image,
configurable_frame_size_encoder.h
31 int32_t Encode(const VideoFrame& input_image,
/external/webrtc/webrtc/
video_decoder.h
66 virtual int32_t Decode(const EncodedImage& input_image,
97 int32_t Decode(const EncodedImage& input_image,
/external/tensorflow/tensorflow/contrib/receptive_field/
README.md
50 images = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='input_image')
56 g.as_graph_def(), 'input_image', 'my_output_endpoint')
79 images = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='input_image')
85 g.as_graph_def(), 'input_image', 'InceptionResnetV2/Conv2d_7b_1x1/Relu')
170 --input_node input_image \
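Both README examples compute a model's receptive-field parameters from its GraphDef, given the input and output node names. A hedged sketch of the same call on a toy one-layer model; the endpoint name 'conv1/Relu' is an assumption about how slim names the resulting op:

    import tensorflow as tf
    import tensorflow.contrib.slim as slim
    from tensorflow.contrib import receptive_field

    g = tf.Graph()
    with g.as_default():
        images = tf.placeholder(
            tf.float32, shape=(1, None, None, 3), name='input_image')
        net = slim.conv2d(images, 32, [3, 3], scope='conv1')  # toy model

    rf_x, rf_y, eff_stride_x, eff_stride_y, eff_pad_x, eff_pad_y = (
        receptive_field.compute_receptive_field_from_graph_def(
            g.as_graph_def(), 'input_image', 'conv1/Relu'))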
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_LRNGrad.pbtxt
11 name: "input_image"
