/third_party/gstreamer/gstplugins_good/gst/videofilter/ |
D | gstvideomedian.c |
     83  GstVideoFrame * in_frame, GstVideoFrame * out_frame);
    231  GstVideoFrame * in_frame, GstVideoFrame * out_frame)    in gst_video_median_transform_frame() argument
    236  median_5 (GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0),    in gst_video_median_transform_frame()
    237  GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0),    in gst_video_median_transform_frame()
    243  gst_video_frame_copy_plane (out_frame, in_frame, 1);    in gst_video_median_transform_frame()
    244  gst_video_frame_copy_plane (out_frame, in_frame, 2);    in gst_video_median_transform_frame()
    246  median_5 (GST_VIDEO_FRAME_PLANE_DATA (out_frame, 1),    in gst_video_median_transform_frame()
    247  GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 1),    in gst_video_median_transform_frame()
    252  median_5 (GST_VIDEO_FRAME_PLANE_DATA (out_frame, 2),    in gst_video_median_transform_frame()
    253  GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 2),    in gst_video_median_transform_frame()
    [all …]
|
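The gstvideomedian.c hits above are the usual GstVideoFilter transform_frame() shape: the base class hands the vmethod already-mapped input and output GstVideoFrames, and the element reads plane pointers and strides through the GST_VIDEO_FRAME_* accessors (filtering luma with median_5(), copying the chroma planes). A minimal sketch of that shape, assuming a hypothetical element and a trivial per-plane copy in place of the real filter kernel:

    #include <string.h>
    #include <gst/video/gstvideofilter.h>

    /* Hypothetical transform_frame() implementation: every plane is copied
     * row by row, which is what gst_video_frame_copy_plane() does for the
     * untouched chroma planes in the excerpt; a real element would run its
     * filter on the planes it modifies instead. */
    static GstFlowReturn
    my_filter_transform_frame (GstVideoFilter * filter,
        GstVideoFrame * in_frame, GstVideoFrame * out_frame)
    {
      guint plane;

      for (plane = 0; plane < GST_VIDEO_FRAME_N_PLANES (out_frame); plane++) {
        guint8 *dst = GST_VIDEO_FRAME_PLANE_DATA (out_frame, plane);
        guint8 *src = GST_VIDEO_FRAME_PLANE_DATA (in_frame, plane);
        gint dst_stride = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, plane);
        gint src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, plane);
        gint row_bytes = GST_VIDEO_FRAME_COMP_WIDTH (in_frame, plane) *
            GST_VIDEO_FRAME_COMP_PSTRIDE (in_frame, plane);
        gint height = GST_VIDEO_FRAME_COMP_HEIGHT (in_frame, plane);
        gint row;

        for (row = 0; row < height; row++)
          memcpy (dst + row * dst_stride, src + row * src_stride, row_bytes);
      }

      return GST_FLOW_OK;
    }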
/third_party/ffmpeg/libavfilter/ |
D | af_bs2b.c |
    130  AVFrame *out_frame;    in filter_frame() local
    136  out_frame = frame;    in filter_frame()
    138  out_frame = ff_get_audio_buffer(outlink, frame->nb_samples);    in filter_frame()
    139  if (!out_frame) {    in filter_frame()
    143  av_frame_copy(out_frame, frame);    in filter_frame()
    144  ret = av_frame_copy_props(out_frame, frame);    in filter_frame()
    146  av_frame_free(&out_frame);    in filter_frame()
    152  bs2b->filter(bs2b->bs2bp, out_frame->extended_data[0], out_frame->nb_samples);    in filter_frame()
    154  if (frame != out_frame)    in filter_frame()
    157  return ff_filter_frame(outlink, out_frame);    in filter_frame()
|
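af_bs2b.c (and af_compand, af_flanger, af_aecho, af_chorus further down) follow the same libavfilter audio convention: reuse the input frame as out_frame when it is writable, otherwise allocate a fresh buffer with ff_get_audio_buffer() and copy the frame properties, run the DSP, free the input if a separate output was allocated, and hand the result to ff_filter_frame(). A condensed sketch of that flow, with a hypothetical process_samples() standing in for the per-filter processing:

    #include "libavutil/frame.h"
    #include "avfilter.h"
    #include "audio.h"

    /* Hypothetical DSP step; each real filter does its own work here. */
    static void process_samples(void *priv, const AVFrame *in, AVFrame *out);

    static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        AVFilterContext *ctx = inlink->dst;
        AVFilterLink *outlink = ctx->outputs[0];
        AVFrame *out_frame;
        int ret;

        if (av_frame_is_writable(frame)) {
            /* The input buffer is exclusively ours: process in place. */
            out_frame = frame;
        } else {
            out_frame = ff_get_audio_buffer(outlink, frame->nb_samples);
            if (!out_frame) {
                av_frame_free(&frame);
                return AVERROR(ENOMEM);
            }
            ret = av_frame_copy_props(out_frame, frame);
            if (ret < 0) {
                av_frame_free(&out_frame);
                av_frame_free(&frame);
                return ret;
            }
        }

        process_samples(ctx->priv, frame, out_frame);

        if (frame != out_frame)
            av_frame_free(&frame);

        return ff_filter_frame(outlink, out_frame);
    }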
D | qsvvpp.c |
    457  QSVFrame *out_frame;    in query_frame() local
    462  out_frame = get_free_frame(&s->out_frame_list);    in query_frame()
    463  if (!out_frame)    in query_frame()
    469  out_frame->frame = av_frame_alloc();    in query_frame()
    470  if (!out_frame->frame)    in query_frame()
    473  ret = av_hwframe_get_buffer(outlink->hw_frames_ctx, out_frame->frame, 0);    in query_frame()
    479  out_frame->surface = (mfxFrameSurface1 *)out_frame->frame->data[3];    in query_frame()
    483  out_frame->frame = ff_get_video_buffer(outlink,    in query_frame()
    486  if (!out_frame->frame)    in query_frame()
    489  out_frame->frame->width = outlink->w;    in query_frame()
    [all …]
|
D | dnn_filter_common.c |
     78  DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)    in ff_dnn_execute_model() argument
     81  (const char **)&ctx->model_outputname, 1, out_frame);    in ff_dnn_execute_model()
     84  DNNReturnType ff_dnn_execute_model_async(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)    in ff_dnn_execute_model_async() argument
     87  … (const char **)&ctx->model_outputname, 1, out_frame);    in ff_dnn_execute_model_async()
     90  DNNAsyncStatusType ff_dnn_get_async_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)    in ff_dnn_get_async_result() argument
     92  return (ctx->dnn_module->get_async_result)(ctx->model, in_frame, out_frame);    in ff_dnn_get_async_result()
|
D | af_compand.c |
    181  AVFrame *out_frame;    in compand_nodelay() local
    186  out_frame = frame;    in compand_nodelay()
    188  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);    in compand_nodelay()
    189  if (!out_frame) {    in compand_nodelay()
    193  err = av_frame_copy_props(out_frame, frame);    in compand_nodelay()
    195  av_frame_free(&out_frame);    in compand_nodelay()
    203  double *dst = (double *)out_frame->extended_data[chan];    in compand_nodelay()
    213  if (frame != out_frame)    in compand_nodelay()
    216  return ff_filter_frame(ctx->outputs[0], out_frame);    in compand_nodelay()
    228  AVFrame *out_frame = NULL;    in compand_delay() local
    [all …]
|
D | af_flanger.c |
    145  AVFrame *out_frame;    in filter_frame() local
    149  out_frame = frame;    in filter_frame()
    151  out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);    in filter_frame()
    152  if (!out_frame) {    in filter_frame()
    156  av_frame_copy_props(out_frame, frame);    in filter_frame()
    165  double *dst = (double *)out_frame->extended_data[chan];    in filter_frame()
    200  if (frame != out_frame)    in filter_frame()
    203  return ff_filter_frame(ctx->outputs[0], out_frame);    in filter_frame()
|
D | vf_dnn_processing.c |
    328  AVFrame *out_frame = NULL;    in flush_frame() local
    329  async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);    in flush_frame()
    330  if (out_frame) {    in flush_frame()
    332  copy_uv_planes(ctx, out_frame, in_frame);    in flush_frame()
    334  ret = ff_filter_frame(outlink, out_frame);    in flush_frame()
    338  *out_pts = out_frame->pts + pts;    in flush_frame()
    380  AVFrame *out_frame = NULL;    in activate_async() local
    381  async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);    in activate_async()
    382  if (out_frame) {    in activate_async()
    384  copy_uv_planes(ctx, out_frame, in_frame);    in activate_async()
    [all …]
|
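vf_dnn_processing.c consumes asynchronous results in a polling loop: ff_dnn_get_async_result() hands back an in_frame/out_frame pair for each completed inference, the output is forwarded downstream (after copy_uv_planes() for formats where only luma went through the network), and the loop stops once no more finished work is reported. A simplified sketch of that drain loop; the DAST_SUCCESS status name is assumed from FFmpeg's DNN interface header and the chroma-copy step is omitted:

    #include "libavutil/frame.h"
    #include "avfilter.h"
    #include "dnn_filter_common.h"

    static int drain_async_results(AVFilterLink *outlink, DnnContext *dnnctx)
    {
        DNNAsyncStatusType async_state;

        do {
            AVFrame *in_frame  = NULL;
            AVFrame *out_frame = NULL;

            async_state = ff_dnn_get_async_result(dnnctx, &in_frame, &out_frame);
            if (out_frame) {
                int ret = ff_filter_frame(outlink, out_frame);

                av_frame_free(&in_frame);   /* the paired input is done with */
                if (ret < 0)
                    return ret;
            }
        } while (async_state == DAST_SUCCESS);

        return 0;
    }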
D | dnn_filter_common.h |
     53  DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
     54  DNNReturnType ff_dnn_execute_model_async(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
     55  DNNAsyncStatusType ff_dnn_get_async_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame…
|
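The declarations above are the whole synchronous contract: the caller owns both frames, so a filter typically allocates out_frame at the network's output resolution and runs the model into it. A hedged sketch of such a call site, using the usual libavfilter buffer helpers and reducing error handling to the essentials; out_w/out_h would come from querying the model's output size first (see the get_output_* helpers in the dnn_backend_* excerpts further down):

    #include "libavutil/frame.h"
    #include "avfilter.h"
    #include "video.h"
    #include "dnn_filter_common.h"

    static int run_model_on_frame(AVFilterLink *outlink, DnnContext *dnnctx,
                                  AVFrame *in, int out_w, int out_h)
    {
        AVFrame *out = ff_get_video_buffer(outlink, out_w, out_h);

        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);

        if (ff_dnn_execute_model(dnnctx, in, out) != DNN_SUCCESS) {
            av_frame_free(&in);
            av_frame_free(&out);
            return AVERROR(EIO);
        }

        av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    }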
D | af_aecho.c |
    279  AVFrame *out_frame;    in filter_frame() local
    282  out_frame = frame;    in filter_frame()
    284  out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);    in filter_frame()
    285  if (!out_frame) {    in filter_frame()
    289  av_frame_copy_props(out_frame, frame);    in filter_frame()
    292  s->echo_samples(s, s->delayptrs, frame->extended_data, out_frame->extended_data,    in filter_frame()
    297  if (frame != out_frame)    in filter_frame()
    300  return ff_filter_frame(ctx->outputs[0], out_frame);    in filter_frame()
|
D | af_chorus.c |
    244  AVFrame *out_frame;    in filter_frame() local
    248  out_frame = frame;    in filter_frame()
    250  out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);    in filter_frame()
    251  if (!out_frame) {    in filter_frame()
    255  av_frame_copy_props(out_frame, frame);    in filter_frame()
    260  float *dst = (float *)out_frame->extended_data[c];    in filter_frame()
    287  if (frame != out_frame)    in filter_frame()
    290  return ff_filter_frame(ctx->outputs[0], out_frame);    in filter_frame()
|
D | af_adelay.c |
    219  AVFrame *out_frame;    in filter_frame() local
    225  out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);    in filter_frame()
    226  if (!out_frame) {    in filter_frame()
    230  av_frame_copy_props(out_frame, frame);    in filter_frame()
    235  uint8_t *dst = out_frame->extended_data[i];    in filter_frame()
    243  out_frame->pts = s->next_pts;    in filter_frame()
    246  return ff_filter_frame(ctx->outputs[0], out_frame);    in filter_frame()
|
/third_party/ffmpeg/tests/api/ |
D | api-flac-test.c |
    112  AVFrame *in_frame, *out_frame;    in run_test() local
    139  out_frame = av_frame_alloc();    in run_test()
    140  if (!out_frame) {    in run_test()
    194  result = avcodec_receive_frame(dec_ctx, out_frame);    in run_test()
    206  if (in_frame->nb_samples != out_frame->nb_samples) {    in run_test()
    211  if (in_frame->channel_layout != out_frame->channel_layout) {    in run_test()
    216  if (in_frame->format != out_frame->format) {    in run_test()
    220  out_frame_bytes = out_frame->nb_samples * out_frame->channels * sizeof(uint16_t);    in run_test()
    221  if (out_frame_bytes > out_frame->linesize[0]) {    in run_test()
    225  memcpy(raw_out + out_offset, out_frame->data[0], out_frame_bytes);    in run_test()
    [all …]
|
/third_party/ffmpeg/libavcodec/ |
D | libdavs2.c |
     36  davs2_picture_t out_frame; // output data, frame data    member
    126  frame->pts = cad->out_frame.pts;    in davs2_dump_frames()
    139  ret = davs2_decoder_flush(cad->decoder, &cad->headerset, &cad->out_frame);    in davs2_flush()
    140  davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);    in davs2_flush()
    153  ret = davs2_decoder_flush(cad->decoder, &cad->headerset, &cad->out_frame);    in send_delayed_frame()
    159  ret = davs2_dump_frames(avctx, &cad->out_frame, got_frame, &cad->headerset, ret, frame);    in send_delayed_frame()
    160  davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);    in send_delayed_frame()
    205  ret = davs2_decoder_recv_frame(cad->decoder, &cad->headerset, &cad->out_frame);    in davs2_decode_frame()
    208  ret = davs2_dump_frames(avctx, &cad->out_frame, got_frame, &cad->headerset, ret, frame);    in davs2_decode_frame()
    209  davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);    in davs2_decode_frame()
|
D | qsvdec.c |
    448  QSVFrame *out_frame;    in qsv_decode() local
    507  QSVFrame *out_frame = find_frame(q, outsurf);    in qsv_decode() local
    509  if (!out_frame) {    in qsv_decode()
    516  out_frame->queued = 1;    in qsv_decode()
    517  av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);    in qsv_decode()
    527  av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);    in qsv_decode()
    529  out_frame->queued = 0;    in qsv_decode()
    539  src_frame = out_frame->frame;    in qsv_decode()
    545  outsurf = &out_frame->surface;    in qsv_decode()
    562  frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);    in qsv_decode()
    [all …]
|
/third_party/gstreamer/gstplugins_good/gst/videocrop/ |
D | gstvideocrop.c |
    114  GstVideoFrame * in_frame, GstVideoFrame * out_frame);
    247  GstVideoFrame * in_frame, GstVideoFrame * out_frame, gint x, gint y)    in gst_video_crop_transform_packed_complex() argument
    255  width = GST_VIDEO_FRAME_WIDTH (out_frame);    in gst_video_crop_transform_packed_complex()
    256  height = GST_VIDEO_FRAME_HEIGHT (out_frame);    in gst_video_crop_transform_packed_complex()
    259  out_data = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);    in gst_video_crop_transform_packed_complex()
    262  out_stride = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0);    in gst_video_crop_transform_packed_complex()
    271  dx = width * GST_VIDEO_FRAME_COMP_PSTRIDE (out_frame, 0);    in gst_video_crop_transform_packed_complex()
    300  GstVideoFrame * in_frame, GstVideoFrame * out_frame, gint x, gint y)    in gst_video_crop_transform_packed_simple() argument
    307  width = GST_VIDEO_FRAME_WIDTH (out_frame);    in gst_video_crop_transform_packed_simple()
    308  height = GST_VIDEO_FRAME_HEIGHT (out_frame);    in gst_video_crop_transform_packed_simple()
    [all …]
|
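For packed formats the "simple" crop path above reduces to a per-row memcpy: the output frame's dimensions define the crop rectangle and (x, y) is the offset of that rectangle inside the input. A stripped-down sketch of that loop using the same accessors as the excerpt; the "complex" variant handles packed formats that need extra care, which this sketch ignores:

    #include <string.h>
    #include <gst/video/video.h>

    static void
    crop_packed_simple (GstVideoFrame * in_frame, GstVideoFrame * out_frame,
        gint x, gint y)
    {
      guint8 *in_data, *out_data;
      gint width, height, i;
      gint in_stride, out_stride, pixel_stride, row_bytes;

      width = GST_VIDEO_FRAME_WIDTH (out_frame);
      height = GST_VIDEO_FRAME_HEIGHT (out_frame);

      in_data = GST_VIDEO_FRAME_PLANE_DATA (in_frame, 0);
      out_data = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);
      in_stride = GST_VIDEO_FRAME_PLANE_STRIDE (in_frame, 0);
      out_stride = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0);

      pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (out_frame, 0);
      row_bytes = width * pixel_stride;     /* bytes per cropped output row */

      /* Skip y rows and x pixels into the source, then copy row by row. */
      in_data += y * in_stride + x * pixel_stride;
      for (i = 0; i < height; i++) {
        memcpy (out_data, in_data, row_bytes);
        in_data += in_stride;
        out_data += out_stride;
      }
    }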
/third_party/ffmpeg/libavfilter/dnn/ |
D | dnn_backend_native.c |
     48  … const char **output_names, uint32_t nb_output, AVFrame *out_frame,
     84  AVFrame *out_frame = NULL;    in get_output_native() local
     91  out_frame = av_frame_alloc();    in get_output_native()
     93  if (!out_frame) {    in get_output_native()
    102  …t = execute_model_native(native_model->model, input_name, in_frame, &output_name, 1, out_frame, 0);    in get_output_native()
    103  *output_width = out_frame->width;    in get_output_native()
    104  *output_height = out_frame->height;    in get_output_native()
    106  av_frame_free(&out_frame);    in get_output_native()
    262  … const char **output_names, uint32_t nb_output, AVFrame *out_frame,    in execute_model_native() argument
    362  native_model->model->post_proc(out_frame, &output, native_model->model->filter_ctx);    in execute_model_native()
    [all …]
|
D | dnn_backend_openvino.c |
     67  AVFrame *out_frame;    member
    241  … task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);    in infer_completion_callback()
    243  ff_proc_from_dnn_to_frame(task->out_frame, &output, ctx);    in infer_completion_callback()
    246  task->out_frame->width = output.width;    in infer_completion_callback()
    247  task->out_frame->height = output.height;    in infer_completion_callback()
    491  AVFrame *out_frame = NULL;    in get_output_ov() local
    523  out_frame = av_frame_alloc();    in get_output_ov()
    524  if (!out_frame) {    in get_output_ov()
    536  task.out_frame = out_frame;    in get_output_ov()
    544  *output_width = out_frame->width;    in get_output_ov()
    [all …]
|
D | dnn_backend_tf.c |
     66  … const char **output_names, uint32_t nb_output, AVFrame *out_frame,
    171  AVFrame *out_frame = NULL;    in get_output_tf() local
    178  out_frame = av_frame_alloc();    in get_output_tf()
    179  if (!out_frame) {    in get_output_tf()
    188  ret = execute_model_tf(tf_model->model, input_name, in_frame, &output_name, 1, out_frame, 0);    in get_output_tf()
    189  *output_width = out_frame->width;    in get_output_tf()
    190  *output_height = out_frame->height;    in get_output_tf()
    192  av_frame_free(&out_frame);    in get_output_tf()
    714  … const char **output_names, uint32_t nb_output, AVFrame *out_frame,    in execute_model_tf() argument
    802  tf_model->model->post_proc(out_frame, &output, tf_model->model->filter_ctx);    in execute_model_tf()
    [all …]
|
D | dnn_backend_openvino.h |
     35  … const char **output_names, uint32_t nb_output, AVFrame *out_frame);
     37  … const char **output_names, uint32_t nb_output, AVFrame *out_frame);
|
/third_party/gstreamer/gstplugins_bad/gst/smooth/ |
D | gstsmooth.c |
     59  GstVideoFrame * in_frame, GstVideoFrame * out_frame);
    180  GstVideoFrame * out_frame)    in gst_smooth_transform_frame() argument
    187  gst_video_frame_copy (out_frame, in_frame);    in gst_smooth_transform_frame()
    191  smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 0),    in gst_smooth_transform_frame()
    196  GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 0),    in gst_smooth_transform_frame()
    199  smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 1),    in gst_smooth_transform_frame()
    204  GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 1),    in gst_smooth_transform_frame()
    206  smooth_filter (GST_VIDEO_FRAME_COMP_DATA (out_frame, 2),    in gst_smooth_transform_frame()
    211  GST_VIDEO_FRAME_COMP_STRIDE (out_frame, 2),    in gst_smooth_transform_frame()
    214  gst_video_frame_copy_plane (out_frame, in_frame, 1);    in gst_smooth_transform_frame()
    [all …]
|
/third_party/gstreamer/gstplugins_good/gst/alpha/ |
D | gstalpha.c |
    170  GstVideoFrame * in_frame, GstVideoFrame * out_frame);
    627  GstVideoFrame * out_frame, GstAlpha * alpha)    in gst_alpha_set_argb_ayuv() argument
    639  dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);    in gst_alpha_set_argb_ayuv()
    673  GstVideoFrame * out_frame, GstAlpha * alpha)    in gst_alpha_chroma_key_argb_ayuv() argument
    694  dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);    in gst_alpha_chroma_key_argb_ayuv()
    742  GstVideoFrame * out_frame, GstAlpha * alpha)    in gst_alpha_set_argb_argb() argument
    752  dest = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);    in gst_alpha_set_argb_argb()
    757  p[0] = GST_VIDEO_FRAME_COMP_POFFSET (out_frame, 3);    in gst_alpha_set_argb_argb()
    758  p[1] = GST_VIDEO_FRAME_COMP_POFFSET (out_frame, 0);    in gst_alpha_set_argb_argb()
    759  p[2] = GST_VIDEO_FRAME_COMP_POFFSET (out_frame, 1);    in gst_alpha_set_argb_argb()
    [all …]
|
/third_party/gstreamer/gstplugins_good/gst/smpte/ |
D | gstsmptealpha.c |
    148  GstVideoFrame * in_frame, GstVideoFrame * out_frame);
    281  const GstVideoFrame * in_frame, GstVideoFrame * out_frame, GstMask * mask, \
    302  width = GST_VIDEO_FRAME_WIDTH (out_frame); \
    303  height = GST_VIDEO_FRAME_HEIGHT (out_frame); \
    306  out = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0); \
    308  dest_wrap = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, 0) - (width << 2); \
    334  const GstVideoFrame * in_frame, GstVideoFrame * out_frame, GstMask * mask,    in gst_smpte_alpha_process_ayuv_ayuv() argument
    355  width = GST_VIDEO_FRAME_WIDTH (out_frame);    in gst_smpte_alpha_process_ayuv_ayuv()
    356  height = GST_VIDEO_FRAME_HEIGHT (out_frame);    in gst_smpte_alpha_process_ayuv_ayuv()
    359  out = GST_VIDEO_FRAME_PLANE_DATA (out_frame, 0);    in gst_smpte_alpha_process_ayuv_ayuv()
    [all …]
|
/third_party/grpc/test/core/tsi/alts/handshaker/ |
D | alts_handshaker_client_test.cc |
     60  grpc_slice out_frame;    member
    336  config->out_frame =    in create_config()
    349  grpc_slice_unref(config->out_frame);    in destroy_config()
    374  nullptr, &config->out_frame) == TSI_INVALID_ARGUMENT);    in schedule_request_invalid_arg_test()
    384  GPR_ASSERT(alts_handshaker_client_next(nullptr, &config->out_frame) ==    in schedule_request_invalid_arg_test()
    405  GPR_ASSERT(alts_handshaker_client_next(nullptr, &config->out_frame) ==    in schedule_request_success_test()
    414  config->server, &config->out_frame) == TSI_OK);    in schedule_request_success_test()
    422  &config->out_frame) == TSI_OK);    in schedule_request_success_test()
    430  &config->out_frame) == TSI_OK);    in schedule_request_success_test()
    475  alts_handshaker_client_start_server(config->server, &config->out_frame);    in schedule_request_grpc_call_failure_test()
    [all …]
|
/third_party/gstreamer/gstplugins_bad/sys/nvcodec/ |
D | gstcudabasefilter.c |
     63  GstVideoFrame * out_frame, GstCudaMemory * out_cuda_mem);
    184  GstVideoFrame * out_frame, GstCudaMemory * out_cuda_mem)    in gst_cuda_base_filter_transform_frame() argument
    290  for (i = 0; i < GST_VIDEO_FRAME_N_PLANES (out_frame); i++) {    in gst_cuda_base_filter_transform_frame()
    294  width = GST_VIDEO_FRAME_COMP_WIDTH (out_frame, i) *    in gst_cuda_base_filter_transform_frame()
    295  GST_VIDEO_FRAME_COMP_PSTRIDE (out_frame, i);    in gst_cuda_base_filter_transform_frame()
    296  height = GST_VIDEO_FRAME_COMP_HEIGHT (out_frame, i);    in gst_cuda_base_filter_transform_frame()
    303  param.dstPitch = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, i);    in gst_cuda_base_filter_transform_frame()
    304  param.dstHost = GST_VIDEO_FRAME_PLANE_DATA (out_frame, i);    in gst_cuda_base_filter_transform_frame()
|
D | gstcudabasetransform.c |
     74  GstVideoFrame * out_frame, GstCudaMemory * out_cuda_mem);
    295  GstVideoFrame in_frame, out_frame;    in gst_cuda_base_transform_transform() local
    343  if (!gst_video_frame_map (&out_frame, &filter->out_info, outbuf,    in gst_cuda_base_transform_transform()
    349  ret = fclass->transform_frame (filter, &in_frame, in_cuda_mem, &out_frame,    in gst_cuda_base_transform_transform()
    352  gst_video_frame_unmap (&out_frame);    in gst_cuda_base_transform_transform()
    375  GstVideoFrame * out_frame, GstCudaMemory * out_cuda_mem)    in gst_cuda_base_transform_transform_frame_default() argument
    412  param.dstHost = GST_VIDEO_FRAME_PLANE_DATA (out_frame, i);    in gst_cuda_base_transform_transform_frame_default()
    413  param.dstPitch = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, i);    in gst_cuda_base_transform_transform_frame_default()
    433  if (!gst_video_frame_copy_plane (out_frame, in_frame, i)) {    in gst_cuda_base_transform_transform_frame_default()
|
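In both CUDA excerpts the host side of the per-plane copy is described through a CUDA_MEMCPY2D descriptor, with the mapped GstVideoFrame supplying the destination pointer (param.dstHost) and pitch (param.dstPitch). A sketch of one such device-to-host plane copy; the source device pointer and pitch stand in for fields of GstCudaMemory, whose layout is not shown in the excerpts, and the real elements wrap this in their own CUDA context and stream handling:

    #include <cuda.h>
    #include <gst/video/video.h>

    /* Copies one plane of CUDA device memory into a mapped system-memory
     * GstVideoFrame, mirroring the param.dstHost/param.dstPitch lines above. */
    static gboolean
    copy_plane_device_to_host (CUdeviceptr src_dev_ptr, gsize src_pitch,
        GstVideoFrame * out_frame, guint i, CUstream stream)
    {
      CUDA_MEMCPY2D param = { 0, };

      param.srcMemoryType = CU_MEMORYTYPE_DEVICE;
      param.srcDevice = src_dev_ptr;
      param.srcPitch = src_pitch;

      param.dstMemoryType = CU_MEMORYTYPE_HOST;
      param.dstHost = GST_VIDEO_FRAME_PLANE_DATA (out_frame, i);
      param.dstPitch = GST_VIDEO_FRAME_PLANE_STRIDE (out_frame, i);

      param.WidthInBytes = GST_VIDEO_FRAME_COMP_WIDTH (out_frame, i) *
          GST_VIDEO_FRAME_COMP_PSTRIDE (out_frame, i);
      param.Height = GST_VIDEO_FRAME_COMP_HEIGHT (out_frame, i);

      return cuMemcpy2DAsync (&param, stream) == CUDA_SUCCESS;
    }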