/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN common functions for the different backends.
 */

#ifndef AVFILTER_DNN_DNN_BACKEND_COMMON_H
#define AVFILTER_DNN_DNN_BACKEND_COMMON_H

#include "queue.h"
#include "../dnn_interface.h"
#include "libavutil/thread.h"

#define DNN_BACKEND_COMMON_OPTIONS \
    { "nireq",  "number of requests",      OFFSET(options.nireq), AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, INT_MAX, FLAGS }, \
    { "async",  "use DNN async inference", OFFSET(options.async), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1,       FLAGS },

// one task for one function call from dnn interface
typedef struct TaskItem {
    void *model; // model for the backend
    AVFrame *in_frame;
    AVFrame *out_frame;
    const char *input_name;
    const char **output_names;
    uint8_t async;
    uint8_t do_ioproc;
    uint32_t nb_output;
    uint32_t inference_todo;
    uint32_t inference_done;
} TaskItem;

// one task might have multiple inferences
typedef struct LastLevelTaskItem {
    TaskItem *task;
    uint32_t bbox_index;
} LastLevelTaskItem;

/**
 * Common Async Execution Mechanism for the DNN Backends.
 */
typedef struct DNNAsyncExecModule {
    /**
     * Synchronous inference function for the backend
     * with the corresponding request item as the argument.
     */
    int (*start_inference)(void *request);

    /**
     * Completion Callback for the backend.
     * The expected argument type of the callback
     * must match that of the inference function.
     */
    void (*callback)(void *args);

    /**
     * Argument for the execution functions,
     * i.e. the request item for the backend.
     */
    void *args;
#if HAVE_PTHREAD_CANCEL
    pthread_t thread_id;
    pthread_attr_t thread_attr;
#endif
} DNNAsyncExecModule;

int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params);

/**
 * Fill the Task for Backend Execution. It should be called after
 * checking the execution parameters with ff_check_exec_params.
 *
 * @param task pointer to the allocated task
 * @param exec_params pointer to execution parameters
 * @param backend_model void pointer to the backend model
 * @param async flag for async execution. Must be 0 or 1
 * @param do_ioproc flag for IO processing. Must be 0 or 1
 *
 * @returns 0 if successful or error code otherwise.
 */
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc);

/**
 * Join the Async Execution thread and set the module pointers to NULL.
 *
 * @param async_module pointer to the DNNAsyncExecModule module
 *
 * @returns 0 if successful or error code otherwise.
 */
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);

/**
 * Start an asynchronous inference routine for the backend
 * model on a detached thread. It calls the completion callback
 * after the inference completes. The completion callback and the
 * inference function must be set before calling this function.
 *
 * If POSIX threads aren't supported, the execution falls back
 * to synchronous mode, calling the completion callback after
 * the inference completes.
 *
 * @param ctx pointer to the backend context
 * @param async_module pointer to the DNNAsyncExecModule module
 *
 * @returns 0 on the start of async inference or error code otherwise.
 */
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module);

/**
 * Extract the input and output frames from the task queue after
 * asynchronous inference.
 *
 * @param task_queue pointer to the task queue of the backend
 * @param in double pointer to the input frame
 * @param out double pointer to the output frame
 *
 * @retval DAST_EMPTY_QUEUE if the task queue is empty
 * @retval DAST_NOT_READY if the inference is not complete yet
 * @retval DAST_SUCCESS if the result was successfully extracted
 */
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out);

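/*
 * Usage sketch (backend side): how a backend might fill a TaskItem and run
 * it through DNNAsyncExecModule. This is illustrative only: MyModel,
 * MyRequestItem, my_start_inference() and my_infer_callback() are
 * hypothetical stand-ins for a concrete backend's types and functions,
 * and MyModel's task_queue field mirrors the queue real backends keep.
 *
 *     static int my_execute(void *ctx, MyModel *my_model,
 *                           MyRequestItem *request,
 *                           DNNExecBaseParams *exec_params)
 *     {
 *         TaskItem *task = av_malloc(sizeof(*task));
 *         int ret;
 *
 *         if (!task)
 *             return AVERROR(ENOMEM);
 *
 *         // populate the task from the execution parameters
 *         ret = ff_dnn_fill_task(task, exec_params, my_model,
 *                                1 /\* async *\/, 1 /\* do_ioproc *\/);
 *         if (ret != 0) {
 *             av_freep(&task);
 *             return ret;
 *         }
 *
 *         // keep the task queued so results can be drained later
 *         if (ff_queue_push_back(my_model->task_queue, task) < 0) {
 *             av_freep(&task);
 *             return AVERROR(ENOMEM);
 *         }
 *
 *         // wire the backend's inference function and completion
 *         // callback into the common async execution module
 *         request->exec_module.start_inference = &my_start_inference;
 *         request->exec_module.callback        = &my_infer_callback;
 *         request->exec_module.args            = request;
 *
 *         return ff_dnn_start_inference_async(ctx, &request->exec_module);
 *     }
 */
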
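/*
 * Usage sketch (filter side): draining completed tasks after async
 * inference. A backend's get_result callback can simply forward to
 * ff_dnn_get_result_common(); the calling filter then polls until the
 * queue is drained. my_model and my_get_result() are again hypothetical.
 *
 *     static DNNAsyncStatusType my_get_result(MyModel *my_model,
 *                                             AVFrame **in, AVFrame **out)
 *     {
 *         return ff_dnn_get_result_common(my_model->task_queue, in, out);
 *     }
 *
 *     // in the filter's activate/flush path:
 *     AVFrame *in_frame = NULL, *out_frame = NULL;
 *     DNNAsyncStatusType status;
 *
 *     do {
 *         status = my_get_result(my_model, &in_frame, &out_frame);
 *         if (status == DAST_SUCCESS) {
 *             // forward out_frame downstream; release in_frame if unused
 *         }
 *     } while (status == DAST_SUCCESS);
 *     // DAST_NOT_READY: poll again later; DAST_EMPTY_QUEUE: nothing pending
 */
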
/**
 * Allocate the input and output frames and fill the Task
 * with the execution parameters.
 *
 * @param task pointer to the allocated task
 * @param exec_params pointer to execution parameters
 * @param backend_model void pointer to the backend model
 * @param input_height height of the input frame
 * @param input_width width of the input frame
 * @param ctx pointer to the backend context
 *
 * @returns 0 if successful or error code otherwise.
 */
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx);

#endif