• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* GStreamer
2  * Copyright (C) <2018-2019> Seungha Yang <seungha.yang@navercorp.com>
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Library General Public
6  * License as published by the Free Software Foundation; either
7  * version 2 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * Library General Public License for more details.
13  *
14  * You should have received a copy of the GNU Library General Public
15  * License along with this library; if not, write to the
16  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
17  * Boston, MA 02110-1301, USA.
18  */
19 
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23 
24 #include "gstcudautils.h"
25 #include "gstcudacontext.h"
26 
27 #ifdef HAVE_NVCODEC_GST_GL
28 #include <gst/gl/gl.h>
29 #include <gst/gl/gstglfuncs.h>
30 #endif
31 
32 GST_DEBUG_CATEGORY_STATIC (gst_cuda_utils_debug);
33 #define GST_CAT_DEFAULT gst_cuda_utils_debug
34 GST_DEBUG_CATEGORY_STATIC (GST_CAT_CONTEXT);
35 
36 static void
_init_debug(void)37 _init_debug (void)
38 {
39   static gsize once_init = 0;
40 
41   if (g_once_init_enter (&once_init)) {
42 
43     GST_DEBUG_CATEGORY_INIT (gst_cuda_utils_debug, "cudautils", 0,
44         "CUDA utils");
45     GST_DEBUG_CATEGORY_GET (GST_CAT_CONTEXT, "GST_CONTEXT");
46     g_once_init_leave (&once_init, 1);
47   }
48 }
49 
50 static gboolean
pad_query(const GValue * item,GValue * value,gpointer user_data)51 pad_query (const GValue * item, GValue * value, gpointer user_data)
52 {
53   GstPad *pad = g_value_get_object (item);
54   GstQuery *query = user_data;
55   gboolean res;
56 
57   res = gst_pad_peer_query (pad, query);
58 
59   if (res) {
60     g_value_set_boolean (value, TRUE);
61     return FALSE;
62   }
63 
64   GST_CAT_INFO_OBJECT (GST_CAT_CONTEXT, pad, "pad peer query failed");
65   return TRUE;
66 }
67 
68 static gboolean
run_query(GstElement * element,GstQuery * query,GstPadDirection direction)69 run_query (GstElement * element, GstQuery * query, GstPadDirection direction)
70 {
71   GstIterator *it;
72   GstIteratorFoldFunction func = pad_query;
73   GValue res = { 0 };
74 
75   g_value_init (&res, G_TYPE_BOOLEAN);
76   g_value_set_boolean (&res, FALSE);
77 
78   /* Ask neighbor */
79   if (direction == GST_PAD_SRC)
80     it = gst_element_iterate_src_pads (element);
81   else
82     it = gst_element_iterate_sink_pads (element);
83 
84   while (gst_iterator_fold (it, func, &res, query) == GST_ITERATOR_RESYNC)
85     gst_iterator_resync (it);
86 
87   gst_iterator_free (it);
88 
89   return g_value_get_boolean (&res);
90 }
91 
/* Try to locate an existing GstCudaContext for @element via the GstContext
 * machinery: query downstream peers, then upstream peers, then post a
 * need-context message for bins/the application.  Whoever answers does so
 * through GstElement::set_context(), which is expected to store the context
 * into *cuda_ctx (typically via gst_cuda_handle_set_context()). */
static void
find_cuda_context (GstElement * element, GstCudaContext ** cuda_ctx)
{
  GstQuery *query;
  GstContext *ctxt;

  /*  1) Query downstream with GST_QUERY_CONTEXT for the context and
   *      check if upstream already has a context of the specific type
   *  2) Query upstream as above.
   */
  query = gst_query_new_context (GST_CUDA_CONTEXT_TYPE);
  if (run_query (element, query, GST_PAD_SRC)) {
    gst_query_parse_context (query, &ctxt);
    GST_CAT_INFO_OBJECT (GST_CAT_CONTEXT, element,
        "found context (%p) in downstream query", ctxt);
    /* route the answer through set_context so the element's vfunc can
     * decide whether to adopt it (filling *cuda_ctx) */
    gst_element_set_context (element, ctxt);
  }

  /* although we found cuda context above, the element does not want
   * to use the context. Then try to find from the other direction */
  if (*cuda_ctx == NULL && run_query (element, query, GST_PAD_SINK)) {
    gst_query_parse_context (query, &ctxt);
    GST_CAT_INFO_OBJECT (GST_CAT_CONTEXT, element,
        "found context (%p) in upstream query", ctxt);
    gst_element_set_context (element, ctxt);
  }

  if (*cuda_ctx == NULL) {
    /* 3) Post a GST_MESSAGE_NEED_CONTEXT message on the bus with
     *    the required context type and afterwards check if a
     *    usable context was set now. The message could
     *    be handled by the parent bins of the element and the
     *    application.
     */
    GstMessage *msg;

    GST_CAT_INFO_OBJECT (GST_CAT_CONTEXT, element,
        "posting need context message");
    msg = gst_message_new_need_context (GST_OBJECT_CAST (element),
        GST_CUDA_CONTEXT_TYPE);
    /* gst_element_post_message takes ownership of msg */
    gst_element_post_message (element, msg);
  }

  /*
   * Whomever responds to the need-context message performs a
   * GstElement::set_context() with the required context in which the element
   * is required to update the cuda_ctx or call gst_cuda_handle_set_context().
   */

  gst_query_unref (query);
}
143 
144 static void
context_set_cuda_context(GstContext * context,GstCudaContext * cuda_ctx)145 context_set_cuda_context (GstContext * context, GstCudaContext * cuda_ctx)
146 {
147   GstStructure *s;
148   gint device_id;
149 
150   g_return_if_fail (context != NULL);
151 
152   g_object_get (G_OBJECT (cuda_ctx), "cuda-device-id", &device_id, NULL);
153 
154   GST_CAT_LOG (GST_CAT_CONTEXT,
155       "setting GstCudaContext(%" GST_PTR_FORMAT
156       ") with cuda-device-id %d on context(%" GST_PTR_FORMAT ")",
157       cuda_ctx, device_id, context);
158 
159   s = gst_context_writable_structure (context);
160   gst_structure_set (s, GST_CUDA_CONTEXT_TYPE, GST_TYPE_CUDA_CONTEXT,
161       cuda_ctx, "cuda-device-id", G_TYPE_INT, device_id, NULL);
162 }
163 
164 /**
165  * gst_cuda_ensure_element_context:
166  * @element: the #GstElement running the query
167  * @device_id: preferred device-id, pass device_id >=0 when
168  *             the device_id explicitly required. Otherwise, set -1.
169  * @cuda_ctx: (inout): the resulting #GstCudaContext
170  *
171  * Perform the steps necessary for retrieving a #GstCudaContext from the
172  * surrounding elements or from the application using the #GstContext mechanism.
173  *
174  * If the content of @cuda_ctx is not %NULL, then no #GstContext query is
175  * necessary for #GstCudaContext.
176  *
177  * Returns: whether a #GstCudaContext exists in @cuda_ctx
178  */
179 gboolean
gst_cuda_ensure_element_context(GstElement * element,gint device_id,GstCudaContext ** cuda_ctx)180 gst_cuda_ensure_element_context (GstElement * element, gint device_id,
181     GstCudaContext ** cuda_ctx)
182 {
183   g_return_val_if_fail (element != NULL, FALSE);
184   g_return_val_if_fail (cuda_ctx != NULL, FALSE);
185 
186   _init_debug ();
187 
188   if (*cuda_ctx)
189     return TRUE;
190 
191   find_cuda_context (element, cuda_ctx);
192   if (*cuda_ctx)
193     return TRUE;
194 
195   /* No available CUDA context in pipeline, create new one here */
196   *cuda_ctx = gst_cuda_context_new (device_id);
197 
198   if (*cuda_ctx == NULL) {
199     GST_CAT_ERROR_OBJECT (GST_CAT_CONTEXT, element,
200         "Failed to create CUDA context with device-id %d", device_id);
201     return FALSE;
202   } else {
203     GstContext *context;
204     GstMessage *msg;
205 
206     /* Propagate new CUDA context */
207 
208     context = gst_context_new (GST_CUDA_CONTEXT_TYPE, TRUE);
209     context_set_cuda_context (context, *cuda_ctx);
210 
211     gst_element_set_context (element, context);
212 
213     GST_CAT_INFO_OBJECT (GST_CAT_CONTEXT, element,
214         "posting have context (%p) message with CUDA context (%p)",
215         context, *cuda_ctx);
216     msg = gst_message_new_have_context (GST_OBJECT_CAST (element), context);
217     gst_element_post_message (GST_ELEMENT_CAST (element), msg);
218   }
219 
220   return TRUE;
221 }
222 
223 /**
224  * gst_cuda_handle_set_context:
225  * @element: a #GstElement
226  * @context: a #GstContext
227  * @device_id: preferred device-id, pass device_id >=0 when
228  *             the device_id explicitly required. Otherwise, set -1.
229  * @cuda_ctx: (inout) (transfer full): location of a #GstCudaContext
230  *
231  * Helper function for implementing #GstElementClass.set_context() in
232  * CUDA capable elements.
233  *
234  * Retrieves the #GstCudaContext in @context and places the result in @cuda_ctx.
235  *
236  * Returns: whether the @cuda_ctx could be set successfully
237  */
238 gboolean
gst_cuda_handle_set_context(GstElement * element,GstContext * context,gint device_id,GstCudaContext ** cuda_ctx)239 gst_cuda_handle_set_context (GstElement * element,
240     GstContext * context, gint device_id, GstCudaContext ** cuda_ctx)
241 {
242   const gchar *context_type;
243 
244   g_return_val_if_fail (element != NULL, FALSE);
245   g_return_val_if_fail (cuda_ctx != NULL, FALSE);
246 
247   _init_debug ();
248 
249   if (!context)
250     return FALSE;
251 
252   context_type = gst_context_get_context_type (context);
253   if (g_strcmp0 (context_type, GST_CUDA_CONTEXT_TYPE) == 0) {
254     const GstStructure *str;
255     GstCudaContext *other_ctx = NULL;
256     gint other_device_id = 0;
257 
258     /* If we had context already, will not replace it */
259     if (*cuda_ctx)
260       return TRUE;
261 
262     str = gst_context_get_structure (context);
263     if (gst_structure_get (str, GST_CUDA_CONTEXT_TYPE, GST_TYPE_CUDA_CONTEXT,
264             &other_ctx, NULL)) {
265       g_object_get (other_ctx, "cuda-device-id", &other_device_id, NULL);
266 
267       if (device_id == -1 || other_device_id == device_id) {
268         GST_CAT_DEBUG_OBJECT (GST_CAT_CONTEXT, element, "Found CUDA context");
269         *cuda_ctx = other_ctx;
270 
271         return TRUE;
272       }
273 
274       gst_object_unref (other_ctx);
275     }
276   }
277 
278   return FALSE;
279 }
280 
281 /**
282  * gst_cuda_handle_context_query:
283  * @element: a #GstElement
284  * @query: a #GstQuery of type %GST_QUERY_CONTEXT
285  * @cuda_ctx: (transfer none) (nullable): a #GstCudaContext
286  *
287  * Returns: Whether the @query was successfully responded to from the passed
288  *          @context.
289  */
290 gboolean
gst_cuda_handle_context_query(GstElement * element,GstQuery * query,GstCudaContext * cuda_ctx)291 gst_cuda_handle_context_query (GstElement * element,
292     GstQuery * query, GstCudaContext * cuda_ctx)
293 {
294   const gchar *context_type;
295   GstContext *context, *old_context;
296 
297   g_return_val_if_fail (GST_IS_ELEMENT (element), FALSE);
298   g_return_val_if_fail (GST_IS_QUERY (query), FALSE);
299   g_return_val_if_fail (cuda_ctx == NULL
300       || GST_IS_CUDA_CONTEXT (cuda_ctx), FALSE);
301 
302   _init_debug ();
303 
304   GST_CAT_LOG_OBJECT (GST_CAT_CONTEXT, element,
305       "handle context query %" GST_PTR_FORMAT, query);
306   gst_query_parse_context_type (query, &context_type);
307 
308   if (cuda_ctx && g_strcmp0 (context_type, GST_CUDA_CONTEXT_TYPE) == 0) {
309     gst_query_parse_context (query, &old_context);
310 
311     if (old_context)
312       context = gst_context_copy (old_context);
313     else
314       context = gst_context_new (GST_CUDA_CONTEXT_TYPE, TRUE);
315 
316     context_set_cuda_context (context, cuda_ctx);
317     gst_query_set_context (query, context);
318     gst_context_unref (context);
319     GST_CAT_DEBUG_OBJECT (GST_CAT_CONTEXT, element,
320         "successfully set %" GST_PTR_FORMAT " on %" GST_PTR_FORMAT, cuda_ctx,
321         query);
322 
323     return TRUE;
324   }
325 
326   return FALSE;
327 }
328 
329 /**
330  * gst_context_new_cuda_context:
331  * @cuda_ctx: (transfer none) a #GstCudaContext
332  *
333  * Returns: (transfer full) (nullable): a new #GstContext embedding the @cuda_ctx
334  * or %NULL
335  */
336 GstContext *
gst_context_new_cuda_context(GstCudaContext * cuda_ctx)337 gst_context_new_cuda_context (GstCudaContext * cuda_ctx)
338 {
339   GstContext *context;
340 
341   g_return_val_if_fail (GST_IS_CUDA_CONTEXT (cuda_ctx), NULL);
342 
343   context = gst_context_new (GST_CUDA_CONTEXT_TYPE, TRUE);
344   context_set_cuda_context (context, cuda_ctx);
345 
346   return context;
347 }
348 
/* Static names backing the quark table; indexed by GstCudaQuarkId, so the
 * order must stay in sync with that enum */
static const gchar *gst_cuda_quark_strings[] =
    { "GstCudaQuarkGraphicsResource" };

/* Lazily filled by init_cuda_quark_once(), one quark per GstCudaQuarkId */
static GQuark gst_cuda_quark_table[GST_CUDA_QUARK_MAX];
353 
354 static void
init_cuda_quark_once(void)355 init_cuda_quark_once (void)
356 {
357   static gsize once_init = 0;
358 
359   if (g_once_init_enter (&once_init)) {
360     gint i;
361 
362     for (i = 0; i < GST_CUDA_QUARK_MAX; i++)
363       gst_cuda_quark_table[i] =
364           g_quark_from_static_string (gst_cuda_quark_strings[i]);
365 
366     g_once_init_leave (&once_init, 1);
367   }
368 }
369 
370 /**
371  * gst_cuda_quark_from_id: (skip)
372  * @id: a #GstCudaQuarkId
373  *
374  * Returns: the GQuark for given @id or 0 if @id is unknown value
375  */
376 GQuark
gst_cuda_quark_from_id(GstCudaQuarkId id)377 gst_cuda_quark_from_id (GstCudaQuarkId id)
378 {
379   g_return_val_if_fail (id < GST_CUDA_QUARK_MAX, 0);
380 
381   init_cuda_quark_once ();
382   _init_debug ();
383 
384   return gst_cuda_quark_table[id];
385 }
386 
387 /**
388  * gst_cuda_graphics_resource_new: (skip)
389  * @context: (transfer none): a #GstCudaContext
390  * @graphics_context: (transfer none) (nullable): a graphics API specific context object
391  * @type: a #GstCudaGraphicsResourceType of resource registration
392  *
393  * Create new #GstCudaGraphicsResource with given @context and @type
394  *
395  * Returns: a new #GstCudaGraphicsResource.
396  * Free with gst_cuda_graphics_resource_free
397  */
398 GstCudaGraphicsResource *
gst_cuda_graphics_resource_new(GstCudaContext * context,GstObject * graphics_context,GstCudaGraphicsResourceType type)399 gst_cuda_graphics_resource_new (GstCudaContext *
400     context, GstObject * graphics_context, GstCudaGraphicsResourceType type)
401 {
402   GstCudaGraphicsResource *resource;
403 
404   g_return_val_if_fail (GST_IS_CUDA_CONTEXT (context), NULL);
405 
406   _init_debug ();
407 
408   resource = g_new0 (GstCudaGraphicsResource, 1);
409   resource->cuda_context = gst_object_ref (context);
410   if (graphics_context)
411     resource->graphics_context = gst_object_ref (graphics_context);
412 
413   return resource;
414 }
415 
416 /**
417  * gst_cuda_graphics_resource_register_gl_buffer: (skip)
418  * @resource a #GstCudaGraphicsResource
419  * @buffer: a GL buffer object
420  * @flags: a #CUgraphicsRegisterFlags
421  *
422  * Register the @buffer for access by CUDA.
423  * Must be called from the gl context thread with current cuda context was
424  * pushed on the current thread
425  *
426  * Returns: whether @buffer was registered or not
427  */
gboolean
gst_cuda_graphics_resource_register_gl_buffer (GstCudaGraphicsResource *
    resource, guint buffer, CUgraphicsRegisterFlags flags)
{
  CUresult cuda_ret;

  g_return_val_if_fail (resource != NULL, FALSE);
  /* registering twice without unregistering is a caller bug */
  g_return_val_if_fail (resource->registered == FALSE, FALSE);

  _init_debug ();

  /* per the contract above, the CUDA context must already be pushed on
   * this (GL) thread when this call is made */
  cuda_ret = CuGraphicsGLRegisterBuffer (&resource->resource, buffer, flags);

  if (!gst_cuda_result (cuda_ret))
    return FALSE;

  /* remember registration state and flags for later map/unregister */
  resource->registered = TRUE;
  resource->type = GST_CUDA_GRAPHICS_RESOURCE_GL_BUFFER;
  resource->flags = flags;

  return TRUE;
}
450 
451 /**
452  * gst_cuda_graphics_resource_unregister: (skip)
453  * @resource: a #GstCudaGraphicsResource
454  *
455  * Unregister previously registered resource.
456  * For GL resource, this method must be called from gl context thread.
457  * Also, current cuda context should be pushed on the current thread
458  * before calling this method.
459  */
460 void
gst_cuda_graphics_resource_unregister(GstCudaGraphicsResource * resource)461 gst_cuda_graphics_resource_unregister (GstCudaGraphicsResource * resource)
462 {
463   g_return_if_fail (resource != NULL);
464 
465   _init_debug ();
466 
467   if (!resource->registered)
468     return;
469 
470   gst_cuda_result (CuGraphicsUnregisterResource (resource->resource));
471   resource->resource = NULL;
472   resource->registered = FALSE;
473 
474   return;
475 }
476 
477 /**
478  * gst_cuda_graphics_resource_map: (skip)
479  * @resource: a #GstCudaGraphicsResource
480  * @stream: a #CUstream
481  * @flags: a #CUgraphicsMapResourceFlags
482  *
483  * Map previously registered resource with map flags
484  *
485  * Returns: the #CUgraphicsResource if successful or %NULL when failed
486  */
CUgraphicsResource
gst_cuda_graphics_resource_map (GstCudaGraphicsResource * resource,
    CUstream stream, CUgraphicsMapResourceFlags flags)
{
  CUresult cuda_ret;

  g_return_val_if_fail (resource != NULL, NULL);
  /* mapping requires a prior successful register */
  g_return_val_if_fail (resource->registered != FALSE, NULL);

  _init_debug ();

  /* apply the requested access flags before mapping */
  cuda_ret = CuGraphicsResourceSetMapFlags (resource->resource, flags);
  if (!gst_cuda_result (cuda_ret))
    return NULL;

  cuda_ret = CuGraphicsMapResources (1, &resource->resource, stream);
  if (!gst_cuda_result (cuda_ret))
    return NULL;

  /* remember the map so unmap knows there is something to undo */
  resource->mapped = TRUE;

  return resource->resource;
}
510 
511 /**
512  * gst_cuda_graphics_resource_unmap: (skip)
513  * @resource: a #GstCudaGraphicsResource
514  * @stream: a #CUstream
515  *
516  * Unmap previously mapped resource
517  */
518 void
gst_cuda_graphics_resource_unmap(GstCudaGraphicsResource * resource,CUstream stream)519 gst_cuda_graphics_resource_unmap (GstCudaGraphicsResource * resource,
520     CUstream stream)
521 {
522   g_return_if_fail (resource != NULL);
523   g_return_if_fail (resource->registered != FALSE);
524 
525   _init_debug ();
526 
527   if (!resource->mapped)
528     return;
529 
530   gst_cuda_result (CuGraphicsUnmapResources (1, &resource->resource, stream));
531 
532   resource->mapped = FALSE;
533 }
534 
535 #ifdef HAVE_NVCODEC_GST_GL
536 static void
unregister_resource_from_gl_thread(GstGLContext * gl_context,GstCudaGraphicsResource * resource)537 unregister_resource_from_gl_thread (GstGLContext * gl_context,
538     GstCudaGraphicsResource * resource)
539 {
540   GstCudaContext *cuda_context = resource->cuda_context;
541 
542   if (!gst_cuda_context_push (cuda_context)) {
543     GST_WARNING_OBJECT (cuda_context, "failed to push CUDA context");
544     return;
545   }
546 
547   gst_cuda_graphics_resource_unregister (resource);
548 
549   if (!gst_cuda_context_pop (NULL)) {
550     GST_WARNING_OBJECT (cuda_context, "failed to pop CUDA context");
551   }
552 }
553 #endif
554 
555 /**
556  * gst_cuda_graphics_resource_free: (skip)
557  * @resource: a #GstCudaGraphicsResource
558  *
559  * Free @resource
560  */
void
gst_cuda_graphics_resource_free (GstCudaGraphicsResource * resource)
{
  g_return_if_fail (resource != NULL);

  if (resource->registered) {
#ifdef HAVE_NVCODEC_GST_GL
    if (resource->type == GST_CUDA_GRAPHICS_RESOURCE_GL_BUFFER) {
      /* GL resources must be unregistered on the GL thread; dispatch
       * there (the helper pushes/pops the CUDA context itself) */
      gst_gl_context_thread_add ((GstGLContext *) resource->graphics_context,
          (GstGLContextThreadFunc) unregister_resource_from_gl_thread,
          resource);
    } else
#endif
    {
      /* FIXME: currently opengl only */
      g_assert_not_reached ();
    }
  }

  /* release the references taken in gst_cuda_graphics_resource_new() */
  gst_object_unref (resource->cuda_context);
  if (resource->graphics_context)
    gst_object_unref (resource->graphics_context);
  g_free (resource);
}
585