/third_party/gstreamer/gstplugins_good/sys/v4l2/ |
D | gstv4l2bufferpool.c |
    7    * gstv4l2bufferpool.c V4L2 buffer pool class
    125  gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,  [in gst_v4l2_buffer_pool_copy_buffer(), argument]
    128  const GstVideoFormatInfo *finfo = pool->caps_info.finfo;  [in gst_v4l2_buffer_pool_copy_buffer()]
    130  GST_LOG_OBJECT (pool, "copying buffer");  [in gst_v4l2_buffer_pool_copy_buffer()]
    136  GST_DEBUG_OBJECT (pool, "copy video frame");  [in gst_v4l2_buffer_pool_copy_buffer()]
    139  if (!gst_video_frame_map (&src_frame, &pool->caps_info, src, GST_MAP_READ))  [in gst_v4l2_buffer_pool_copy_buffer()]
    142  if (!gst_video_frame_map (&dest_frame, &pool->caps_info, dest,  [in gst_v4l2_buffer_pool_copy_buffer()]
    155  GST_DEBUG_OBJECT (pool, "copy raw bytes");  [in gst_v4l2_buffer_pool_copy_buffer()]
    169  GST_CAT_LOG_OBJECT (CAT_PERFORMANCE, pool, "slow copy into buffer %p", dest);  [in gst_v4l2_buffer_pool_copy_buffer()]
    175  GST_ERROR_OBJECT (pool, "could not map buffer");  [in gst_v4l2_buffer_pool_copy_buffer()]
    [all …]
|
/third_party/gstreamer/gstreamer/gst/ |
D | gsttaskpool.c |
    4    * gsttaskpool.c: Pool for streaming threads
    25   * @short_description: Pool of GStreamer streaming threads
    49   GST_DEBUG_CATEGORY_INIT (taskpool_debug, "taskpool", 0, "Thread pool"); \
    61   default_func (TaskData * tdata, GstTaskPool * pool)  [in default_func(), argument]
    74   default_prepare (GstTaskPool * pool, GError ** error)  [in default_prepare(), argument]
    76   GST_OBJECT_LOCK (pool);  [in default_prepare()]
    77   pool->pool = g_thread_pool_new ((GFunc) default_func, pool, -1, FALSE, error);  [in default_prepare()]
    78   GST_OBJECT_UNLOCK (pool);  [in default_prepare()]
    82   default_cleanup (GstTaskPool * pool)  [in default_cleanup(), argument]
    86   GST_OBJECT_LOCK (pool);  [in default_cleanup()]
    [all …]
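Note: the default GstTaskPool shown above simply wraps a GThreadPool. A minimal sketch of driving the public GstTaskPool API directly; the task body and the pushed string are illustrative, not taken from this file:

    #include <gst/gst.h>

    /* Matches GstTaskPoolFunction: void (*) (void *user_data). */
    static void
    my_task (gpointer user_data)
    {
      g_print ("running in a pool thread: %s\n", (const gchar *) user_data);
    }

    int
    main (int argc, char **argv)
    {
      GstTaskPool *pool;
      GError *error = NULL;
      gpointer id;

      gst_init (&argc, &argv);

      pool = gst_task_pool_new ();
      /* The default prepare vfunc creates the underlying GThreadPool. */
      gst_task_pool_prepare (pool, &error);
      if (error)
        g_error ("failed to prepare task pool: %s", error->message);

      id = gst_task_pool_push (pool, my_task, (gpointer) "hello", &error);
      if (id)
        gst_task_pool_join (pool, id);    /* release the id (a no-op for the default pool) */

      /* cleanup shuts the worker threads down, waiting for queued tasks. */
      gst_task_pool_cleanup (pool);
      gst_object_unref (pool);
      return 0;
    }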
|
D | gstbufferpool.c |
    25   * @short_description: Pool for buffers
    33   * Once a pool is created, it needs to be configured. A call to
    35   * the pool. With gst_buffer_pool_config_set_params() and
    37   * allocator can be configured. Other properties can be configured in the pool
    38   * depending on the pool implementation.
    46   * gst_buffer_pool_set_config() updates the configuration in the pool. This can
    49   * After the pool has been configured, it can be activated with
    51   * in the pool.
    53   * When the pool is active, gst_buffer_pool_acquire_buffer() can be used to
    54   * retrieve a buffer from the pool.
    [all …]
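Note: the hits above are the GstBufferPool lifecycle documentation (configure, activate, acquire). A minimal sketch of that flow; the caps, buffer size, and min/max counts below are arbitrary placeholders:

    #include <gst/gst.h>

    int
    main (int argc, char **argv)
    {
      GstBufferPool *pool;
      GstStructure *config;
      GstCaps *caps;
      GstBuffer *buf = NULL;

      gst_init (&argc, &argv);

      pool = gst_buffer_pool_new ();

      /* Configure the pool: caps, per-buffer size, min/max number of buffers. */
      caps = gst_caps_new_empty_simple ("video/x-raw");
      config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, caps, 4096, 2, 10);
      gst_caps_unref (caps);
      if (!gst_buffer_pool_set_config (pool, config))
        g_error ("could not set pool configuration");

      /* Activate, acquire and release a buffer, then deactivate. */
      gst_buffer_pool_set_active (pool, TRUE);
      if (gst_buffer_pool_acquire_buffer (pool, &buf, NULL) == GST_FLOW_OK)
        gst_buffer_unref (buf);   /* the unref returns the buffer to the pool */
      gst_buffer_pool_set_active (pool, FALSE);

      gst_object_unref (pool);
      return 0;
    }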
|
/third_party/skia/third_party/externals/microhttpd/src/microhttpd/ |
D | memorypool.c |
    22   * @brief memory pool
    47   * Handle for a memory pool. Pools are not reentrant and must not be
    54   * Pointer to the pool's memory
    59   * Size of the pool.
    74   * #MHD_NO if pool was malloc'ed, #MHD_YES if mmapped (VirtualAlloc'ed for W32).
    81   * Create a memory pool.
    83   * @param max maximum size of the pool
    89   struct MemoryPool *pool;  [in MHD_pool_create(), local]
    91   pool = malloc (sizeof (struct MemoryPool));  [in MHD_pool_create()]
    92   if (NULL == pool)  [in MHD_pool_create()]
    [all …]
|
/third_party/iowow/src/utils/ |
D | iwpool.c |
    18   /** Memory pool */
    22   char *heap; /**< Current pool heap ptr */
    29   IWPOOL *pool;  [in iwpool_create(), local]
    32   pool = malloc(sizeof(*pool));  [in iwpool_create()]
    33   if (!pool) {  [in iwpool_create()]
    36   pool->unit = malloc(sizeof(*pool->unit));  [in iwpool_create()]
    37   if (!pool->unit) {  [in iwpool_create()]
    40   pool->unit->heap = malloc(siz);  [in iwpool_create()]
    41   if (!pool->unit->heap) {  [in iwpool_create()]
    44   pool->asiz = siz;  [in iwpool_create()]
    [all …]
|
D | iwpool.h |
    6    * Memory pool implementation.
    44   * @brief Creates memory pool and preallocate initial buffer of size `siz` bytes.
    48   * @return Pointer to the new pool or `zero` if allocation is failed.
    53   * @brief Create empty pool with no preallocated buffer.
    54   * @return Pointer to the new pool or `zero` if allocation is failed.
    62   * @param pool Pointer to memory pool.
    65   IW_EXPORT void *iwpool_alloc(size_t siz, IWPOOL *pool);
    72   * @param pool Pointer to memory pool.
    75   IW_EXPORT void *iwpool_calloc(size_t siz, IWPOOL *pool);
    78   * @brief Copy a given `str` of size `len` into memory pool.
    [all …]
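Note: a minimal usage sketch built from the declarations quoted above. The include path and the iwpool_destroy() call are assumptions; check iwpool.h for the exact header location and release function.

    #include "iwpool.h"   /* include path depends on how iowow is vendored (assumption) */
    #include <stdio.h>

    int main(void) {
      /* Create a pool with a 4 KiB preallocated buffer. */
      IWPOOL *pool = iwpool_create(4096);
      if (!pool) {
        return 1;
      }
      /* Individual allocations come out of the pool's heap... */
      char *buf  = iwpool_alloc(128, pool);
      int  *nums = iwpool_calloc(16 * sizeof(int), pool);
      if (!buf || !nums) {
        iwpool_destroy(pool);   /* assumed release function */
        return 1;
      }
      snprintf(buf, 128, "first number: %d", nums[0]);
      puts(buf);
      /* ...and are all released at once when the pool itself is destroyed. */
      iwpool_destroy(pool);     /* assumed release function */
      return 0;
    }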
|
/third_party/skia/tests/ |
D | SkBlockAllocatorTest.cpp |
    19   static size_t ScratchBlockSize(SkSBlockAllocator<N>& pool) {  [in ScratchBlockSize(), argument]
    20   return (size_t) pool->scratchBlockSize();  [in ScratchBlockSize()]
    26   static int block_count(const SkSBlockAllocator<N>& pool) {  [in block_count(), argument]
    28   for (const Block* b : pool->blocks()) {  [in block_count()]
    36   static Block* get_block(SkSBlockAllocator<N>& pool, int blockIndex) {  [in get_block(), argument]
    39   for (Block* b: pool->blocks()) {  [in get_block()]
    55   static size_t total_size(SkSBlockAllocator<N>& pool) {  [in total_size(), argument]
    56   return pool->totalSize() - BlockAllocatorTestAccess::ScratchBlockSize(pool);  [in total_size()]
    60   static size_t add_block(SkSBlockAllocator<N>& pool) {  [in add_block(), argument]
    61   size_t currentSize = total_size(pool);  [in add_block()]
    [all …]
|
/third_party/openssl/crypto/rand/ |
D | rand_lib.c |
    52   size_t rand_acquire_entropy_from_tsc(RAND_POOL *pool)  [in rand_acquire_entropy_from_tsc(), argument]
    60   rand_pool_add(pool, &c, 1, 4);  [in rand_acquire_entropy_from_tsc()]
    63   return rand_pool_entropy_available(pool);  [in rand_acquire_entropy_from_tsc()]
    85   size_t rand_acquire_entropy_from_cpu(RAND_POOL *pool)  [in rand_acquire_entropy_from_cpu(), argument]
    90   bytes_needed = rand_pool_bytes_needed(pool, 1 /*entropy_factor*/);  [in rand_acquire_entropy_from_cpu()]
    92   buffer = rand_pool_add_begin(pool, bytes_needed);  [in rand_acquire_entropy_from_cpu()]
    99   rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed);  [in rand_acquire_entropy_from_cpu()]
    104  rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed);  [in rand_acquire_entropy_from_cpu()]
    107  rand_pool_add_end(pool, 0, 0);  [in rand_acquire_entropy_from_cpu()]
    112  return rand_pool_entropy_available(pool);  [in rand_acquire_entropy_from_cpu()]
    [all …]
|
/third_party/ffmpeg/libavfilter/ |
D | framepool.c |
    58   FFFramePool *pool;  [in ff_frame_pool_video_init(), local]
    64   pool = av_mallocz(sizeof(FFFramePool));  [in ff_frame_pool_video_init()]
    65   if (!pool)  [in ff_frame_pool_video_init()]
    68   pool->type = AVMEDIA_TYPE_VIDEO;  [in ff_frame_pool_video_init()]
    69   pool->width = width;  [in ff_frame_pool_video_init()]
    70   pool->height = height;  [in ff_frame_pool_video_init()]
    71   pool->format = format;  [in ff_frame_pool_video_init()]
    72   pool->align = align;  [in ff_frame_pool_video_init()]
    78   if (!pool->linesize[0]) {  [in ff_frame_pool_video_init()]
    80   ret = av_image_fill_linesizes(pool->linesize, pool->format,  [in ff_frame_pool_video_init()]
    [all …]
|
/third_party/gstreamer/gstplugins_bad/gst-libs/gst/vulkan/ |
D | gstvkcommandpool.c |
    31   * @short_description: Vulkan command pool
    37   #define GET_PRIV(pool) gst_vulkan_command_pool_get_instance_private (pool)  [argument]
    55   "vulkancommandpool", 0, "Vulkan Command Pool"));
    60   gst_vulkan_command_pool_init (GstVulkanCommandPool * pool)  [in gst_vulkan_command_pool_init(), argument]
    62   GstVulkanCommandPoolPrivate *priv = GET_PRIV (pool);  [in gst_vulkan_command_pool_init()]
    85   GstVulkanCommandPool *pool = GST_VULKAN_COMMAND_POOL (object);  [in gst_vulkan_command_pool_finalize(), local]
    86   GstVulkanCommandPoolPrivate *priv = GET_PRIV (pool);  [in gst_vulkan_command_pool_finalize()]
    88   gst_vulkan_command_pool_lock (pool);  [in gst_vulkan_command_pool_finalize()]
    91   gst_vulkan_command_pool_unlock (pool);  [in gst_vulkan_command_pool_finalize()]
    95   ("Destroying a Vulkan command pool that has outstanding buffers!");  [in gst_vulkan_command_pool_finalize()]
    [all …]
|
D | gstvkhandlepool.c |
    24   * @short_description: Vulkan handle pool
    45   "vulkanhandlepool", 0, "Vulkan handle pool"));
    48   gst_vulkan_handle_pool_default_alloc (GstVulkanHandlePool * pool,  [in gst_vulkan_handle_pool_default_alloc(), argument]
    55   gst_vulkan_handle_pool_default_acquire (GstVulkanHandlePool * pool,  [in gst_vulkan_handle_pool_default_acquire(), argument]
    60   GST_OBJECT_LOCK (pool);  [in gst_vulkan_handle_pool_default_acquire()]
    61   if (pool->available->len > 0) {  [in gst_vulkan_handle_pool_default_acquire()]
    62   ret = g_ptr_array_remove_index_fast (pool->available, 0);  [in gst_vulkan_handle_pool_default_acquire()]
    64   ret = gst_vulkan_handle_pool_alloc (pool, error);  [in gst_vulkan_handle_pool_default_acquire()]
    68   g_ptr_array_add (pool->outstanding, ret);  [in gst_vulkan_handle_pool_default_acquire()]
    71   if (pool->outstanding->len > GST_VULKAN_HANDLE_POOL_LARGE_OUTSTANDING)  [in gst_vulkan_handle_pool_default_acquire()]
    [all …]
|
D | gstvkdescriptorpool.c |
    30   * @short_description: Vulkan descriptor pool
    34   #define GET_PRIV(pool) gst_vulkan_descriptor_pool_get_instance_private (pool)  [argument]
    49   "vulkancommandpool", 0, "Vulkan Command Pool"));
    54   gst_vulkan_descriptor_pool_init (GstVulkanDescriptorPool * pool)  [in gst_vulkan_descriptor_pool_init(), argument]
    70   GstVulkanDescriptorPool *pool = GST_VULKAN_DESCRIPTOR_POOL (object);  [in gst_vulkan_descriptor_pool_finalize(), local]
    72   GstVulkanDescriptorPoolPrivate *priv = GET_PRIV (pool);  [in gst_vulkan_descriptor_pool_finalize()]
    77   ("Destroying a Vulkan descriptor pool that has outstanding descriptors!");  [in gst_vulkan_descriptor_pool_finalize()]
    80   if (pool->pool)  [in gst_vulkan_descriptor_pool_finalize()]
    81   vkDestroyDescriptorPool (pool->device->device, pool->pool, NULL);  [in gst_vulkan_descriptor_pool_finalize()]
    82   pool->pool = VK_NULL_HANDLE;  [in gst_vulkan_descriptor_pool_finalize()]
    [all …]
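Note: the GStreamer object above wraps a plain VkDescriptorPool (see the vkDestroyDescriptorPool hit). A sketch of the underlying Vulkan calls it manages, with arbitrary descriptor counts and a VkDevice assumed to be created elsewhere:

    #include <vulkan/vulkan.h>

    /* Create a VkDescriptorPool able to hold up to 16 descriptor sets made of
     * uniform-buffer and combined-image-sampler descriptors. */
    static VkDescriptorPool
    create_descriptor_pool (VkDevice device)
    {
      VkDescriptorPoolSize sizes[2] = {
        { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,         .descriptorCount = 16 },
        { .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 16 },
      };
      VkDescriptorPoolCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
        .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
        .maxSets = 16,
        .poolSizeCount = 2,
        .pPoolSizes = sizes,
      };
      VkDescriptorPool pool = VK_NULL_HANDLE;

      if (vkCreateDescriptorPool (device, &info, NULL, &pool) != VK_SUCCESS)
        return VK_NULL_HANDLE;

      /* Descriptor sets are later allocated with vkAllocateDescriptorSets() and
       * the pool is released with vkDestroyDescriptorPool(), as in the finalize
       * hit above. */
      return pool;
    }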
|
/third_party/glib/glib/ |
D | gthreadpool.c |
    4    * GThreadPool: thread pool implementation.
    54   * To create a new thread pool, you use g_thread_pool_new().
    57   * If you want to execute a certain task within a thread pool,
    63   * the maximal number of threads for a thread pool, you use
    81   * @func: the function to execute in the threads of this pool
    82   * @user_data: the user data for the threads of this pool
    83   * @exclusive: are all threads exclusive to this pool
    85   * The #GThreadPool struct represents a thread pool. It has three
    91   GThreadPool pool;  [member]
    123  GThreadPool *pool;  [member]
    [all …]
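Note: a minimal sketch of the GThreadPool API described in the documentation hits above; the worker body and the number of items are illustrative:

    #include <glib.h>

    /* Matches GFunc: data is the pushed item, user_data comes from g_thread_pool_new(). */
    static void
    worker (gpointer data, gpointer user_data)
    {
      g_print ("processing item %d\n", GPOINTER_TO_INT (data));
    }

    int
    main (void)
    {
      GError *error = NULL;
      /* Non-exclusive pool with at most 4 concurrent threads. */
      GThreadPool *pool = g_thread_pool_new (worker, NULL, 4, FALSE, &error);

      if (!pool)
        g_error ("could not create thread pool: %s", error->message);

      for (int i = 1; i <= 10; i++)
        g_thread_pool_push (pool, GINT_TO_POINTER (i), NULL);

      /* FALSE: let queued tasks finish, TRUE: wait for them before returning. */
      g_thread_pool_free (pool, FALSE, TRUE);
      return 0;
    }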
|
/third_party/gstreamer/gstreamer/tests/check/gst/ |
D | gstbufferpool.c |
    30   GstBufferPool *pool = gst_buffer_pool_new ();  [in create_pool(), local]
    31   GstStructure *conf = gst_buffer_pool_get_config (pool);  [in create_pool()]
    35   gst_buffer_pool_set_config (pool, conf);  [in create_pool()]
    38   return pool;  [in create_pool()]
    53   * back into the buffer pool. */
    64   GstBufferPool *pool = create_pool (10, 0, 0);  [in GST_START_TEST(), local]
    67   gst_buffer_pool_set_active (pool, TRUE);  [in GST_START_TEST()]
    68   gst_buffer_pool_acquire_buffer (pool, &buf, NULL);  [in GST_START_TEST()]
    72   gst_buffer_pool_set_active (pool, FALSE);  [in GST_START_TEST()]
    73   gst_object_unref (pool);  [in GST_START_TEST()]
    [all …]
|
/third_party/mesa3d/src/panfrost/vulkan/ |
D | panvk_mempool.c |
    32   * In "owned" mode, a single parent owns the entire pool, and the pool owns all
    34   * panvk_pool_get_bo_handles. Freeing occurs at the level of an entire pool.
    35   * This is useful for streaming uploads, where the batch owns the pool.
    37   * In "unowned" mode, the pool is freestanding. It does not track created BOs
    44   panvk_pool_alloc_backing(struct panvk_pool *pool, size_t bo_sz)  [in panvk_pool_alloc_backing(), argument]
    48   /* If there's a free BO in our BO pool, let's pick it. */  [in panvk_pool_alloc_backing()]
    49   if (pool->bo_pool && bo_sz == pool->base.slab_size &&  [in panvk_pool_alloc_backing()]
    50   util_dynarray_num_elements(&pool->bo_pool->free_bos, struct panfrost_bo *)) {  [in panvk_pool_alloc_backing()]
    51   bo = util_dynarray_pop(&pool->bo_pool->free_bos, struct panfrost_bo *);  [in panvk_pool_alloc_backing()]
    59   bo = panfrost_bo_create(pool->base.dev, bo_sz,  [in panvk_pool_alloc_backing()]
    [all …]
|
/third_party/mesa3d/src/gallium/drivers/r600/ |
D | compute_memory_pool.c |
    48   static void compute_memory_shadow(struct compute_memory_pool* pool,
    51   static void compute_memory_defrag(struct compute_memory_pool *pool,
    55   static int compute_memory_promote_item(struct compute_memory_pool *pool,
    59   static void compute_memory_move_item(struct compute_memory_pool *pool,
    64   static void compute_memory_transfer(struct compute_memory_pool* pool,
    70   * Creates a new pool.
    75   struct compute_memory_pool* pool = (struct compute_memory_pool*)  [in compute_memory_pool_new(), local]
    77   if (!pool)  [in compute_memory_pool_new()]
    82   pool->screen = rscreen;  [in compute_memory_pool_new()]
    83   pool->item_list = (struct list_head *)  [in compute_memory_pool_new()]
    [all …]
|
/third_party/mesa3d/src/gallium/frontends/nine/ |
D | threadpool.c |
    44   struct threadpool *pool = data;  [in threadpool_worker(), local]
    46   pthread_mutex_lock(&pool->m);  [in threadpool_worker()]
    48   while (!pool->shutdown) {  [in threadpool_worker()]
    52   while (!pool->workqueue && !pool->shutdown)  [in threadpool_worker()]
    53   pthread_cond_wait(&pool->new_work, &pool->m);  [in threadpool_worker()]
    55   if (pool->shutdown)  [in threadpool_worker()]
    62   task = pool->workqueue;  [in threadpool_worker()]
    63   pool->workqueue = task->next;  [in threadpool_worker()]
    66   pthread_mutex_unlock(&pool->m);  [in threadpool_worker()]
    68   pthread_mutex_lock(&pool->m);  [in threadpool_worker()]
    [all …]
|
/third_party/ffmpeg/libavutil/ |
D | buffer.c |
    249  AVBufferPool *pool = av_mallocz(sizeof(*pool));  [in av_buffer_pool_init2(), local]
    250  if (!pool)  [in av_buffer_pool_init2()]
    253  ff_mutex_init(&pool->mutex, NULL);  [in av_buffer_pool_init2()]
    255  pool->size = size;  [in av_buffer_pool_init2()]
    256  pool->opaque = opaque;  [in av_buffer_pool_init2()]
    257  pool->alloc2 = alloc;  [in av_buffer_pool_init2()]
    258  pool->alloc = av_buffer_alloc; // fallback  [in av_buffer_pool_init2()]
    259  pool->pool_free = pool_free;  [in av_buffer_pool_init2()]
    261  atomic_init(&pool->refcount, 1);  [in av_buffer_pool_init2()]
    263  return pool;  [in av_buffer_pool_init2()]
    [all …]
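Note: av_buffer_pool_init2() above is the extended constructor with a custom allocator; the common public flow uses av_buffer_pool_init() and av_buffer_pool_get(). A minimal sketch with an arbitrary 4096-byte buffer size:

    #include <libavutil/buffer.h>

    int main(void)
    {
        /* Pool of reference-counted 4096-byte buffers using the default allocator. */
        AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
        if (!pool)
            return 1;

        AVBufferRef *buf = av_buffer_pool_get(pool);
        if (buf) {
            buf->data[0] = 42;        /* use the buffer */
            av_buffer_unref(&buf);    /* returns the buffer to the pool */
        }

        /* Marks the pool for freeing; the memory is actually released once all
         * outstanding buffers have been returned. */
        av_buffer_pool_uninit(&pool);
        return 0;
    }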
|
/third_party/wayland_standard/src/ |
D | wayland-shm.c |
    75   struct wl_shm_pool *pool;  [member]
    85   shm_pool_finish_resize(struct wl_shm_pool *pool)  [in shm_pool_finish_resize(), argument]
    89   if (pool->size == pool->new_size)  [in shm_pool_finish_resize()]
    92   data = mremap(pool->data, pool->size, pool->new_size, MREMAP_MAYMOVE);  [in shm_pool_finish_resize()]
    94   wl_resource_post_error(pool->resource,  [in shm_pool_finish_resize()]
    100  pool->data = data;  [in shm_pool_finish_resize()]
    101  pool->size = pool->new_size;  [in shm_pool_finish_resize()]
    105  shm_pool_unref(struct wl_shm_pool *pool, bool external)  [in shm_pool_unref(), argument]
    108  pool->external_refcount--;  [in shm_pool_unref()]
    109  if (pool->external_refcount == 0)  [in shm_pool_unref()]
    [all …]
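Note: these hits are the compositor-side wl_shm implementation. A sketch of the matching client-side calls that create and use such a pool; the memfd_create() backing store and the ARGB8888 format choice are assumptions for illustration:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <wayland-client.h>

    /* `shm` is assumed to be a struct wl_shm * bound from the registry. */
    static struct wl_buffer *
    create_shm_buffer(struct wl_shm *shm, int width, int height)
    {
        int stride = width * 4;              /* ARGB8888: 4 bytes per pixel */
        int size = stride * height;

        int fd = memfd_create("shm-pool", MFD_CLOEXEC);
        if (fd < 0 || ftruncate(fd, size) < 0)
            return NULL;

        void *data = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (data == MAP_FAILED) {
            close(fd);
            return NULL;
        }
        ((uint32_t *) data)[0] = 0xff000000; /* pixels are written through this mapping */

        /* The compositor maps the same fd; a later wl_shm_pool_resize() request is
         * what leads to the mremap() seen in shm_pool_finish_resize() above. */
        struct wl_shm_pool *pool = wl_shm_create_pool(shm, fd, size);
        struct wl_buffer *buffer =
            wl_shm_pool_create_buffer(pool, 0, width, height, stride,
                                      WL_SHM_FORMAT_ARGB8888);
        wl_shm_pool_destroy(pool);           /* the buffer keeps the storage alive */
        close(fd);

        return buffer;                       /* client keeps `data` mapped for drawing */
    }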
|
/third_party/mesa3d/src/util/ |
D | slab.c |
    48   * - a pointer to the child pool to which this element belongs, or
    62   /* Next page in the same child pool. */
    83   * pool has been destroyed). Mark the element as freed and free the whole page
    99   * Create a parent pool for the allocation of same-sized objects.
    122  * Create a child pool linked to the given parent.
    124  void slab_create_child(struct slab_child_pool *pool,  [in slab_create_child(), argument]
    127  pool->parent = parent;  [in slab_create_child()]
    128  pool->pages = NULL;  [in slab_create_child()]
    129  pool->free = NULL;  [in slab_create_child()]
    130  pool->migrated = NULL;  [in slab_create_child()]
    [all …]
|
/third_party/vk-gl-cts/framework/delibs/depool/ |
D | deMemPool.c |
    2    * drawElements Memory Pool Library
    21   * \brief Memory pool management.
    48   * Represent a page of memory allocate by a memory pool.
    69   * \brief Memory pool.
    71   * A pool of memory from which individual memory allocations can be made.
    73   * but rather all of the memory allocated from a pool is freed when the pool
    76   * The pools can be arranged into a hierarchy. If a pool with children is
    78   * the pool itself.
    82   * creating the root pool with the deMemPool_createFailingRoot() function.
    92   deMemPool* firstChild; /*!< Pointer to first child pool in linked list. */
    [all …]
|
/third_party/mesa3d/src/gallium/drivers/llvmpipe/ |
D | lp_cs_tpool.c |
    27   * compute shader thread pool.
    38   struct lp_cs_tpool *pool = data;  [in lp_cs_tpool_worker(), local]
    42   mtx_lock(&pool->m);  [in lp_cs_tpool_worker()]
    44   while (!pool->shutdown) {  [in lp_cs_tpool_worker()]
    48   while (list_is_empty(&pool->workqueue) && !pool->shutdown)  [in lp_cs_tpool_worker()]
    49   cnd_wait(&pool->new_work, &pool->m);  [in lp_cs_tpool_worker()]
    51   if (pool->shutdown)  [in lp_cs_tpool_worker()]
    54   task = list_first_entry(&pool->workqueue, struct lp_cs_tpool_task,  [in lp_cs_tpool_worker()]
    72   mtx_unlock(&pool->m);  [in lp_cs_tpool_worker()]
    76   mtx_lock(&pool->m);  [in lp_cs_tpool_worker()]
    [all …]
|
/third_party/mesa3d/src/broadcom/vulkan/ |
D | v3dv_query.c |
    38   struct v3dv_query_pool *pool =  [in v3dv_CreateQueryPool(), local]
    39   vk_object_zalloc(&device->vk, pAllocator, sizeof(*pool),  [in v3dv_CreateQueryPool()]
    41   if (pool == NULL)  [in v3dv_CreateQueryPool()]
    44   pool->query_type = pCreateInfo->queryType;  [in v3dv_CreateQueryPool()]
    45   pool->query_count = pCreateInfo->queryCount;  [in v3dv_CreateQueryPool()]
    49   const uint32_t pool_bytes = sizeof(struct v3dv_query) * pool->query_count;  [in v3dv_CreateQueryPool()]
    50   pool->queries = vk_alloc2(&device->vk.alloc, pAllocator, pool_bytes, 8,  [in v3dv_CreateQueryPool()]
    52   if (pool->queries == NULL) {  [in v3dv_CreateQueryPool()]
    57   if (pool->query_type == VK_QUERY_TYPE_OCCLUSION) {  [in v3dv_CreateQueryPool()]
    62   const uint32_t query_groups = DIV_ROUND_UP(pool->query_count, 16);  [in v3dv_CreateQueryPool()]
    [all …]
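Note: v3dv_CreateQueryPool() above is the driver entry point behind the standard vkCreateQueryPool() call. A sketch of the application-side usage, assuming a valid VkDevice created elsewhere:

    #include <vulkan/vulkan.h>

    /* Create an occlusion query pool with `query_count` queries. */
    static VkQueryPool
    create_occlusion_query_pool (VkDevice device, uint32_t query_count)
    {
      VkQueryPoolCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
        .queryType = VK_QUERY_TYPE_OCCLUSION,
        .queryCount = query_count,
      };
      VkQueryPool pool = VK_NULL_HANDLE;

      if (vkCreateQueryPool (device, &info, NULL, &pool) != VK_SUCCESS)
        return VK_NULL_HANDLE;

      /* Results are read back later with vkGetQueryPoolResults() and the pool is
       * released with vkDestroyQueryPool(). */
      return pool;
    }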
|
/third_party/mesa3d/src/gallium/drivers/panfrost/ |
D | pan_mempool.c |
    32   * In "owned" mode, a single parent owns the entire pool, and the pool owns all
    34   * panfrost_pool_get_bo_handles. Freeing occurs at the level of an entire pool.
    35   * This is useful for streaming uploads, where the batch owns the pool.
    37   * In "unowned" mode, the pool is freestanding. It does not track created BOs
    44   panfrost_pool_alloc_backing(struct panfrost_pool *pool, size_t bo_sz)  [in panfrost_pool_alloc_backing(), argument]
    52   struct panfrost_bo *bo = panfrost_bo_create(pool->base.dev, bo_sz,  [in panfrost_pool_alloc_backing()]
    53   pool->base.create_flags, pool->base.label);  [in panfrost_pool_alloc_backing()]
    55   if (pool->owned)  [in panfrost_pool_alloc_backing()]
    56   util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);  [in panfrost_pool_alloc_backing()]
    58   panfrost_bo_unreference(pool->transient_bo);  [in panfrost_pool_alloc_backing()]
    [all …]
|
/third_party/mesa3d/src/intel/vulkan/ |
D | anv_allocator.c |
    84   * At the next level we can use various sub-allocators. The state pool is a
    85   * pool of smaller, fixed size objects, which operates much like the block
    86   * pool. It uses a free list for freeing objects, but when it runs out of
    87   * space it just allocates a new block from the block pool. This allocator is
    192  /* Assert that we only ever grow the pool */  [in anv_state_table_expand_range()]
    205  /* Just leak the old map until we destroy the pool. We can't munmap it  [in anv_state_table_expand_range()]
    236  /* The block pool is always initialized to a nonzero size and this function  [in anv_state_table_grow()]
    297  /* We allocated the first block outside the pool so we have to grow  [in anv_state_table_add()]
    298  * the pool. pool_state->next acts a mutex: threads who try to  [in anv_state_table_add()]
    364  anv_block_pool_expand_range(struct anv_block_pool *pool,
    [all …]
|