
Searched full:pool (results 1 – 25 of 2316), sorted by relevance


/kernel/linux/linux-5.10/net/xdp/
xsk_buff_pool.c
 11  void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
 18      spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
 19      list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
 20      spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
 23  void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
 30      spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
 32      spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
 35  void xp_destroy(struct xsk_buff_pool *pool)
 37      if (!pool)
 40      kvfree(pool->heads);
[all …]
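
The excerpt above is the writer side of an RCU-protected list: xp_add_xsk() serializes mutations with an IRQ-safe spinlock while readers traverse the list lock-free under RCU. A minimal sketch of the same pattern; my_pool and its fields are illustrative names, not the kernel's:

    #include <linux/spinlock.h>
    #include <linux/rculist.h>

    struct my_pool {
            spinlock_t lock;                /* serializes writers only */
            struct list_head items;         /* read via list_for_each_entry_rcu() */
    };

    static void my_pool_add(struct my_pool *pool, struct list_head *item)
    {
            unsigned long flags;

            spin_lock_irqsave(&pool->lock, flags);
            list_add_rcu(item, &pool->items);
            spin_unlock_irqrestore(&pool->lock, flags);
    }
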
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
rxe_pool.c
 81  static inline const char *pool_name(struct rxe_pool *pool)
 83      return rxe_type_info[pool->type].name;
 86  static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 91      if ((max - min + 1) < pool->max_elem) {
 97      pool->max_index = max;
 98      pool->min_index = min;
101      pool->table = kmalloc(size, GFP_KERNEL);
102      if (!pool->table) {
107      pool->table_size = size;
108      bitmap_zero(pool->table, max - min + 1);
[all …]
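
rxe_pool_init_index() rejects index ranges smaller than the pool's element count, then allocates a bitmap with one bit per index in [min, max]. A sketch of the sizing arithmetic; the helper name is illustrative:

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    static unsigned long *alloc_index_bitmap(u32 min, u32 max)
    {
            /* one bit per index, rounded up to whole longs */
            size_t size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
            unsigned long *table = kmalloc(size, GFP_KERNEL);

            if (table)
                    bitmap_zero(table, max - min + 1);
            return table;
    }
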
/kernel/linux/linux-5.10/drivers/net/ethernet/ti/
k3-cppi-desc-pool.c
  2  /* TI K3 CPPI5 descriptors pool API
 15  #include "k3-cppi-desc-pool.h"
 27  void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
 29      if (!pool)
 32      WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
 34           gen_pool_size(pool->gen_pool),
 35           gen_pool_avail(pool->gen_pool));
 36      if (pool->cpumem)
 37          dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
 38                            pool->dma_addr);
[all …]
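
The WARN() in k3_cppi_desc_pool_destroy() compares gen_pool_size() (total bytes the pool owns) against gen_pool_avail() (bytes currently free); any difference at destroy time means descriptors were never returned. The check in isolation, as a hedged sketch:

    #include <linux/genalloc.h>
    #include <linux/kernel.h>

    static void check_pool_drained(struct gen_pool *gp)
    {
            /* size == avail iff every allocation was returned */
            WARN(gen_pool_size(gp) != gen_pool_avail(gp),
                 "pool not drained: %zu of %zu bytes still allocated\n",
                 gen_pool_size(gp) - gen_pool_avail(gp), gen_pool_size(gp));
    }
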
/kernel/linux/linux-5.10/mm/
mempool.c
  5   * memory buffer pool support. Such pools are mostly used
 25  static void poison_error(mempool_t *pool, void *element, size_t size,
 28      const int nr = pool->curr_nr;
 34      pr_err("Mempool %p size %zu\n", pool, size);
 42  static void __check_element(mempool_t *pool, void *element, size_t size)
 51          poison_error(pool, element, size, i);
 58  static void check_element(mempool_t *pool, void *element)
 61      if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 62          __check_element(pool, element, ksize(element));
 63      } else if (pool->free == mempool_free_pages) {
[all …]
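
mempool.c implements the mempool_create()/mempool_alloc()/mempool_free() API; the poisoning code excerpted above only validates elements on their way in and out of the reserve. Typical consumer usage, as a minimal sketch with arbitrary sizes:

    #include <linux/mempool.h>

    static mempool_t *io_pool;

    static int io_pool_setup(void)
    {
            /* guarantee at least 16 preallocated 256-byte elements */
            io_pool = mempool_create_kmalloc_pool(16, 256);
            return io_pool ? 0 : -ENOMEM;
    }

    static void io_pool_example(void)
    {
            /* with a reclaim-capable gfp mask this waits rather than fails */
            void *buf = mempool_alloc(io_pool, GFP_NOIO);

            /* ... use buf on the I/O path ... */
            mempool_free(buf, io_pool);
    }
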
dmapool.c
  3   * DMA Pool allocator
 14   * The current design of this allocator is fairly simple. The pool is
 42  struct dma_pool {           /* the pool */
 71      struct dma_pool *pool;
 81      list_for_each_entry(pool, &dev->dma_pools, pools) {
 85          spin_lock_irq(&pool->lock);
 86          list_for_each_entry(page, &pool->page_list, page_list) {
 90          spin_unlock_irq(&pool->lock);
 92      /* per-pool info, no real statistics yet */
 94      pool->name, blocks,
[all …]
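
show_pools() above is only the sysfs statistics hook; drivers drive the allocator through dma_pool_create()/dma_pool_alloc()/dma_pool_free(). A hedged usage sketch; the device pointer and sizes are placeholders:

    #include <linux/dmapool.h>

    static void dma_pool_example(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            /* 64-byte blocks, 64-byte aligned, no boundary-crossing rule */
            pool = dma_pool_create("example", dev, 64, 64, 0);
            if (!pool)
                    return;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (vaddr)
                    dma_pool_free(pool, vaddr, dma);
            dma_pool_destroy(pool);
    }
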
/kernel/linux/linux-4.19/mm/
mempool.c
  5   * memory buffer pool support. Such pools are mostly used
 25  static void poison_error(mempool_t *pool, void *element, size_t size,
 28      const int nr = pool->curr_nr;
 34      pr_err("Mempool %p size %zu\n", pool, size);
 42  static void __check_element(mempool_t *pool, void *element, size_t size)
 51          poison_error(pool, element, size, i);
 58  static void check_element(mempool_t *pool, void *element)
 61      if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
 62          __check_element(pool, element, ksize(element));
 65      if (pool->free == mempool_free_pages) {
[all …]
dmapool.c
  2  * DMA Pool allocator
 17   * The current design of this allocator is fairly simple. The pool is
 45  struct dma_pool {           /* the pool */
 74      struct dma_pool *pool;
 84      list_for_each_entry(pool, &dev->dma_pools, pools) {
 88          spin_lock_irq(&pool->lock);
 89          list_for_each_entry(page, &pool->page_list, page_list) {
 93          spin_unlock_irq(&pool->lock);
 95      /* per-pool info, no real statistics yet */
 97      pool->name, blocks,
[all …]
/kernel/linux/linux-5.10/drivers/staging/android/ion/
ion_page_pool.c
  3  /* ION Memory Allocator page pool helpers
 15  static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 19      return alloc_pages(pool->gfp_mask, pool->order);
 22  static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 25      __free_pages(page, pool->order);
 28  static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 30      mutex_lock(&pool->mutex);
 32          list_add_tail(&page->lru, &pool->high_items);
 33          pool->high_count++;
 35          list_add_tail(&page->lru, &pool->low_items);
[all …]
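
The truncated branch above is ion_page_pool_add() sorting cached pages onto high_items or low_items depending on PageHighMem(), so the shrinker can drain one class first. The full shape of that helper, reconstructed as a sketch with illustrative type names:

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/mutex.h>

    struct my_page_pool {
            struct mutex mutex;
            struct list_head high_items, low_items;
            int high_count, low_count;
    };

    static void my_page_pool_add(struct my_page_pool *pool, struct page *page)
    {
            mutex_lock(&pool->mutex);
            if (PageHighMem(page)) {
                    list_add_tail(&page->lru, &pool->high_items);
                    pool->high_count++;
            } else {
                    list_add_tail(&page->lru, &pool->low_items);
                    pool->low_count++;
            }
            mutex_unlock(&pool->mutex);
    }
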
/kernel/linux/linux-4.19/drivers/staging/android/ion/
ion_page_pool.c
 15  static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 19      return alloc_pages(pool->gfp_mask, pool->order);
 22  static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 25      __free_pages(page, pool->order);
 28  static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 30      mutex_lock(&pool->mutex);
 32          list_add_tail(&page->lru, &pool->high_items);
 33          pool->high_count++;
 35          list_add_tail(&page->lru, &pool->low_items);
 36          pool->low_count++;
[all …]
/kernel/linux/linux-5.10/net/core/
page_pool.c
 24  static int page_pool_init(struct page_pool *pool,
 29      memcpy(&pool->p, params, sizeof(pool->p));
 32      if (pool->p.flags & ~(PP_FLAG_ALL))
 35      if (pool->p.pool_size)
 36          ring_qsize = pool->p.pool_size;
 46      if (pool->p.flags & PP_FLAG_DMA_MAP) {
 47          if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
 48              (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 52      if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
 56          if (!(pool->p.flags & PP_FLAG_DMA_MAP))
[all …]
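
page_pool_init() validates the caller-supplied page_pool_params: with PP_FLAG_DMA_MAP the DMA direction must be DMA_FROM_DEVICE or DMA_BIDIRECTIONAL, and PP_FLAG_DMA_SYNC_DEV requires PP_FLAG_DMA_MAP. A driver-side sketch of satisfying those checks; pool_size is an arbitrary example value:

    #include <net/page_pool.h>

    static struct page_pool *rx_pool_create(struct device *dev)
    {
            struct page_pool_params pp = {
                    .flags     = PP_FLAG_DMA_MAP,   /* pool maps pages for us */
                    .order     = 0,                 /* single pages */
                    .pool_size = 256,
                    .nid       = NUMA_NO_NODE,
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,   /* required by the flag above */
            };

            return page_pool_create(&pp);           /* ERR_PTR() on failure */
    }
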
/kernel/linux/linux-4.19/drivers/infiniband/sw/rxe/
rxe_pool.c
105  static inline const char *pool_name(struct rxe_pool *pool)
107      return rxe_type_info[pool->type].name;
110  static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
112      return rxe_type_info[pool->type].cache;
161  static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
166      if ((max - min + 1) < pool->max_elem) {
172      pool->max_index = max;
173      pool->min_index = min;
176      pool->table = kmalloc(size, GFP_KERNEL);
177      if (!pool->table) {
[all …]
/kernel/linux/linux-4.19/drivers/infiniband/core/
fmr_pool.c
 57   * its pool's free_list (if the FMR can be mapped again; that is,
 58   * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 95      void (*flush_function)(struct ib_fmr_pool *pool,
115  static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
123      if (!pool->cache_bucket)
126      bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
138  static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
145      spin_lock_irq(&pool->pool_lock);
147      list_for_each_entry(fmr, &pool->dirty_list, list) {
160      list_splice_init(&pool->dirty_list, &unmap_list);
[all …]
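
ib_fmr_batch_release() uses a common batching idiom: the dirty list is spliced onto a private list under the pool lock, and the expensive unmap work then happens with the lock dropped. The idiom in isolation, as a sketch:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static void batch_release(spinlock_t *lock, struct list_head *dirty)
    {
            LIST_HEAD(unmap_list);

            spin_lock_irq(lock);
            list_splice_init(dirty, &unmap_list);   /* dirty is now empty */
            spin_unlock_irq(lock);

            /* walk unmap_list and release entries without holding the lock */
    }
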
/kernel/linux/linux-5.10/drivers/md/
dm-thin.c
 41   * The block size of the device holding pool data must be
191   * A pool device ties together a metadata device and a data device. It
198   * The pool runs in various modes. Ordered in degraded order for comparisons.
229  struct pool {
231      struct dm_target *ti;    /* Only set if a pool target is bound */
289  static void metadata_operation_failed(struct pool *pool, const char *op, int r);
291  static enum pool_mode get_pool_mode(struct pool *pool)
293      return pool->pf.mode;
296  static void notify_of_pool_mode_change(struct pool *pool)
306      enum pool_mode mode = get_pool_mode(pool);
[all …]
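
The comment "Ordered in degraded order for comparisons" means the pool_mode enumerators are sorted by severity, so code can compare modes numerically instead of enumerating cases. An illustrative sketch; the enumerator names mirror dm-thin's but the values and helper are hypothetical:

    enum my_pool_mode {
            MY_PM_WRITE,                    /* fully operational */
            MY_PM_OUT_OF_DATA_SPACE,
            MY_PM_READ_ONLY,
            MY_PM_FAIL,                     /* all I/O fails */
    };

    static bool my_pool_is_degraded(enum my_pool_mode mode)
    {
            /* valid only because the enum is ordered by severity */
            return mode >= MY_PM_OUT_OF_DATA_SPACE;
    }
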
/kernel/linux/linux-4.19/drivers/md/
dm-thin.c
 41   * The block size of the device holding pool data must be
191   * A pool device ties together a metadata device and a data device. It
198   * The pool runs in various modes. Ordered in degraded order for comparisons.
229  struct pool {
231      struct dm_target *ti;    /* Only set if a pool target is bound */
286  static void metadata_operation_failed(struct pool *pool, const char *op, int r);
288  static enum pool_mode get_pool_mode(struct pool *pool)
290      return pool->pf.mode;
293  static void notify_of_pool_mode_change(struct pool *pool)
303      enum pool_mode mode = get_pool_mode(pool);
[all …]
/kernel/linux/linux-5.10/sound/core/seq/
seq_memory.c
 22  static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
 24      return pool->total_elements - atomic_read(&pool->counter);
 27  static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
 29      return snd_seq_pool_available(pool) >= pool->room;
163  static inline void free_cell(struct snd_seq_pool *pool,
166      cell->next = pool->free;
167      pool->free = cell;
168      atomic_dec(&pool->counter);
174      struct snd_seq_pool *pool;
178      pool = cell->pool;
[all …]
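
free_cell() pushes a cell back onto an intrusive singly linked free list and decrements the outstanding-cell counter; availability is then just total_elements minus the counter, as snd_seq_pool_available() shows. A sketch with illustrative types (the real code runs under the pool's lock):

    #include <linux/atomic.h>

    struct my_cell {
            struct my_cell *next;
    };

    struct my_cell_pool {
            struct my_cell *free;           /* head of the free list */
            atomic_t counter;               /* cells currently handed out */
            int total_elements;
    };

    static void my_free_cell(struct my_cell_pool *pool, struct my_cell *cell)
    {
            cell->next = pool->free;
            pool->free = cell;
            atomic_dec(&pool->counter);
    }
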
/kernel/linux/linux-5.10/include/net/
xdp_sock_drv.h
 14  void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
 15  bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
 16  void xsk_tx_release(struct xsk_buff_pool *pool);
 19  void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
 20  void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
 21  void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
 22  void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
 23  bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
 25  static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
 27      return XDP_PACKET_HEADROOM + pool->headroom;
[all …]
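
These are the declarations a network driver uses against an AF_XDP buffer pool. The need_wakeup flags let the driver tell user space when it must kick the kernel via poll(); a hedged sketch of how a driver might toggle the RX flag (the helper itself is hypothetical):

    #include <net/xdp_sock_drv.h>

    static void my_update_rx_wakeup(struct xsk_buff_pool *pool, bool starved)
    {
            if (!xsk_uses_need_wakeup(pool))
                    return;                 /* feature not negotiated */

            if (starved)
                    xsk_set_rx_need_wakeup(pool);
            else
                    xsk_clear_rx_need_wakeup(pool);
    }
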
/kernel/linux/linux-4.19/sound/core/seq/
seq_memory.c
 36  static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
 38      return pool->total_elements - atomic_read(&pool->counter);
 41  static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
 43      return snd_seq_pool_available(pool) >= pool->room;
177  static inline void free_cell(struct snd_seq_pool *pool,
180      cell->next = pool->free;
181      pool->free = cell;
182      atomic_dec(&pool->counter);
188      struct snd_seq_pool *pool;
192      pool = cell->pool;
[all …]
/kernel/linux/linux-5.10/net/ceph/
msgpool.c
 14      struct ceph_msgpool *pool = arg;
 17      msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
 20          dout("msgpool_alloc %s failed\n", pool->name);
 22      dout("msgpool_alloc %s %p\n", pool->name, msg);
 23      msg->pool = pool;
 30      struct ceph_msgpool *pool = arg;
 33      dout("msgpool_release %s %p\n", pool->name, msg);
 34      msg->pool = NULL;
 38  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
 43      pool->type = type;
[all …]
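
ceph_msgpool is a thin wrapper over mempool_t: ceph_msgpool_init() registers msgpool_alloc()/msgpool_free() as the mempool's element constructor and destructor, and the pool pointer comes back as the opaque arg seen above. The wiring pattern, rebuilt as a self-contained sketch where every my_* name is illustrative:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_msg { int type; };

    struct my_msgpool {
            mempool_t *mempool;
            int type;
    };

    static void *my_msg_alloc(gfp_t gfp, void *arg)
    {
            struct my_msgpool *pool = arg;  /* pool_data round-trips here */
            struct my_msg *msg = kmalloc(sizeof(*msg), gfp);

            if (msg)
                    msg->type = pool->type;
            return msg;
    }

    static void my_msg_free(void *element, void *arg)
    {
            kfree(element);
    }

    static int my_msgpool_init(struct my_msgpool *pool, int type, int size)
    {
            pool->type = type;
            pool->mempool = mempool_create(size, my_msg_alloc, my_msg_free, pool);
            return pool->mempool ? 0 : -ENOMEM;
    }
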
/kernel/linux/linux-4.19/net/ceph/
msgpool.c
 14      struct ceph_msgpool *pool = arg;
 17      msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
 19          dout("msgpool_alloc %s failed\n", pool->name);
 21      dout("msgpool_alloc %s %p\n", pool->name, msg);
 22      msg->pool = pool;
 29      struct ceph_msgpool *pool = arg;
 32      dout("msgpool_release %s %p\n", pool->name, msg);
 33      msg->pool = NULL;
 37  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
 41      pool->type = type;
[all …]
/kernel/linux/linux-5.10/lib/
genalloc.c
 16   * available. If new memory is added to the pool a lock has to be
145   * gen_pool_create - create a new special memory pool
147   * @nid: node id of the node the pool structure should be allocated on, or -1
149   * Create a new special memory pool that can be used to manage special purpose
154      struct gen_pool *pool;
156      pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
157      if (pool != NULL) {
158          spin_lock_init(&pool->lock);
159          INIT_LIST_HEAD(&pool->chunks);
160          pool->min_alloc_order = min_alloc_order;
[all …]
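
gen_pool_create() only builds the bookkeeping; memory is handed to the pool afterwards with gen_pool_add(), then carved up with gen_pool_alloc()/gen_pool_free(). A usage sketch for managing a chunk of special-purpose memory; addresses and sizes are placeholders:

    #include <linux/genalloc.h>

    static int genpool_example(unsigned long region, size_t region_size)
    {
            unsigned long addr;
            /* min_alloc_order 5: allocations are multiples of 32 bytes */
            struct gen_pool *pool = gen_pool_create(5, -1);

            if (!pool)
                    return -ENOMEM;

            if (gen_pool_add(pool, region, region_size, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            addr = gen_pool_alloc(pool, 128);
            if (addr)
                    gen_pool_free(pool, addr, 128);
            gen_pool_destroy(pool);
            return 0;
    }
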
/kernel/linux/linux-4.19/lib/
genalloc.c
 15   * available. If new memory is added to the pool a lock has to be
146   * gen_pool_create - create a new special memory pool
148   * @nid: node id of the node the pool structure should be allocated on, or -1
150   * Create a new special memory pool that can be used to manage special purpose
155      struct gen_pool *pool;
157      pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
158      if (pool != NULL) {
159          spin_lock_init(&pool->lock);
160          INIT_LIST_HEAD(&pool->chunks);
161          pool->min_alloc_order = min_alloc_order;
[all …]
/kernel/linux/linux-5.10/drivers/staging/media/atomisp/pci/runtime/rmgr/src/
rmgr_vbuf.c
 31   * @brief VBUF resource pool - refpool
 42   * @brief VBUF resource pool - writepool
 53   * @brief VBUF resource pool - hmmbufferpool
137   * @brief Initialize the resource pool (host, vbuf)
139   * @param pool The pointer to the pool
141  int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
147      assert(pool);
148      if (!pool)
150      /* initialize the recycle pool if used */
151      if (pool->recycle && pool->size) {
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c
 27   * A simple DMA pool losely based on dmapool.c. It has certain advantages
 29   * - Pool collects resently freed pages for reuse (and hooks up to
 71   * The pool structure. There are up to nine pools:
 81   * @type: Type of the pool
 83   * used with irqsave/irqrestore variants because pool allocator maybe called
 85   * @free_list: Pool of pages that are free to be used. No order requirements.
 90   * @nfrees: Stats when pool is shrinking.
 91   * @nrefills: Stats when the pool is grown.
 93   * @name: Name of the pool.
118   * huge pool
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c
 27   * A simple DMA pool losely based on dmapool.c. It has certain advantages
 29   * - Pool collects resently freed pages for reuse (and hooks up to
 70   * The pool structure. There are up to nine pools:
 80   * @type: Type of the pool
 82   * used with irqsave/irqrestore variants because pool allocator maybe called
 84   * @free_list: Pool of pages that are free to be used. No order requirements.
 89   * @nfrees: Stats when pool is shrinking.
 90   * @nrefills: Stats when the pool is grown.
 92   * @name: Name of the pool.
117   * huge pool
[all …]
/kernel/linux/linux-4.19/net/core/
page_pool.c
 17  static int page_pool_init(struct page_pool *pool,
 22      memcpy(&pool->p, params, sizeof(pool->p));
 25      if (pool->p.flags & ~(PP_FLAG_ALL))
 28      if (pool->p.pool_size)
 29          ring_qsize = pool->p.pool_size;
 39      if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
 40          (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 43      if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 51      struct page_pool *pool;
 54      pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
[all …]
