/net/core/

  page_pool.c
      33   #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)   argument
      35   #define recycle_stat_inc(pool, __stat) \   argument
      41   #define recycle_stat_add(pool, __stat, val) \   argument
      72   bool page_pool_get_stats(struct page_pool *pool,   in page_pool_get_stats()
     143   #define alloc_stat_inc(pool, __stat)   argument
     144   #define recycle_stat_inc(pool, __stat)   argument
     145   #define recycle_stat_add(pool, __stat, val)   argument
     148   static bool page_pool_producer_lock(struct page_pool *pool)   in page_pool_producer_lock()
     161   static void page_pool_producer_unlock(struct page_pool *pool,   in page_pool_producer_unlock()
     171   static int page_pool_init(struct page_pool *pool,   in page_pool_init()
     [all …]
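
The page_pool.c hits above are the pool's software statistics macros plus the constructor path (page_pool_init() is what page_pool_create() runs internally). As a rough sketch of how a driver sits on top of this API, the fragment below creates a pool, draws one page and recycles it. The parameter values, the example_ helper names and the exact header location are assumptions to check against your tree, not something taken from the search results.

#include <linux/numa.h>
#include <net/page_pool.h>	/* page_pool_create() etc.; newer trees split
				 * this into net/page_pool/types.h and
				 * net/page_pool/helpers.h */

/* Create a small pool for an RX ring; sizes and node are illustrative. */
static struct page_pool *example_rx_pool_create(struct device *dev)
{
	struct page_pool_params pp = {
		.order		= 0,		/* single pages */
		.pool_size	= 256,		/* roughly the RX ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
	};

	return page_pool_create(&pp);		/* calls page_pool_init() */
}

/* Allocate one page and give it straight back, to show the recycle path
 * (page_pool_put_full_page() instead of put_page()). */
static void example_rx_alloc_and_recycle(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (page)
		page_pool_put_full_page(pool, page, false);
}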

/net/xdp/

  xsk_buff_pool.c
      11   void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)   in xp_add_xsk()
      23   void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)   in xp_del_xsk()
      35   void xp_destroy(struct xsk_buff_pool *pool)   in xp_destroy()
      45   int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)   in xp_alloc_tx_descs()
      59   struct xsk_buff_pool *pool;   in xp_create_and_assign_umem()   local
     117   void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)   in xp_set_rxq_info()
     126   static void xp_disable_drv_zc(struct xsk_buff_pool *pool)   in xp_disable_drv_zc()
     149   int xp_assign_dev(struct xsk_buff_pool *pool,   in xp_assign_dev()
     228   int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,   in xp_assign_dev_shared()
     245   void xp_clear_dev(struct xsk_buff_pool *pool)   in xp_clear_dev()
     [all …]
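
Most of the xp_* functions above are reached from drivers through the thin wrappers in include/net/xdp_sock_drv.h; for instance xsk_pool_set_rxq_info() ends up in xp_set_rxq_info(), and xsk_buff_alloc()/xsk_buff_free() in the pool's buffer alloc and free. The sketch below shows that driver-side view for the RX direction; the example_ names and the budget handling are invented, and it only allocates and frees buffers rather than posting them to real hardware.

#include <net/xdp_sock_drv.h>

/* Bind the pool's buffers to one RX queue (wraps xp_set_rxq_info()). */
static void example_zc_rx_setup(struct xsk_buff_pool *pool,
				struct xdp_rxq_info *rxq)
{
	xsk_pool_set_rxq_info(pool, rxq);
}

/* Pull up to "budget" buffers out of the fill ring.  A real driver would
 * program xsk_buff_xdp_get_dma(xdp) into its RX descriptors and keep the
 * xdp_buff until completion; this sketch frees them again immediately. */
static u32 example_zc_rx_fill(struct xsk_buff_pool *pool, u32 budget)
{
	u32 filled = 0;

	while (filled < budget) {
		struct xdp_buff *xdp = xsk_buff_alloc(pool);

		if (!xdp)			/* fill queue is empty */
			break;

		xsk_buff_free(xdp);		/* return buffer to the pool */
		filled++;
	}

	return filled;
}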

  xsk_queue.h
     143   static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,   in xp_aligned_validate_desc()
     162   static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,   in xp_unaligned_validate_desc()
     182   static inline bool xp_validate_desc(struct xsk_buff_pool *pool,   in xp_validate_desc()
     196   struct xsk_buff_pool *pool)   in xskq_cons_is_valid_desc()
     207   struct xsk_buff_pool *pool)   in xskq_cons_read_desc()
     226   static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,   in parse_desc()
     234   u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,   in xskq_cons_read_desc_batch()
     320   struct xsk_buff_pool *pool)   in xskq_cons_peek_desc()
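
xp_aligned_validate_desc() and xp_unaligned_validate_desc() are where a TX descriptor handed in by user space is checked against the pool's geometry before it is accepted. The standalone fragment below only illustrates the aligned-mode idea: a descriptor must be non-empty and must fit inside a single chunk that lies within the UMEM. It is a simplified model with made-up struct and function names, not the kernel's actual checks.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the pool geometry the real checks consult. */
struct example_pool_geom {
	uint64_t chunk_size;	/* power of two in aligned mode */
	uint64_t umem_size;	/* total size of the UMEM region */
};

/* Accept a descriptor only if [addr, addr + len) is non-empty and stays
 * inside one chunk of the UMEM. */
static bool example_aligned_desc_ok(const struct example_pool_geom *pool,
				    uint64_t addr, uint32_t len)
{
	uint64_t offset = addr & (pool->chunk_size - 1);

	if (len == 0 || len > pool->chunk_size)
		return false;			/* empty or oversized */
	if (offset + len > pool->chunk_size)
		return false;			/* crosses a chunk boundary */
	if (addr >= pool->umem_size)
		return false;			/* starts outside the UMEM */
	return true;
}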

  xsk.c
      39   void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)   in xsk_set_rx_need_wakeup()
      49   void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)   in xsk_set_tx_need_wakeup()
      66   void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)   in xsk_clear_rx_need_wakeup()
      76   void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)   in xsk_clear_tx_need_wakeup()
      93   bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)   in xsk_uses_need_wakeup()
     123   int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,   in xsk_reg_pool_at_qid()
     398   void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)   in xsk_tx_completed()
     404   void xsk_tx_release(struct xsk_buff_pool *pool)   in xsk_tx_release()
     418   bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)   in xsk_tx_peek_desc()
     449   static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)   in xsk_tx_peek_release_fallback()
     [all …]
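
The need_wakeup helpers and the TX trio xsk_tx_peek_desc() / xsk_tx_release() / xsk_tx_completed() listed above are the pool-facing half of a zero-copy TX path: the driver peeks descriptors from the socket's TX ring, posts them to hardware, releases the ring, and later credits the completion ring. The sketch below follows that shape using the signatures shown in the listing; the example_ functions, the budget and the hardware-posting step are placeholders.

#include <net/xdp_sock_drv.h>

/* Drain up to "budget" descriptors from the pool's TX ring.  The step
 * that writes the DMA address and length into a hardware descriptor is
 * only described in the comment. */
static u32 example_xsk_xmit(struct xsk_buff_pool *pool, u32 budget)
{
	struct xdp_desc desc;
	u32 sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

		/* Hypothetical: post dma + desc.len to the HW TX ring
		 * and ring the doorbell. */
		(void)dma;
		sent++;
	}

	/* Hand the consumed TX ring entries back to the socket. */
	xsk_tx_release(pool);

	return sent;
}

/* Called from the TX completion path once the HW is done with "completed"
 * frames: credit the completion ring and, if the socket runs in
 * need_wakeup mode, ask user space to kick us for more work. */
static void example_xsk_tx_done(struct xsk_buff_pool *pool, u32 completed)
{
	if (completed)
		xsk_tx_completed(pool, completed);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);
}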

  xsk_diag.c
      49   struct xsk_buff_pool *pool = xs->pool;   in xsk_diag_put_umem()   local

/net/ceph/

  msgpool.c
      14   struct ceph_msgpool *pool = arg;   in msgpool_alloc()   local
      30   struct ceph_msgpool *pool = arg;   in msgpool_free()   local
      38   int ceph_msgpool_init(struct ceph_msgpool *pool, int type,   in ceph_msgpool_init()
      53   void ceph_msgpool_destroy(struct ceph_msgpool *pool)   in ceph_msgpool_destroy()
      59   struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,   in ceph_msgpool_get()
      81   void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)   in ceph_msgpool_put()

  osdmap.c
     956   u64 pool;   in decode_pool_names()   local
    1347   u64 pool;   in __decode_pools()   local
    1972   u64 pool;   in osdmap_apply_incremental()   local

/net/rds/

  ib_rdma.c
     198   struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)   in rds_ib_reuse_mr()
     275   struct rds_ib_mr_pool *pool = ibmr->pool;   in rds_ib_teardown_mr()   local
     281   static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)   in rds_ib_flush_goal()
     342   int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,   in rds_ib_flush_mr_pool()
     440   struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)   in rds_ib_try_reuse_ibmr()
     479   struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);   in rds_ib_mr_pool_flush_worker()   local
     487   struct rds_ib_mr_pool *pool = ibmr->pool;   in rds_ib_free_mr()   local
     634   void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)   in rds_ib_destroy_mr_pool()
     646   struct rds_ib_mr_pool *pool;   in rds_ib_create_mr_pool()   local

  ib_frmr.c
      56   struct rds_ib_mr_pool *pool;   in rds_ib_alloc_frmr()   local
     108   struct rds_ib_mr_pool *pool = ibmr->pool;   in rds_ib_free_frmr()   local
     188   struct rds_ib_mr_pool *pool,   in rds_ib_map_frmr()
     439   struct rds_ib_mr_pool *pool = ibmr->pool;   in rds_ib_free_frmr_list()   local

  ib_mr.h
      68   struct rds_ib_mr_pool *pool;   member

/net/sunrpc/

  svc.c
     506   struct svc_pool *pool = &serv->sv_pools[i];   in __svc_create()   local
     591   struct svc_pool *pool = &serv->sv_pools[i];   in svc_destroy()   local
     637   svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)   in svc_rqst_alloc()
     674   svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)   in svc_prepare_thread()
     703   void svc_pool_wake_idle_thread(struct svc_pool *pool)   in svc_pool_wake_idle_thread()
     725   svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)   in svc_pool_next()
     731   svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)   in svc_pool_victim()
     763   svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)   in svc_start_kthreads()
     798   svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)   in svc_stop_kthreads()
     835   svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)   in svc_set_num_threads()
     [all …]
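
svc_set_num_threads() and svc_pool_wake_idle_thread() above are the knobs a service owner (nfsd, lockd and friends) uses to size and nudge a pool's worker threads. A minimal sketch of that use follows; it assumes the declarations live in linux/sunrpc/svc.h as in recent trees, that the caller already serializes reconfiguration the way nfsd does with nfsd_mutex, and the example_ helper name is invented.

#include <linux/sunrpc/svc.h>

/* Resize an already-created RPC service to nrservs worker threads and
 * prod one idle worker so queued transports get picked up promptly.
 * The caller is expected to hold whatever mutex guards the service
 * (nfsd uses nfsd_mutex for this). */
static int example_resize_service(struct svc_serv *serv, int nrservs)
{
	int err;

	/* pool == NULL: spread the nrservs threads over all of the
	 * service's pools rather than pinning them to one. */
	err = svc_set_num_threads(serv, NULL, nrservs);
	if (err)
		return err;

	/* sv_pools[] is the pool array set up in __svc_create() above;
	 * wake an idle thread in the first pool. */
	svc_pool_wake_idle_thread(&serv->sv_pools[0]);
	return 0;
}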

  svc_xprt.c
     459   struct svc_pool *pool;   in svc_xprt_enqueue()   local
     486   static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)   in svc_xprt_dequeue()
     582   struct svc_pool *pool = &serv->sv_pools[0];   in svc_wake_up()   local
     703   struct svc_pool *pool = rqstp->rq_pool;   in rqst_should_sleep()   local
     726   struct svc_pool *pool = rqstp->rq_pool;   in svc_get_next_xprt()   local
    1086   struct svc_pool *pool;   in svc_dequeue_net()   local
    1392   struct svc_pool *pool = p;   in svc_pool_stats_next()   local
    1416   struct svc_pool *pool = p;   in svc_pool_stats_show()