/net/ceph/
D | msgpool.c |
    14  struct ceph_msgpool *pool = arg;  in msgpool_alloc() local
    17  msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);  in msgpool_alloc()
    19  dout("msgpool_alloc %s failed\n", pool->name);  in msgpool_alloc()
    21  dout("msgpool_alloc %s %p\n", pool->name, msg);  in msgpool_alloc()
    22  msg->pool = pool;  in msgpool_alloc()
    29  struct ceph_msgpool *pool = arg;  in msgpool_free() local
    32  dout("msgpool_release %s %p\n", pool->name, msg);  in msgpool_free()
    33  msg->pool = NULL;  in msgpool_free()
    37  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,  in ceph_msgpool_init() argument
    41  pool->type = type;  in ceph_msgpool_init()
    [all …]

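A note on the pattern above: msgpool_alloc() and msgpool_free() are the element constructor and destructor that ceph_msgpool_init() hands to the kernel mempool API, and each constructed message records its owning pool so it can be recycled on release. Below is a minimal sketch of that wiring, assuming the struct ceph_msgpool fields visible in the listing (type, front_len, name) plus a mempool_t member named pool; the real ceph_msgpool_init() argument list is truncated above, so its exact signature is not reproduced here.

#include <linux/mempool.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/msgpool.h>

static void *example_msgpool_alloc(gfp_t gfp_mask, void *arg)
{
	struct ceph_msgpool *pool = arg;	/* pool_data passed at create time */
	struct ceph_msg *msg;

	msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
	if (msg)
		msg->pool = pool;		/* so release can recycle it */
	return msg;
}

static void example_msgpool_free(void *element, void *arg)
{
	struct ceph_msg *msg = element;

	msg->pool = NULL;			/* drop the back-pointer, then free */
	ceph_msg_put(msg);
}

static int example_msgpool_init(struct ceph_msgpool *pool, int type,
				int front_len, int size, const char *name)
{
	pool->type = type;
	pool->front_len = front_len;
	pool->name = name;
	/* keep 'size' preallocated messages; the callbacks build/destroy them */
	pool->pool = mempool_create(size, example_msgpool_alloc,
				    example_msgpool_free, pool);
	return pool->pool ? 0 : -ENOMEM;
}
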
D | osdmap.c |
     584  if (lhs->pool < rhs->pool)  in ceph_pg_compare()
     586  if (lhs->pool > rhs->pool)  in ceph_pg_compare()
     890  u64 pool;  in decode_pool_names() local
     895  ceph_decode_64_safe(p, end, pool, bad);  in decode_pool_names()
     897  dout(" pool %llu len %d\n", pool, len);  in decode_pool_names()
     899  pi = __lookup_pg_pool(&map->pg_pools, pool);  in decode_pool_names()
    1128  u64 pool;  in __decode_pools() local
    1131  ceph_decode_64_safe(p, end, pool, e_inval);  in __decode_pools()
    1133  pi = __lookup_pg_pool(&map->pg_pools, pool);  in __decode_pools()
    1139  pi->id = pool;  in __decode_pools()
    [all …]

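ceph_pg_compare() at lines 584/586 orders placement-group IDs with the pool id as the primary key. A small sketch of that comparator, assuming the ceph_pg layout implied by the listing: a 64-bit pool id plus a 32-bit seed, which the debugfs entries further down print as "pool.seed".

/* Sketch: qsort-style ordering of placement groups, pool id first,
 * then seed as the tiebreak. */
static int example_pg_compare(const struct ceph_pg *lhs,
			      const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;
	return 0;
}
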
D | debugfs.c |
     94  seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool,  in osdmap_show()
    105  seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool,  in osdmap_show()
    112  seq_printf(s, "pg_upmap %llu.%x [", pg->pgid.pool,  in osdmap_show()
    123  seq_printf(s, "pg_upmap_items %llu.%x [", pg->pgid.pool,  in osdmap_show()
    176  seq_printf(s, "%llu.%x", spgid->pgid.pool, spgid->pgid.seed);  in dump_spgid()
    185  seq_printf(s, "osd%d\t%llu.%x\t", t->osd, t->pgid.pool, t->pgid.seed);  in dump_target()
    298  hoid->pool == S64_MIN) {  in dump_hoid()
    306  seq_printf(s, "%lld:%08x:", hoid->pool, hoid->hash_reverse_bits);  in dump_hoid()

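The osdmap_show() and dump_spgid() lines all print a placement group as "<pool>.<seed>" with the seed in hex, while dump_hoid() prints the pool as a signed value because hobject pools can carry negative sentinels (the S64_MIN check at line 298). A tiny sketch of the pgid formatting, assuming the field types used above:

#include <linux/seq_file.h>

/* Sketch: emit "pool.seed" exactly as the debugfs dumps above do
 * (u64 pool printed in decimal, u32 seed printed in hex). */
static void example_dump_pgid(struct seq_file *s, const struct ceph_pg *pgid)
{
	seq_printf(s, "%llu.%x", pgid->pool, pgid->seed);
}
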
D | osd_client.c |
     972  req->r_base_oloc.pool = layout->pool_id;  in ceph_osdc_new_request()
    1311  WARN_ON(pi->id != t->target_oloc.pool);  in target_should_be_paused()
    1342  pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);  in calc_target()
    1363  t->target_oloc.pool = pi->read_tier;  in calc_target()
    1365  t->target_oloc.pool = pi->write_tier;  in calc_target()
    1367  pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);  in calc_target()
    1383  last_pgid.pool = pgid.pool;  in calc_target()
    1514  if (lhs->pool < rhs->pool)  in hoid_compare()
    1516  if (lhs->pool > rhs->pool)  in hoid_compare()
    1598  ceph_decode_64_safe(p, end, hoid->pool, e_inval);  in decode_hoid()
    [all …]

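The calc_target() hits show the cache-tiering step of request targeting: the base pool is looked up, reads can be redirected to its read_tier and writes to its write_tier, and the pool that will actually serve the op is then resolved again. A hedged sketch of just that step; the helper name and the is_read/is_write flags are illustrative, and the real function's locking, overlay-flag handling, and error paths are omitted.

/* Sketch of the tier-redirect step in calc_target().  Assumes read_tier
 * and write_tier are negative when no cache tier is configured, as in
 * Ceph's pool info. */
static struct ceph_pg_pool_info *
example_resolve_target_pool(struct ceph_osd_client *osdc,
			    const struct ceph_object_locator *base_oloc,
			    struct ceph_object_locator *target_oloc,
			    bool is_read, bool is_write)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, base_oloc->pool);
	if (!pi)
		return NULL;			/* pool no longer in the map */

	target_oloc->pool = base_oloc->pool;
	if (is_read && pi->read_tier >= 0)
		target_oloc->pool = pi->read_tier;
	if (is_write && pi->write_tier >= 0)
		target_oloc->pool = pi->write_tier;

	/* the tier may differ from the base pool, so resolve it again */
	return ceph_pg_pool_by_id(osdc->osdmap, target_oloc->pool);
}
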
D | messenger.c |
    3492  if (m->pool)  in ceph_msg_release()
    3493  ceph_msgpool_put(m->pool, m);  in ceph_msg_release()

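ceph_msg_release() shows the other half of the msgpool contract: a message that was carved out of a pool goes back to it instead of being destroyed. A two-branch sketch; the non-pool teardown path is assumed, and the real function also releases data pages and middle buffers first.

/* Sketch of the release decision at lines 3492-3493. */
static void example_msg_release(struct ceph_msg *m)
{
	if (m->pool)
		ceph_msgpool_put(m->pool, m);	/* recycle into the owning pool */
	else
		ceph_msg_free(m);		/* assumed final-free path */
}
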
/net/rds/
D | ib_rdma.c |
    182  struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)  in rds_ib_reuse_mr() argument
    191  ret = llist_del_first(&pool->clean_list);  in rds_ib_reuse_mr()
    194  if (pool->pool_type == RDS_IB_MR_8K_POOL)  in rds_ib_reuse_mr()
    271  struct rds_ib_mr_pool *pool = ibmr->pool;  in rds_ib_teardown_mr() local
    273  atomic_sub(pinned, &pool->free_pinned);  in rds_ib_teardown_mr()
    277  static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)  in rds_ib_flush_goal() argument
    281  item_count = atomic_read(&pool->item_count);  in rds_ib_flush_goal()
    315  static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,  in list_to_llist_nodes() argument
    339  int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,  in rds_ib_flush_mr_pool() argument
    349  if (pool->pool_type == RDS_IB_MR_8K_POOL)  in rds_ib_flush_mr_pool()
    [all …]

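rds_ib_reuse_mr() recycles an already-registered MR by popping the pool's clean_list, an llist that the flush path refills. A sketch of that consumer side, assuming struct rds_ib_mr embeds a struct llist_node named llnode; note that llist_del_first() callers must still be serialized against each other (llist only allows lock-free producers against a single consumer), which the real code handles outside this helper.

#include <linux/llist.h>

/* Sketch: pop one clean, ready-to-use MR off the pool, or return NULL
 * so the caller falls back to allocating and registering a fresh one. */
static struct rds_ib_mr *example_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct llist_node *node;

	node = llist_del_first(&pool->clean_list);
	if (!node)
		return NULL;

	/* per-pool-type reuse statistics (line 194) are omitted here */
	return llist_entry(node, struct rds_ib_mr, llnode);
}
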
D | ib_fmr.c |
    37  struct rds_ib_mr_pool *pool;  in rds_ib_alloc_fmr() local
    43  pool = rds_ibdev->mr_8k_pool;  in rds_ib_alloc_fmr()
    45  pool = rds_ibdev->mr_1m_pool;  in rds_ib_alloc_fmr()
    47  if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)  in rds_ib_alloc_fmr()
    48  queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);  in rds_ib_alloc_fmr()
    51  if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {  in rds_ib_alloc_fmr()
    52  if (pool->pool_type == RDS_IB_MR_8K_POOL)  in rds_ib_alloc_fmr()
    53  pool = rds_ibdev->mr_1m_pool;  in rds_ib_alloc_fmr()
    55  pool = rds_ibdev->mr_8k_pool;  in rds_ib_alloc_fmr()
    58  ibmr = rds_ib_try_reuse_ibmr(pool);  in rds_ib_alloc_fmr()
    [all …]

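rds_ib_alloc_fmr() chooses between the 8K and 1M MR pools by mapping size, then applies two dirty_count watermarks: past roughly 10% it kicks the delayed flush worker, and past roughly 90% it switches to the other pool rather than stalling. A condensed sketch of that policy; the size cutoff constant and the 10-jiffy delay mirror the listing and the RDS headers but should be treated as assumptions.

/* Sketch of the pool-selection and back-pressure policy above. */
static struct rds_ib_mr_pool *
example_pick_mr_pool(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;

	if (npages <= RDS_MR_8K_MSG_SIZE + 1)		/* small mappings */
		pool = rds_ibdev->mr_8k_pool;
	else						/* large mappings */
		pool = rds_ibdev->mr_1m_pool;

	/* ~10% of entries dirty: start reclaiming in the background */
	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* ~90% dirty: spill over to the other pool instead of waiting */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10)
		pool = (pool->pool_type == RDS_IB_MR_8K_POOL) ?
			rds_ibdev->mr_1m_pool : rds_ibdev->mr_8k_pool;

	return pool;
}

rds_ib_alloc_frmr() below starts from the same pool selection before taking the fast-registration path.
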
D | ib_frmr.c |
    38  struct rds_ib_mr_pool *pool;  in rds_ib_alloc_frmr() local
    44  pool = rds_ibdev->mr_8k_pool;  in rds_ib_alloc_frmr()
    46  pool = rds_ibdev->mr_1m_pool;  in rds_ib_alloc_frmr()
    48  ibmr = rds_ib_try_reuse_ibmr(pool);  in rds_ib_alloc_frmr()
    61  pool->fmr_attr.max_pages);  in rds_ib_alloc_frmr()
    68  ibmr->pool = pool;  in rds_ib_alloc_frmr()
    69  if (pool->pool_type == RDS_IB_MR_8K_POOL)  in rds_ib_alloc_frmr()
    74  if (atomic_read(&pool->item_count) > pool->max_items_soft)  in rds_ib_alloc_frmr()
    75  pool->max_items_soft = pool->max_items;  in rds_ib_alloc_frmr()
    82  atomic_dec(&pool->item_count);  in rds_ib_alloc_frmr()
    [all …]

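When no clean MR can be reused, rds_ib_alloc_frmr() creates a fast-registration MR sized by the pool's fmr_attr.max_pages and, if anything fails, gives back the item_count charge it holds (line 82). A sketch of that allocate-or-roll-back shape, assuming the protection domain is passed in by the caller and that the reservation was already taken on the reuse-miss path:

#include <rdma/ib_verbs.h>

/* Sketch: back a new pool item with a fast-reg MR, undoing the pool
 * accounting if the verbs allocation fails. */
static struct ib_mr *example_new_frwr_mr(struct ib_pd *pd,
					 struct rds_ib_mr_pool *pool)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, pool->fmr_attr.max_pages);
	if (IS_ERR(mr)) {
		atomic_dec(&pool->item_count);	/* roll back the reservation */
		return mr;
	}

	/* the pool has proven it needs more items: lift the soft cap
	 * to the hard maximum (lines 74-75) */
	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	return mr;
}
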
D | ib_mr.h |
    68  struct rds_ib_mr_pool *pool;  member

/net/sunrpc/
D | svc.c |
    473  struct svc_pool *pool = &serv->sv_pools[i];  in __svc_create() local
    478  pool->sp_id = i;  in __svc_create()
    479  INIT_LIST_HEAD(&pool->sp_sockets);  in __svc_create()
    480  INIT_LIST_HEAD(&pool->sp_all_threads);  in __svc_create()
    481  spin_lock_init(&pool->sp_lock);  in __svc_create()
    603  svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)  in svc_rqst_alloc() argument
    614  rqstp->rq_pool = pool;  in svc_rqst_alloc()
    635  svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)  in svc_prepare_thread() argument
    639  rqstp = svc_rqst_alloc(serv, pool, node);  in svc_prepare_thread()
    644  spin_lock_bh(&pool->sp_lock);  in svc_prepare_thread()
    [all …]

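__svc_create() sets up one svc_pool per entry in sv_pools: an index, the list of transports with pending work (sp_sockets), the list of every thread attached to the pool (sp_all_threads), and the lock guarding both; svc_prepare_thread() then registers each new rqstp on its pool under that lock. A sketch of the per-pool initialization loop, assuming serv->sv_nrpools holds the pool count:

#include <linux/sunrpc/svc.h>

/* Sketch of the per-pool setup visible in __svc_create(). */
static void example_init_pools(struct svc_serv *serv)
{
	unsigned int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);	/* transports ready for work */
		INIT_LIST_HEAD(&pool->sp_all_threads);	/* every thread in this pool */
		spin_lock_init(&pool->sp_lock);		/* guards both lists */
	}
}
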
D | svc_xprt.c |
    380  struct svc_pool *pool;  in svc_xprt_do_enqueue() local
    400  pool = svc_pool_for_cpu(xprt->xpt_server, cpu);  in svc_xprt_do_enqueue()
    402  atomic_long_inc(&pool->sp_stats.packets);  in svc_xprt_do_enqueue()
    407  list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {  in svc_xprt_do_enqueue()
    433  atomic_long_inc(&pool->sp_stats.threads_woken);  in svc_xprt_do_enqueue()
    449  spin_lock_bh(&pool->sp_lock);  in svc_xprt_do_enqueue()
    450  list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);  in svc_xprt_do_enqueue()
    451  pool->sp_stats.sockets_queued++;  in svc_xprt_do_enqueue()
    452  spin_unlock_bh(&pool->sp_lock);  in svc_xprt_do_enqueue()
    478  static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)  in svc_xprt_dequeue() argument
    [all …]

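svc_xprt_do_enqueue() maps the current CPU to a pool with svc_pool_for_cpu(), then tries to hand the transport straight to an idle thread from sp_all_threads; only if every thread is busy does it queue the transport on sp_sockets under sp_lock, for svc_xprt_dequeue() to pick up later. A condensed sketch of that decision; the idle test is reduced to a hypothetical rqst_is_idle() helper standing in for the real code's atomic busy-flag test-and-set.

#include <linux/sunrpc/svc_xprt.h>

/* Sketch of the wake-or-queue policy in svc_xprt_do_enqueue().
 * rqst_is_idle() is a stand-in, not a real sunrpc helper. */
static void example_enqueue(struct svc_xprt *xprt, int cpu)
{
	struct svc_pool *pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	struct svc_rqst *rqstp;

	atomic_long_inc(&pool->sp_stats.packets);

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		if (!rqst_is_idle(rqstp))		/* hypothetical idle check */
			continue;
		atomic_long_inc(&pool->sp_stats.threads_woken);
		wake_up_process(rqstp->rq_task);	/* hand the work straight over */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* nobody idle: park the transport for a later svc_xprt_dequeue() */
	spin_lock_bh(&pool->sp_lock);
	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
	pool->sp_stats.sockets_queued++;
	spin_unlock_bh(&pool->sp_lock);
}
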
/net/9p/
D | util.c |
     45  struct idr pool;  member
     62  idr_init(&p->pool);  in p9_idpool_create()
     75  idr_destroy(&p->pool);  in p9_idpool_destroy()
     97  i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);  in p9_idpool_get()
    125  idr_remove(&p->pool, id);  in p9_idpool_put()
    138  return idr_find(&p->pool, id) != NULL;  in p9_idpool_check()

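The 9P ID pool is simply an IDR plus a spinlock: p9_idpool_get() allocates the lowest free integer, p9_idpool_put() releases it, and p9_idpool_check() tests whether an ID is live. A standalone sketch of the same pattern; unlike the real code, it takes no lock and allocates with GFP_KERNEL instead of GFP_NOWAIT.

#include <linux/idr.h>
#include <linux/slab.h>

/* Sketch of an idr-backed ID pool (struct and field names mirror the
 * listing; the spinlock held around idr_alloc() in 9P is omitted). */
struct example_idpool {
	struct idr pool;
};

static struct example_idpool *example_idpool_create(void)
{
	struct example_idpool *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		idr_init(&p->pool);
	return p;
}

static int example_idpool_get(struct example_idpool *p)
{
	/* end == 0 means "no upper limit" for idr_alloc();
	 * returns the new id, or a negative errno on failure */
	return idr_alloc(&p->pool, p, 0, 0, GFP_KERNEL);
}

static void example_idpool_put(struct example_idpool *p, int id)
{
	idr_remove(&p->pool, id);
}

static bool example_idpool_check(struct example_idpool *p, int id)
{
	return idr_find(&p->pool, id) != NULL;
}

static void example_idpool_destroy(struct example_idpool *p)
{
	idr_destroy(&p->pool);
	kfree(p);
}
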
/net/bridge/netfilter/
D | ebt_among.c |
    36  p = &wh->pool[i];  in ebt_mac_wormhash_contains()
    43  p = &wh->pool[i];  in ebt_mac_wormhash_contains()

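In ebt_among the "pool" is a flat array of MAC/IP tuples, and the wormhash's bucket table gives each hash bucket a contiguous slice of that array; both hits above are the loop cursor walking such a slice (once matching MAC plus IP, once MAC only). A generic sketch of that lookup shape; the structure layout, field names, and the hash here are illustrative, not the exact ebtables data model.

#include <linux/etherdevice.h>

/* Generic sketch: buckets are index ranges into one flat pool array,
 * looked up by scanning the bucket's slice linearly. */
struct example_tuple {
	unsigned char cmp[ETH_ALEN];
	__be32 ip;
};

struct example_wormhash {
	int table[257];			/* bucket i spans pool[table[i]..table[i+1]) */
	struct example_tuple pool[];
};

static bool example_wormhash_contains(const struct example_wormhash *wh,
				      const unsigned char *mac, __be32 ip)
{
	const struct example_tuple *p;
	int key = mac[5];		/* toy hash; the real one mixes more bytes */
	int i;

	for (i = wh->table[key]; i < wh->table[key + 1]; i++) {
		p = &wh->pool[i];
		if (ether_addr_equal(p->cmp, mac) && (!ip || p->ip == ip))
			return true;
	}
	return false;
}
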