/net/core/ |
D | dst_cache.c |
     69  if (!dst_cache->cache)  in dst_cache_get()
     72  return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache));  in dst_cache_get()
     81  if (!dst_cache->cache)  in dst_cache_get_ip4()
     84  idst = this_cpu_ptr(dst_cache->cache);  in dst_cache_get_ip4()
     99  if (!dst_cache->cache)  in dst_cache_set_ip4()
    102  idst = this_cpu_ptr(dst_cache->cache);  in dst_cache_set_ip4()
    114  if (!dst_cache->cache)  in dst_cache_set_ip6()
    117  idst = this_cpu_ptr(dst_cache->cache);  in dst_cache_set_ip6()
    118  dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst,  in dst_cache_set_ip6()
    130  if (!dst_cache->cache)  in dst_cache_get_ip6()
    [all …]
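Every accessor matched above opens with the same `if (!dst_cache->cache)` guard: the per-cpu storage only exists between dst_cache_init() and dst_cache_destroy(). A minimal sketch of that lifecycle, assuming a hypothetical demo_tunnel container struct:

#include <linux/gfp.h>
#include <net/dst_cache.h>

struct demo_tunnel {                    /* hypothetical container */
    struct dst_cache dcache;
};

static int demo_tunnel_init(struct demo_tunnel *t)
{
    /* Allocates the per-cpu storage tested by the guards above;
     * until this succeeds, dst_cache_get() simply returns NULL. */
    return dst_cache_init(&t->dcache, GFP_KERNEL);
}

static void demo_tunnel_fini(struct demo_tunnel *t)
{
    /* Releases any cached route and the per-cpu storage. */
    dst_cache_destroy(&t->dcache);
}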
|
D | skbuff.c |
    180  struct kmem_cache *cache;  in __alloc_skb() local
    186  cache = (flags & SKB_ALLOC_FCLONE)  in __alloc_skb()
    193  skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);  in __alloc_skb()
    250  kmem_cache_free(cache, skb);  in __alloc_skb()
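__alloc_skb() picks one of two slab caches up front and must free back to the same one on its error path. A sketch of that select-then-allocate pattern; the two kmem_cache pointers are file-local in skbuff.c, so this sketch takes them as parameters:

#include <linux/skbuff.h>
#include <linux/slab.h>

static struct sk_buff *demo_alloc(int flags, gfp_t gfp_mask, int node,
                                  struct kmem_cache *head_cache,
                                  struct kmem_cache *fclone_cache)
{
    struct kmem_cache *cache;
    struct sk_buff *skb;

    /* FCLONE allocations come from a cache whose objects have room
     * for a companion clone; everything else uses the plain cache. */
    cache = (flags & SKB_ALLOC_FCLONE) ? fclone_cache : head_cache;

    skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
    if (!skb)
        return NULL;

    /* ... on a later setup failure, the unwind must free back to
     * the cache chosen above: kmem_cache_free(cache, skb); */
    return skb;
}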
|
D | neighbour.c |
   1342  dev->header_ops->cache(n, hh, prot);  in neigh_hh_init()
   1358  if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))  in neigh_resolve_output()
|
/net/rds/ |
D | ib_recv.c |
     88  static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)  in rds_ib_cache_xfer_to_ready() argument
     92  tmp = xchg(&cache->xfer, NULL);  in rds_ib_cache_xfer_to_ready()
     94  if (cache->ready)  in rds_ib_cache_xfer_to_ready()
     95  list_splice_entire_tail(tmp, cache->ready);  in rds_ib_cache_xfer_to_ready()
     97  cache->ready = tmp;  in rds_ib_cache_xfer_to_ready()
    101  static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)  in rds_ib_recv_alloc_cache() argument
    106  cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);  in rds_ib_recv_alloc_cache()
    107  if (!cache->percpu)  in rds_ib_recv_alloc_cache()
    111  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_recv_alloc_cache()
    115  cache->xfer = NULL;  in rds_ib_recv_alloc_cache()
    [all …]
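Two moves are visible here: the refill cache allocates one head per CPU, and a producer-filled "xfer" list is drained atomically with xchg() so the transfer never races with refill. A simplified sketch with stand-in types (list_splice_entire_tail() is an RDS-local helper, reduced to a plain assignment here):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/percpu.h>

struct demo_head {                      /* stand-in for rds_ib_cache_head */
    struct list_head list;
};

struct demo_refill_cache {
    struct demo_head __percpu *percpu;
    struct list_head *xfer;             /* filled by producers */
    struct list_head *ready;            /* drained by the consumer */
};

static int demo_alloc_cache(struct demo_refill_cache *cache, gfp_t gfp)
{
    int cpu;

    cache->percpu = alloc_percpu_gfp(struct demo_head, gfp);
    if (!cache->percpu)
        return -ENOMEM;

    for_each_possible_cpu(cpu)
        INIT_LIST_HEAD(&per_cpu_ptr(cache->percpu, cpu)->list);

    cache->xfer = NULL;
    cache->ready = NULL;
    return 0;
}

static void demo_xfer_to_ready(struct demo_refill_cache *cache)
{
    struct list_head *tmp;

    /* Take the whole pending list in one atomic step. */
    tmp = xchg(&cache->xfer, NULL);
    if (tmp)
        cache->ready = tmp;     /* real code splices onto an existing tail */
}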
|
/net/ipv6/ |
D | ip6mr.c |
    114  struct sk_buff *skb, struct mfc6_cache *cache);
    349  struct list_head *cache;  member
    362  it->cache = &mrt->mfc6_cache_array[it->ct];  in ipmr_mfc_seq_idx()
    363  list_for_each_entry(mfc, it->cache, list)  in ipmr_mfc_seq_idx()
    370  it->cache = &mrt->mfc6_unres_queue;  in ipmr_mfc_seq_idx()
    371  list_for_each_entry(mfc, it->cache, list)  in ipmr_mfc_seq_idx()
    376  it->cache = NULL;  in ipmr_mfc_seq_idx()
    501  it->cache = NULL;  in ipmr_mfc_seq_start()
    518  if (mfc->list.next != it->cache)  in ipmr_mfc_seq_next()
    521  if (it->cache == &mrt->mfc6_unres_queue)  in ipmr_mfc_seq_next()
    [all …]
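The /proc iterator walks two different containers behind one cursor: the resolved-entry buckets first, then the unresolved queue, with it->cache recording which list the cursor is in so seq_next knows when a list ends. A simplified sketch of that two-phase indexing, with types reduced to bare lists:

#include <linux/list.h>
#include <linux/types.h>

struct demo_iter {
    struct list_head *cache;    /* list currently being walked */
    int ct;                     /* bucket index within the array */
};

static struct list_head *demo_seq_idx(struct demo_iter *it,
                                      struct list_head *buckets, int nbuckets,
                                      struct list_head *unres, loff_t pos)
{
    struct list_head *e;

    /* Phase 1: resolved entries, bucket by bucket. */
    for (it->ct = 0; it->ct < nbuckets; it->ct++) {
        it->cache = &buckets[it->ct];
        list_for_each(e, it->cache)
            if (pos-- == 0)
                return e;
    }

    /* Phase 2: fall through into the unresolved queue. */
    it->cache = unres;
    list_for_each(e, it->cache)
        if (pos-- == 0)
            return e;

    it->cache = NULL;           /* past the end */
    return NULL;
}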
|
D | seg6_iptunnel.c |
     36  struct dst_cache cache;  member
    286  dst = dst_cache_get(&slwt->cache);  in seg6_input()
    296  dst_cache_set_ip6(&slwt->cache, dst,  in seg6_input()
    325  dst = dst_cache_get(&slwt->cache);  in seg6_output()
    347  dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);  in seg6_output()
    423  err = dst_cache_init(&slwt->cache, GFP_ATOMIC);  in seg6_build_state()
    446  dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache);  in seg6_destroy_state()
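This is the caller side of the dst_cache API: seg6_output() consults the cache before doing a full route lookup and publishes the result on a miss. A sketch of that miss/fill path, with the flowi6 setup and error handling trimmed:

#include <net/dst_cache.h>
#include <net/ip6_route.h>

static struct dst_entry *demo_route_cached(struct net *net, struct sock *sk,
                                           struct flowi6 *fl6,
                                           struct dst_cache *cache)
{
    struct dst_entry *dst;

    dst = dst_cache_get(cache);
    if (dst)
        return dst;                     /* per-cpu fast path */

    dst = ip6_route_output(net, sk, fl6);       /* slow path */
    if (!dst->error)
        dst_cache_set_ip6(cache, dst, &fl6->saddr);

    return dst;                         /* caller checks dst->error */
}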
|
D | calipso.c |
    231  secattr->cache = entry->lsm_data;  in calipso_cache_check()
    299  refcount_inc(&secattr->cache->refcount);  in calipso_cache_add()
    300  entry->lsm_data = secattr->cache;  in calipso_cache_add()
|
D | ndisc.c |
    358  if (dev->header_ops->cache)  in ndisc_constructor()
|
/net/sunrpc/ |
D | auth.c |
    400  rpcauth_clear_credcache(struct rpc_cred_cache *cache)  in rpcauth_clear_credcache() argument
    405  unsigned int hashsize = 1U << cache->hashbits;  in rpcauth_clear_credcache()
    409  spin_lock(&cache->lock);  in rpcauth_clear_credcache()
    411  head = &cache->hashtable[i];  in rpcauth_clear_credcache()
    423  spin_unlock(&cache->lock);  in rpcauth_clear_credcache()
    434  struct rpc_cred_cache *cache = auth->au_credcache;  in rpcauth_destroy_credcache() local
    436  if (cache) {  in rpcauth_destroy_credcache()
    438  rpcauth_clear_credcache(cache);  in rpcauth_destroy_credcache()
    439  kfree(cache->hashtable);  in rpcauth_destroy_credcache()
    440  kfree(cache);  in rpcauth_destroy_credcache()
    [all …]
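rpcauth_destroy_credcache() layers a clear over the table teardown: every bucket is emptied under the cache spinlock, then the table itself is freed. A simplified sketch (the real clear moves each cred to a private list so it can be torn down outside the lock; that detail is reduced to a bare unhash here):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_credcache {
    spinlock_t lock;
    unsigned int hashbits;
    struct hlist_head *hashtable;
};

static void demo_clear_credcache(struct demo_credcache *cache)
{
    unsigned int hashsize = 1U << cache->hashbits;
    unsigned int i;

    spin_lock(&cache->lock);
    for (i = 0; i < hashsize; i++) {
        struct hlist_head *head = &cache->hashtable[i];

        while (!hlist_empty(head))
            hlist_del_init(head->first);        /* unhash one cred */
    }
    spin_unlock(&cache->lock);
}

static void demo_destroy_credcache(struct demo_credcache *cache)
{
    if (cache) {
        demo_clear_credcache(cache);
        kfree(cache->hashtable);
        kfree(cache);
    }
}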
|
D | Makefile |
     15  sunrpc_syms.o cache.o rpc_pipe.o \
|
/net/ipv4/ |
D | ipmr.c |
    107  struct mfc_cache *cache, int local);
    738  static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,  in ipmr_update_thresholds() argument
    743  cache->mfc_un.res.minvif = MAXVIFS;  in ipmr_update_thresholds()
    744  cache->mfc_un.res.maxvif = 0;  in ipmr_update_thresholds()
    745  memset(cache->mfc_un.res.ttls, 255, MAXVIFS);  in ipmr_update_thresholds()
    750  cache->mfc_un.res.ttls[vifi] = ttls[vifi];  in ipmr_update_thresholds()
    751  if (cache->mfc_un.res.minvif > vifi)  in ipmr_update_thresholds()
    752  cache->mfc_un.res.minvif = vifi;  in ipmr_update_thresholds()
    753  if (cache->mfc_un.res.maxvif <= vifi)  in ipmr_update_thresholds()
    754  cache->mfc_un.res.maxvif = vifi + 1;  in ipmr_update_thresholds()
    [all …]
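ipmr_update_thresholds() recomputes the forwarding window for one cache entry: a TTL of 255 marks a vif as excluded, and minvif/maxvif are maintained as a half-open [minvif, maxvif) range so the forwarding loop can skip inactive vifs. A sketch of that scan (the VIF_EXISTS() check is omitted; MAXVIFS is 32 in the kernel, mirrored here):

#include <linux/string.h>

#define DEMO_MAXVIFS 32                 /* mirrors the kernel's MAXVIFS */

struct demo_res {
    unsigned char ttls[DEMO_MAXVIFS];
    int minvif, maxvif;
};

static void demo_update_thresholds(struct demo_res *res,
                                   const unsigned char *ttls)
{
    int vifi;

    res->minvif = DEMO_MAXVIFS;         /* "empty range" until a hit */
    res->maxvif = 0;
    memset(res->ttls, 255, DEMO_MAXVIFS);

    for (vifi = 0; vifi < DEMO_MAXVIFS; vifi++) {
        if (ttls[vifi] && ttls[vifi] < 255) {
            res->ttls[vifi] = ttls[vifi];
            if (res->minvif > vifi)
                res->minvif = vifi;
            if (res->maxvif <= vifi)
                res->maxvif = vifi + 1; /* half-open upper bound */
        }
    }
}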
|
D | tcp_input.c |
   1674  static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)  in tcp_sack_cache_ok() argument
   1676  return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);  in tcp_sack_cache_ok()
   1688  struct tcp_sack_block *cache;  in tcp_sacktag_write_queue() local
   1783  cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);  in tcp_sacktag_write_queue()
   1785  cache = tp->recv_sack_cache;  in tcp_sacktag_write_queue()
   1787  while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&  in tcp_sacktag_write_queue()
   1788  !cache->end_seq)  in tcp_sacktag_write_queue()
   1789  cache++;  in tcp_sacktag_write_queue()
   1802  while (tcp_sack_cache_ok(tp, cache) &&  in tcp_sacktag_write_queue()
   1803  !before(start_seq, cache->end_seq))  in tcp_sacktag_write_queue()
   [all …]
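The SACK cache is a small fixed array of blocks processed on the previous ACK; tcp_sacktag_write_queue() walks it with a raw pointer, so the "ok" test is pure pointer arithmetic against the array end. A sketch of the bounds check and the skip-empty-entries loop (recv_sack_cache has four entries in struct tcp_sock):

#include <linux/kernel.h>       /* ARRAY_SIZE() */
#include <linux/types.h>

struct demo_sack_block {
    u32 start_seq;
    u32 end_seq;
};

struct demo_tp {
    struct demo_sack_block recv_sack_cache[4];
};

static int demo_sack_cache_ok(const struct demo_tp *tp,
                              const struct demo_sack_block *cache)
{
    /* Valid while the cursor is still inside the array. */
    return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}

static const struct demo_sack_block *
demo_skip_empty(const struct demo_tp *tp, const struct demo_sack_block *cache)
{
    /* Zeroed blocks are unused slots left over from the last round. */
    while (demo_sack_cache_ok(tp, cache) &&
           !cache->start_seq && !cache->end_seq)
        cache++;
    return cache;
}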
|
D | cipso_ipv4.c |
    269  secattr->cache = entry->lsm_data;  in cipso_v4_cache_check()
    335  refcount_inc(&secattr->cache->refcount);  in cipso_v4_cache_add()
    336  entry->lsm_data = secattr->cache;  in cipso_v4_cache_add()
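cipso_v4_cache_add() (and its IPv6 twin in calipso.c above) never copies the LSM blob into the cache: the entry takes a reference on the object already attached to the secattr, and the check path hands the same pointer back. A sketch with stand-in types:

#include <linux/refcount.h>

struct demo_lsm_blob {                  /* stand-in for the LSM data */
    refcount_t refcount;
};

struct demo_cache_entry {
    struct demo_lsm_blob *lsm_data;
};

struct demo_secattr {
    struct demo_lsm_blob *cache;
};

static void demo_cache_add(struct demo_cache_entry *entry,
                           struct demo_secattr *secattr)
{
    refcount_inc(&secattr->cache->refcount);    /* share, don't copy */
    entry->lsm_data = secattr->cache;
}

static void demo_cache_check(struct demo_cache_entry *entry,
                             struct demo_secattr *secattr)
{
    secattr->cache = entry->lsm_data;   /* hand the same blob back */
}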
|
D | arp.c |
    282  if (dev->header_ops->cache)  in arp_constructor()
|
/net/tipc/ |
D | monitor.c |
     92  struct tipc_mon_domain cache;  member
    243  struct tipc_mon_domain *cache = &mon->cache;  in mon_update_local_domain() local
    263  cache->members[i] = htonl(peer->addr);  in mon_update_local_domain()
    269  cache->len = htons(dom->len);  in mon_update_local_domain()
    270  cache->gen = htons(dom->gen);  in mon_update_local_domain()
    271  cache->member_cnt = htons(member_cnt);  in mon_update_local_domain()
    272  cache->up_map = cpu_to_be64(dom->up_map);  in mon_update_local_domain()
    548  len = ntohs(mon->cache.len);  in tipc_mon_prep()
    550  memcpy(data, &mon->cache, len);  in tipc_mon_prep()
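The monitor keeps its domain record cached in network byte order, so the transmit path can memcpy() it straight into an outgoing message without converting on every send. A sketch of the fill side with simplified field types (TIPC's member array is MAX_MON_DOMAIN = 64 entries):

#include <asm/byteorder.h>
#include <linux/types.h>

#define DEMO_MAX_MEMBERS 64             /* mirrors TIPC's MAX_MON_DOMAIN */

struct demo_mon_domain {                /* wire-format record */
    __be16 len;
    __be16 gen;
    __be16 member_cnt;
    __be64 up_map;
    __be32 members[DEMO_MAX_MEMBERS];
};

static void demo_fill_cache(struct demo_mon_domain *cache,
                            u16 len, u16 gen, u16 member_cnt,
                            u64 up_map, const u32 *member_addrs)
{
    int i;

    for (i = 0; i < member_cnt; i++)
        cache->members[i] = htonl(member_addrs[i]);

    cache->len = htons(len);
    cache->gen = htons(gen);
    cache->member_cnt = htons(member_cnt);
    cache->up_map = cpu_to_be64(up_map);
    /* Transmit side (cf. tipc_mon_prep): len = ntohs(cache->len);
     * memcpy(data, cache, len); -- no per-send conversion needed. */
}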
|
/net/bluetooth/ |
D | hci_core.c |
   1033  struct discovery_state *cache = &hdev->discovery;  in hci_inquiry_cache_flush() local
   1036  list_for_each_entry_safe(p, n, &cache->all, all) {  in hci_inquiry_cache_flush()
   1041  INIT_LIST_HEAD(&cache->unknown);  in hci_inquiry_cache_flush()
   1042  INIT_LIST_HEAD(&cache->resolve);  in hci_inquiry_cache_flush()
   1048  struct discovery_state *cache = &hdev->discovery;  in hci_inquiry_cache_lookup() local
   1051  BT_DBG("cache %p, %pMR", cache, bdaddr);  in hci_inquiry_cache_lookup()
   1053  list_for_each_entry(e, &cache->all, all) {  in hci_inquiry_cache_lookup()
   1064  struct discovery_state *cache = &hdev->discovery;  in hci_inquiry_cache_lookup_unknown() local
   1067  BT_DBG("cache %p, %pMR", cache, bdaddr);  in hci_inquiry_cache_lookup_unknown()
   1069  list_for_each_entry(e, &cache->unknown, list) {  in hci_inquiry_cache_lookup_unknown()
   [all …]
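Inquiry cache entries sit on up to three lists at once: "all" owns every entry, while "unknown" and "resolve" are secondary views onto the same objects. That is why the flush can free through "all" and merely re-initialise the other two heads. A sketch with simplified types:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_inquiry_entry {
    struct list_head all;       /* owning list */
    struct list_head list;      /* unknown/resolve linkage */
};

struct demo_discovery_state {
    struct list_head all;
    struct list_head unknown;
    struct list_head resolve;
};

static void demo_inquiry_cache_flush(struct demo_discovery_state *cache)
{
    struct demo_inquiry_entry *p, *n;

    /* _safe variant: each entry is freed while we walk. */
    list_for_each_entry_safe(p, n, &cache->all, all) {
        list_del(&p->all);
        kfree(p);
    }

    /* The secondary lists pointed into the freed entries; reset them. */
    INIT_LIST_HEAD(&cache->unknown);
    INIT_LIST_HEAD(&cache->resolve);
}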
|
D | hci_debugfs.c |
    425  struct discovery_state *cache = &hdev->discovery;  in inquiry_cache_show() local
    430  list_for_each_entry(e, &cache->all, all) {  in inquiry_cache_show()
|
/net/decnet/ |
D | TODO |
      5  just the 60 second On-Ethernet cache value.
|
/net/netfilter/ |
D | nf_conntrack_ecache.c |
    196  events = xchg(&e->cache, 0);  in nf_ct_deliver_cached_events()
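This single match is the heart of conntrack event delivery: producers OR event bits into e->cache, and delivery drains them with one atomic xchg() so no bit is lost or delivered twice. A sketch with a stand-in type:

#include <linux/atomic.h>

struct demo_ecache {                    /* stand-in for nf_conntrack_ecache */
    unsigned long cache;                /* pending event bits */
};

static unsigned long demo_take_events(struct demo_ecache *e)
{
    /* Read-and-clear in one atomic step, so a bit set concurrently
     * by a producer is either seen now or kept for the next drain. */
    return xchg(&e->cache, 0);
}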
|
/net/ethernet/ |
D | eth.c |
    349  .cache = eth_header_cache,
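This is the assignment that the checks in neighbour.c, arp.c and ndisc.c above test for: only devices whose header_ops provide a .cache hook get their hard header pre-built into the neighbour's hh_cache. A sketch of the wiring (eth_header_cache() is the real Ethernet hook; the other members are elided):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static const struct header_ops demo_eth_header_ops = {
    .cache = eth_header_cache,
    /* .create = eth_header, .parse = eth_header_parse, ... */
};

/* Neighbour side (cf. neigh_hh_init/neigh_resolve_output above):
 *
 *     dev->header_ops->cache(n, hh, prot);    // build the header once
 *     if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
 *             ...                             // not yet cached
 */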
|
/net/openvswitch/ |
D | conntrack.c |
   1076  struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);  in ovs_ct_commit() local
   1078  if (cache)  in ovs_ct_commit()
   1079  cache->ctmask = info->eventmask;  in ovs_ct_commit()
|
/net/netfilter/ipvs/ |
D | Kconfig |
    181  destination IP load balancing. It is usually used in cache cluster.
    196  usually used in cache cluster. It differs from the LBLC scheduling
|