/net/atm/

D  mpoa_caches.c
    38  in_cache_entry *entry;    in in_cache_get()  local
    41  entry = client->in_cache;    in in_cache_get()
    42  while (entry != NULL) {    in in_cache_get()
    43  if (entry->ctrl_info.in_dst_ip == dst_ip) {    in in_cache_get()
    44  refcount_inc(&entry->use);    in in_cache_get()
    46  return entry;    in in_cache_get()
    48  entry = entry->next;    in in_cache_get()
    59  in_cache_entry *entry;    in in_cache_get_with_mask()  local
    62  entry = client->in_cache;    in in_cache_get_with_mask()
    63  while (entry != NULL) {    in in_cache_get_with_mask()
        [all …]
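The mpoa_caches.c hits above show a common kernel idiom: walk a singly linked cache and take a reference on the match before returning it, so the caller owns the entry until it drops that reference. A minimal user-space sketch of the same idea, with C11 atomics standing in for the kernel's refcount_t (the struct and function names are invented for illustration):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct cache_entry {
    uint32_t dst_ip;                  /* lookup key */
    atomic_uint use;                  /* reference count */
    struct cache_entry *next;
};

/* Walk the list; on a hit, take a reference so the caller owns the entry. */
static struct cache_entry *cache_get(struct cache_entry *head, uint32_t dst_ip)
{
    for (struct cache_entry *e = head; e != NULL; e = e->next) {
        if (e->dst_ip == dst_ip) {
            atomic_fetch_add(&e->use, 1);
            return e;
        }
    }
    return NULL;
}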
D  lec.c
    106  static inline void lec_arp_hold(struct lec_arp_table *entry)    in lec_arp_hold()  argument
    108  refcount_inc(&entry->usage);    in lec_arp_hold()
    111  static inline void lec_arp_put(struct lec_arp_table *entry)    in lec_arp_put()  argument
    113  if (refcount_dec_and_test(&entry->usage))    in lec_arp_put()
    114  kfree(entry);    in lec_arp_put()
    211  struct lec_arp_table *entry;    in lec_start_xmit()  local
    282  entry = NULL;    in lec_start_xmit()
    283  vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);    in lec_start_xmit()
    285  dev->name, vcc, vcc ? vcc->flags : 0, entry);    in lec_start_xmit()
    287  if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {    in lec_start_xmit()
        [all …]
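lec_arp_hold()/lec_arp_put() above are the release side of that lifecycle: the last put frees the object. A hedged user-space analogue, again with invented names, plain free() in place of kfree(), and an atomic counter in place of refcount_t:

#include <stdatomic.h>
#include <stdlib.h>

struct arp_entry {
    atomic_uint usage;                /* reference count */
    /* ... payload fields ... */
};

static inline void arp_entry_hold(struct arp_entry *entry)
{
    atomic_fetch_add(&entry->usage, 1);
}

static inline void arp_entry_put(struct arp_entry *entry)
{
    /* fetch_sub returns the old value; 1 means this was the last reference */
    if (atomic_fetch_sub(&entry->usage, 1) == 1)
        free(entry);
}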
D  mpc.c
    87  static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry);
    179  struct atm_mpoa_qos *entry;    in atm_mpoa_add_qos()  local
    181  entry = atm_mpoa_search_qos(dst_ip);    in atm_mpoa_add_qos()
    182  if (entry != NULL) {    in atm_mpoa_add_qos()
    183  entry->qos = *qos;    in atm_mpoa_add_qos()
    184  return entry;    in atm_mpoa_add_qos()
    187  entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);    in atm_mpoa_add_qos()
    188  if (entry == NULL) {    in atm_mpoa_add_qos()
    190  return entry;    in atm_mpoa_add_qos()
    193  entry->ipaddr = dst_ip;    in atm_mpoa_add_qos()
        [all …]
D  clip.c
    76  static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)    in link_vcc()  argument
    78  pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);    in link_vcc()
    79  clip_vcc->entry = entry;    in link_vcc()
    81  clip_vcc->next = entry->vccs;    in link_vcc()
    82  entry->vccs = clip_vcc;    in link_vcc()
    83  entry->neigh->used = jiffies;    in link_vcc()
    88  struct atmarp_entry *entry = clip_vcc->entry;    in unlink_clip_vcc()  local
    91  if (!entry) {    in unlink_clip_vcc()
    95  netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */    in unlink_clip_vcc()
    96  entry->neigh->used = jiffies;    in unlink_clip_vcc()
        [all …]
/net/netfilter/

D  nf_queue.c
    58  static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)    in nf_queue_entry_release_refs()  argument
    60  struct nf_hook_state *state = &entry->state;    in nf_queue_entry_release_refs()
    69  dev_put(entry->physin);    in nf_queue_entry_release_refs()
    70  dev_put(entry->physout);    in nf_queue_entry_release_refs()
    74  void nf_queue_entry_free(struct nf_queue_entry *entry)    in nf_queue_entry_free()  argument
    76  nf_queue_entry_release_refs(entry);    in nf_queue_entry_free()
    77  kfree(entry);    in nf_queue_entry_free()
    81  static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)    in __nf_queue_entry_init_physdevs()  argument
    84  const struct sk_buff *skb = entry->skb;    in __nf_queue_entry_init_physdevs()
    87  entry->physin = nf_bridge_get_physindev(skb, entry->state.net);    in __nf_queue_entry_init_physdevs()
        [all …]
D  nfnetlink_queue.c
    195  __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)    in __enqueue_entry()  argument
    197  list_add_tail(&entry->list, &queue->queue_list);    in __enqueue_entry()
    202  __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)    in __dequeue_entry()  argument
    204  list_del(&entry->list);    in __dequeue_entry()
    211  struct nf_queue_entry *entry = NULL, *i;    in find_dequeue_entry()  local
    217  entry = i;    in find_dequeue_entry()
    222  if (entry)    in find_dequeue_entry()
    223  __dequeue_entry(queue, entry);    in find_dequeue_entry()
    227  return entry;    in find_dequeue_entry()
    230  static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)    in nfqnl_reinject()  argument
        [all …]
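nfnetlink_queue.c keeps pending packets on an intrusive list, appends at the tail, and dequeues by matching an ID. A rough user-space equivalent using the <sys/queue.h> TAILQ macros instead of the kernel's list_head (the queue and field names are made up for this sketch):

#include <stddef.h>
#include <sys/queue.h>

struct queue_entry {
    unsigned int id;
    TAILQ_ENTRY(queue_entry) list;    /* intrusive link */
};

TAILQ_HEAD(entry_queue, queue_entry);

static void enqueue_entry(struct entry_queue *q, struct queue_entry *entry)
{
    TAILQ_INSERT_TAIL(q, entry, list);
}

/* Find the entry with a matching id, unlink it, and hand it to the caller. */
static struct queue_entry *find_dequeue_entry(struct entry_queue *q, unsigned int id)
{
    struct queue_entry *i, *entry = NULL;

    TAILQ_FOREACH(i, q, list) {
        if (i->id == id) {
            entry = i;
            break;
        }
    }
    if (entry)
        TAILQ_REMOVE(q, entry, list);
    return entry;
}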
D  nf_flow_table_offload.c
    204  static void flow_offload_mangle(struct flow_action_entry *entry,    in flow_offload_mangle()  argument
    208  entry->id = FLOW_ACTION_MANGLE;    in flow_offload_mangle()
    209  entry->mangle.htype = htype;    in flow_offload_mangle()
    210  entry->mangle.offset = offset;    in flow_offload_mangle()
    211  memcpy(&entry->mangle.mask, mask, sizeof(u32));    in flow_offload_mangle()
    212  memcpy(&entry->mangle.val, value, sizeof(u32));    in flow_offload_mangle()
    332  struct flow_action_entry *entry = flow_action_entry_next(flow_rule);    in flow_offload_ipv4_snat()  local
    350  flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,    in flow_offload_ipv4_snat()
    359  struct flow_action_entry *entry = flow_action_entry_next(flow_rule);    in flow_offload_ipv4_dnat()  local
    377  flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,    in flow_offload_ipv4_dnat()
        [all …]
/net/netlabel/

D  netlabel_domainhash.c
    64  static void netlbl_domhsh_free_entry(struct rcu_head *entry)    in netlbl_domhsh_free_entry()  argument
    74  ptr = container_of(entry, struct netlbl_dom_map, rcu);    in netlbl_domhsh_free_entry()
    175  struct netlbl_dom_map *entry;    in netlbl_domhsh_search_def()  local
    177  entry = netlbl_domhsh_search(domain, family);    in netlbl_domhsh_search_def()
    178  if (entry != NULL)    in netlbl_domhsh_search_def()
    179  return entry;    in netlbl_domhsh_search_def()
    181  entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv4);    in netlbl_domhsh_search_def()
    182  if (entry != NULL && entry->valid)    in netlbl_domhsh_search_def()
    183  return entry;    in netlbl_domhsh_search_def()
    186  entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def_ipv6);    in netlbl_domhsh_search_def()
        [all …]
D  netlabel_addrlist.c
    148  int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)    in netlbl_af4list_add()  argument
    152  iter = netlbl_af4list_search(entry->addr, head);    in netlbl_af4list_add()
    154  iter->addr == entry->addr && iter->mask == entry->mask)    in netlbl_af4list_add()
    163  ntohl(entry->mask) > ntohl(iter->mask)) {    in netlbl_af4list_add()
    164  __list_add_rcu(&entry->list,    in netlbl_af4list_add()
    169  list_add_tail_rcu(&entry->list, head);    in netlbl_af4list_add()
    185  int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head)    in netlbl_af6list_add()  argument
    189  iter = netlbl_af6list_search(&entry->addr, head);    in netlbl_af6list_add()
    191  ipv6_addr_equal(&iter->addr, &entry->addr) &&    in netlbl_af6list_add()
    192  ipv6_addr_equal(&iter->mask, &entry->mask))    in netlbl_af6list_add()
        [all …]
D  netlabel_kapi.c
    102  struct netlbl_dom_map *entry;    in netlbl_cfg_unlbl_map_add()  local
    107  entry = kzalloc(sizeof(*entry), GFP_ATOMIC);    in netlbl_cfg_unlbl_map_add()
    108  if (entry == NULL)    in netlbl_cfg_unlbl_map_add()
    111  entry->domain = kstrdup(domain, GFP_ATOMIC);    in netlbl_cfg_unlbl_map_add()
    112  if (entry->domain == NULL)    in netlbl_cfg_unlbl_map_add()
    115  entry->family = family;    in netlbl_cfg_unlbl_map_add()
    118  entry->def.type = NETLBL_NLTYPE_UNLABELED;    in netlbl_cfg_unlbl_map_add()
    169  entry->def.addrsel = addrmap;    in netlbl_cfg_unlbl_map_add()
    170  entry->def.type = NETLBL_NLTYPE_ADDRSELECT;    in netlbl_cfg_unlbl_map_add()
    176  ret_val = netlbl_domhsh_add(entry, audit_info);    in netlbl_cfg_unlbl_map_add()
        [all …]
D  netlabel_mgmt.c
    87  struct netlbl_dom_map *entry = kzalloc(sizeof(*entry), GFP_KERNEL);    in netlbl_mgmt_add_common()  local
    89  if (!entry)    in netlbl_mgmt_add_common()
    91  entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);    in netlbl_mgmt_add_common()
    94  entry->domain = kmalloc(tmp_size, GFP_KERNEL);    in netlbl_mgmt_add_common()
    95  if (entry->domain == NULL) {    in netlbl_mgmt_add_common()
    99  nla_strscpy(entry->domain,    in netlbl_mgmt_add_common()
    108  switch (entry->def.type) {    in netlbl_mgmt_add_common()
    111  entry->family =    in netlbl_mgmt_add_common()
    114  entry->family = AF_UNSPEC;    in netlbl_mgmt_add_common()
    124  entry->family = AF_INET;    in netlbl_mgmt_add_common()
        [all …]
D  netlabel_unlabeled.c
    147  static void netlbl_unlhsh_free_iface(struct rcu_head *entry)    in netlbl_unlhsh_free_iface()  argument
    157  iface = container_of(entry, struct netlbl_unlhsh_iface, rcu);    in netlbl_unlhsh_free_iface()
    237  struct netlbl_unlhsh_addr4 *entry;    in netlbl_unlhsh_add_addr4()  local
    239  entry = kzalloc(sizeof(*entry), GFP_ATOMIC);    in netlbl_unlhsh_add_addr4()
    240  if (entry == NULL)    in netlbl_unlhsh_add_addr4()
    243  entry->list.addr = addr->s_addr & mask->s_addr;    in netlbl_unlhsh_add_addr4()
    244  entry->list.mask = mask->s_addr;    in netlbl_unlhsh_add_addr4()
    245  entry->list.valid = 1;    in netlbl_unlhsh_add_addr4()
    246  entry->secid = secid;    in netlbl_unlhsh_add_addr4()
    249  ret_val = netlbl_af4list_add(&entry->list, &iface->addr4_list);    in netlbl_unlhsh_add_addr4()
        [all …]
/net/ipv4/

D  udp_tunnel_nic.c
    73  udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)    in udp_tunnel_nic_entry_is_free()  argument
    75  return entry->use_cnt == 0 && !entry->flags;    in udp_tunnel_nic_entry_is_free()
    79  udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)    in udp_tunnel_nic_entry_is_present()  argument
    81  return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);    in udp_tunnel_nic_entry_is_present()
    85  udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)    in udp_tunnel_nic_entry_is_frozen()  argument
    87  return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;    in udp_tunnel_nic_entry_is_frozen()
    91  udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)    in udp_tunnel_nic_entry_freeze_used()  argument
    93  if (!udp_tunnel_nic_entry_is_free(entry))    in udp_tunnel_nic_entry_freeze_used()
    94  entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;    in udp_tunnel_nic_entry_freeze_used()
    98  udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)    in udp_tunnel_nic_entry_unfreeze()  argument
        [all …]
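udp_tunnel_nic.c encodes table-entry state as a use count plus a flags word and wraps every test and transition in a tiny helper. A hedged sketch of that helper style (the flag value and struct layout are illustrative, not the driver's):

#include <stdbool.h>

#define ENTRY_FROZEN 0x1u             /* illustrative flag bit */

struct table_entry {
    unsigned int use_cnt;
    unsigned int flags;
};

static bool entry_is_free(const struct table_entry *entry)
{
    return entry->use_cnt == 0 && !entry->flags;
}

static bool entry_is_frozen(const struct table_entry *entry)
{
    return entry->flags & ENTRY_FROZEN;
}

static void entry_freeze_used(struct table_entry *entry)
{
    /* only entries that are actually in use get frozen */
    if (!entry_is_free(entry))
        entry->flags |= ENTRY_FROZEN;
}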
/net/mptcp/

D  pm_netlink.c
    156  struct mptcp_pm_addr_entry *entry;    in select_local_address()  local
    162  list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {    in select_local_address()
    163  if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))    in select_local_address()
    166  if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))    in select_local_address()
    169  *new_entry = *entry;    in select_local_address()
    182  struct mptcp_pm_addr_entry *entry;    in select_signal_address()  local
    191  list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {    in select_signal_address()
    192  if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))    in select_signal_address()
    195  if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))    in select_signal_address()
    198  *new_entry = *entry;    in select_signal_address()
        [all …]
D  pm_userspace.c
    12  struct mptcp_pm_addr_entry *entry, *tmp;    in mptcp_free_local_addr_list()  local
    23  list_for_each_entry_safe(entry, tmp, &free_list, list) {    in mptcp_free_local_addr_list()
    24  sock_kfree_s(sk, entry, sizeof(*entry));    in mptcp_free_local_addr_list()
    29  struct mptcp_pm_addr_entry *entry,    in mptcp_userspace_pm_append_new_local_addr()  argument
    44  addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);    in mptcp_userspace_pm_append_new_local_addr()
    45  if (addr_match && entry->addr.id == 0 && needs_id)    in mptcp_userspace_pm_append_new_local_addr()
    46  entry->addr.id = e->addr.id;    in mptcp_userspace_pm_append_new_local_addr()
    47  id_match = (e->addr.id == entry->addr.id);    in mptcp_userspace_pm_append_new_local_addr()
    67  *e = *entry;    in mptcp_userspace_pm_append_new_local_addr()
    76  ret = entry->addr.id;    in mptcp_userspace_pm_append_new_local_addr()
        [all …]
/net/dccp/

D  feat.c
    279  static void dccp_feat_print_entry(struct dccp_feat_entry const *entry)    in dccp_feat_print_entry()  argument
    281  dccp_debug(" * %s %s = ", entry->is_local ? "local" : "remote",    in dccp_feat_print_entry()
    282  dccp_feat_fname(entry->feat_num));    in dccp_feat_print_entry()
    283  dccp_feat_printval(entry->feat_num, &entry->val);    in dccp_feat_print_entry()
    284  dccp_pr_debug_cat(", state=%s %s\n", dccp_feat_sname[entry->state],    in dccp_feat_print_entry()
    285  entry->needs_confirm ? "(Confirm pending)" : "");    in dccp_feat_print_entry()
    411  static void dccp_feat_entry_destructor(struct dccp_feat_entry *entry)    in dccp_feat_entry_destructor()  argument
    413  if (entry != NULL) {    in dccp_feat_entry_destructor()
    414  dccp_feat_val_destructor(entry->feat_num, &entry->val);    in dccp_feat_entry_destructor()
    415  kfree(entry);    in dccp_feat_entry_destructor()
        [all …]
/net/sched/

D  act_gate.c
    176  static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,    in fill_gate_entry()  argument
    181  entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);    in fill_gate_entry()
    191  entry->interval = interval;    in fill_gate_entry()
    194  entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);    in fill_gate_entry()
    196  entry->ipv = -1;    in fill_gate_entry()
    199  entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);    in fill_gate_entry()
    201  entry->maxoctets = -1;    in fill_gate_entry()
    206  static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,    in parse_gate_entry()  argument
    218  entry->index = index;    in parse_gate_entry()
    220  return fill_gate_entry(tb, entry, extack);    in parse_gate_entry()
        [all …]
D  sch_ingress.c
    82  struct bpf_mprog_entry *entry;    in ingress_init()  local
    91  entry = tcx_entry_fetch_or_create(dev, true, &created);    in ingress_init()
    92  if (!entry)    in ingress_init()
    94  tcx_miniq_inc(entry);    in ingress_init()
    95  mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);    in ingress_init()
    97  tcx_entry_update(dev, entry, true);    in ingress_init()
    116  struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress);    in ingress_destroy()  local
    123  if (entry) {    in ingress_destroy()
    124  tcx_miniq_dec(entry);    in ingress_destroy()
    125  if (!tcx_entry_is_active(entry)) {    in ingress_destroy()
        [all …]
D  sch_taprio.c
    117  struct sched_entry *entry, *cur;    in taprio_calculate_gate_durations()  local
    120  list_for_each_entry(entry, &sched->entries, list) {    in taprio_calculate_gate_durations()
    121  u32 gates_still_open = entry->gate_mask;    in taprio_calculate_gate_durations()
    127  cur = entry;    in taprio_calculate_gate_durations()
    138  entry->gate_duration[tc] += cur->interval;    in taprio_calculate_gate_durations()
    144  } while (cur != entry);    in taprio_calculate_gate_durations()
    151  if (entry->gate_duration[tc] &&    in taprio_calculate_gate_durations()
    152  sched->max_open_gate_duration[tc] < entry->gate_duration[tc])    in taprio_calculate_gate_durations()
    153  sched->max_open_gate_duration[tc] = entry->gate_duration[tc];    in taprio_calculate_gate_durations()
    158  struct sched_entry *entry, int tc)    in taprio_entry_allows_tx()  argument
        [all …]
/net/ipv6/

D  calipso.c
    101  static void calipso_cache_entry_free(struct calipso_map_cache_entry *entry)    in calipso_cache_entry_free()  argument
    103  if (entry->lsm_data)    in calipso_cache_entry_free()
    104  netlbl_secattr_cache_free(entry->lsm_data);    in calipso_cache_entry_free()
    105  kfree(entry->key);    in calipso_cache_entry_free()
    106  kfree(entry);    in calipso_cache_entry_free()
    161  struct calipso_map_cache_entry *entry, *tmp_entry;    in calipso_cache_invalidate()  local
    166  list_for_each_entry_safe(entry,    in calipso_cache_invalidate()
    169  list_del(&entry->list);    in calipso_cache_invalidate()
    170  calipso_cache_entry_free(entry);    in calipso_cache_invalidate()
    204  struct calipso_map_cache_entry *entry;    in calipso_cache_check()  local
        [all …]
/net/appletalk/

D  aarp.c
    484  struct aarp_entry *entry;    in aarp_proxy_probe_network()  local
    499  entry = aarp_alloc();    in aarp_proxy_probe_network()
    501  if (!entry)    in aarp_proxy_probe_network()
    504  entry->expires_at = -1;    in aarp_proxy_probe_network()
    505  entry->status = ATIF_PROBE;    in aarp_proxy_probe_network()
    506  entry->target_addr.s_node = sa->s_node;    in aarp_proxy_probe_network()
    507  entry->target_addr.s_net = sa->s_net;    in aarp_proxy_probe_network()
    508  entry->dev = atif->dev;    in aarp_proxy_probe_network()
    513  entry->next = proxies[hash];    in aarp_proxy_probe_network()
    514  proxies[hash] = entry;    in aarp_proxy_probe_network()
        [all …]
/net/mac80211/

D  mesh_pathtbl.c
    47  struct ieee80211_mesh_fast_tx *entry = ptr;    in __mesh_fast_tx_entry_free()  local
    49  kfree_rcu(entry, fast_tx.rcu_head);    in __mesh_fast_tx_entry_free()
    421  struct ieee80211_mesh_fast_tx *entry)    in mesh_fast_tx_entry_free()  argument
    423  hlist_del_rcu(&entry->walk_list);    in mesh_fast_tx_entry_free()
    424  rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);    in mesh_fast_tx_entry_free()
    425  kfree_rcu(entry, fast_tx.rcu_head);    in mesh_fast_tx_entry_free()
    432  struct ieee80211_mesh_fast_tx *entry;    in mesh_fast_tx_get()  local
    436  entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);    in mesh_fast_tx_get()
    437  if (!entry)    in mesh_fast_tx_get()
    440  if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||    in mesh_fast_tx_get()
        [all …]
/net/bluetooth/

D  6lowpan.c
    200  struct lowpan_btle_dev *entry;    in lookup_peer()  local
    205  list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {    in lookup_peer()
    206  peer = __peer_lookup_conn(entry, conn);    in lookup_peer()
    218  struct lowpan_btle_dev *entry;    in lookup_dev()  local
    223  list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {    in lookup_dev()
    224  if (conn->hcon->hdev == entry->hdev) {    in lookup_dev()
    225  dev = entry;    in lookup_dev()
    462  struct lowpan_btle_dev *entry;    in send_mcast_pkt()  local
    467  list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {    in send_mcast_pkt()
    471  if (entry->netdev != netdev)    in send_mcast_pkt()
        [all …]
D  hci_codec.c
    15  struct codec_list *entry;    in hci_codec_list_add()  local
    17  entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL);    in hci_codec_list_add()
    18  if (!entry)    in hci_codec_list_add()
    21  entry->id = sent->id;    in hci_codec_list_add()
    23  entry->cid = __le16_to_cpu(sent->cid);    in hci_codec_list_add()
    24  entry->vid = __le16_to_cpu(sent->vid);    in hci_codec_list_add()
    26  entry->transport = sent->transport;    in hci_codec_list_add()
    27  entry->len = len;    in hci_codec_list_add()
    28  entry->num_caps = 0;    in hci_codec_list_add()
    30  entry->num_caps = rp->num_caps;    in hci_codec_list_add()
        [all …]
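hci_codec.c sizes the allocation as kzalloc(sizeof(*entry) + len) so the fixed header and its variable-length capability data live in one block. A minimal user-space sketch of that single-allocation idiom using a C99 flexible array member (the struct layout is invented for the example):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct codec_entry {
    uint8_t id;
    size_t len;
    uint8_t caps[];                   /* flexible array member, sized at allocation time */
};

static struct codec_entry *codec_entry_new(uint8_t id, const void *caps, size_t len)
{
    /* one zeroed allocation covers the fixed header plus the variable payload */
    struct codec_entry *entry = calloc(1, sizeof(*entry) + len);

    if (!entry)
        return NULL;
    entry->id = id;
    entry->len = len;
    memcpy(entry->caps, caps, len);
    return entry;
}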
/net/dccp/ccids/lib/

D  packet_history.c
    51  struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());    in tfrc_tx_hist_add()  local
    53  if (entry == NULL)    in tfrc_tx_hist_add()
    55  entry->seqno = seqno;    in tfrc_tx_hist_add()
    56  entry->stamp = ktime_get_real();    in tfrc_tx_hist_add()
    57  entry->next = *headp;    in tfrc_tx_hist_add()
    58  *headp = entry;    in tfrc_tx_hist_add()
    97  static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,    in tfrc_rx_hist_entry_from_skb()  argument
    103  entry->tfrchrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;    in tfrc_rx_hist_entry_from_skb()
    104  entry->tfrchrx_ccval = dh->dccph_ccval;    in tfrc_rx_hist_entry_from_skb()
    105  entry->tfrchrx_type = dh->dccph_type;    in tfrc_rx_hist_entry_from_skb()
        [all …]
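tfrc_tx_hist_add() above pushes a timestamped record onto the head of a singly linked transmit history. A rough user-space sketch of the same push, with malloc() standing in for the slab cache and clock_gettime() for ktime_get_real() (names are adapted for illustration):

#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct tx_hist_entry {
    uint64_t seqno;
    struct timespec stamp;
    struct tx_hist_entry *next;
};

/* Push a new record for seqno at the front of the history list. */
static int tx_hist_add(struct tx_hist_entry **headp, uint64_t seqno)
{
    struct tx_hist_entry *entry = malloc(sizeof(*entry));

    if (entry == NULL)
        return -1;
    entry->seqno = seqno;
    clock_gettime(CLOCK_REALTIME, &entry->stamp);
    entry->next = *headp;
    *headp = entry;
    return 0;
}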