Searched refs:pp (Results 1 – 25 of 36) sorted by relevance

/net/netfilter/ipvs/
ip_vs_proto.c
49 static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) in register_ip_vs_protocol() argument
51 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); in register_ip_vs_protocol()
53 pp->next = ip_vs_proto_table[hash]; in register_ip_vs_protocol()
54 ip_vs_proto_table[hash] = pp; in register_ip_vs_protocol()
56 if (pp->init != NULL) in register_ip_vs_protocol()
57 pp->init(pp); in register_ip_vs_protocol()
66 register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp) in register_ip_vs_proto_netns() argument
68 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); in register_ip_vs_proto_netns()
75 pd->pp = pp; /* For speed issues */ in register_ip_vs_proto_netns()
80 if (pp->init_netns != NULL) { in register_ip_vs_proto_netns()
[all …]
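
The ip_vs_proto.c matches above show the registration idiom: the protocol is pushed onto the head of its hash bucket and its optional init hook is called. Below is a minimal userspace sketch of the same chained-hash registration; struct my_proto, proto_table and register_proto are hypothetical stand-ins for the kernel types, not the IPVS API itself.

#include <stdio.h>

#define PROTO_TAB_SIZE 32
#define PROTO_HASH(proto) ((proto) & (PROTO_TAB_SIZE - 1))

struct my_proto {
	unsigned int protocol;           /* protocol number, e.g. 6 for TCP */
	struct my_proto *next;           /* next entry in the same hash bucket */
	void (*init)(struct my_proto *); /* optional init hook */
};

static struct my_proto *proto_table[PROTO_TAB_SIZE];

/* Push the protocol onto the head of its bucket, then run its init hook. */
static void register_proto(struct my_proto *pp)
{
	unsigned int hash = PROTO_HASH(pp->protocol);

	pp->next = proto_table[hash];
	proto_table[hash] = pp;

	if (pp->init != NULL)
		pp->init(pp);
}

/* Lookup walks the same bucket chain (cf. the ip_vs_proto_get() callers
 * in the other files in this listing). */
static struct my_proto *get_proto(unsigned int protocol)
{
	struct my_proto *pp;

	for (pp = proto_table[PROTO_HASH(protocol)]; pp; pp = pp->next)
		if (pp->protocol == protocol)
			return pp;
	return NULL;
}

static void tcp_init(struct my_proto *pp)
{
	printf("init for protocol %u\n", pp->protocol);
}

int main(void)
{
	struct my_proto tcp = { .protocol = 6, .init = tcp_init };

	register_proto(&tcp);
	printf("lookup(6) -> %s\n", get_proto(6) ? "found" : "missing");
	return 0;
}

Head insertion keeps registration O(1); per-netns registration in register_ip_vs_proto_netns() additionally caches the pointer as pd->pp, as the hit above notes.
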
ip_vs_core.c
73 tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
79 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
237 if (likely(pd->pp->state_transition)) in ip_vs_set_state()
238 pd->pp->state_transition(cp, direction, skb, pd); in ip_vs_set_state()
457 struct ip_vs_protocol *pp = pd->pp; in ip_vs_schedule() local
492 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off, in ip_vs_schedule()
502 cp = INDIRECT_CALL_1(pp->conn_in_get, in ip_vs_schedule()
508 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off, in ip_vs_schedule()
643 ret = cp->packet_xmit(skb, cp, pd->pp, iph); in ip_vs_leave()
766 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, in ip_vs_nat_icmp() argument
[all …]
ip_vs_app.c
76 struct ip_vs_protocol *pp; in ip_vs_app_inc_new() local
80 if (!(pp = ip_vs_proto_get(proto))) in ip_vs_app_inc_new()
83 if (!pp->unregister_app) in ip_vs_app_inc_new()
105 ret = pp->register_app(ipvs, inc); in ip_vs_app_inc_new()
111 pp->name, inc->name, ntohs(inc->port)); in ip_vs_app_inc_new()
127 struct ip_vs_protocol *pp; in ip_vs_app_inc_release() local
129 if (!(pp = ip_vs_proto_get(inc->protocol))) in ip_vs_app_inc_release()
132 if (pp->unregister_app) in ip_vs_app_inc_release()
133 pp->unregister_app(ipvs, inc); in ip_vs_app_inc_release()
136 pp->name, inc->name, ntohs(inc->port)); in ip_vs_app_inc_release()
[all …]
ip_vs_xmit.c
691 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) in ip_vs_null_xmit() argument
705 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) in ip_vs_bypass_xmit() argument
734 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) in ip_vs_bypass_xmit_v6() argument
766 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) in ip_vs_nat_xmit() argument
802 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off, in ip_vs_nat_xmit()
812 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off, in ip_vs_nat_xmit()
826 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) in ip_vs_nat_xmit()
831 IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT"); in ip_vs_nat_xmit()
854 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) in ip_vs_nat_xmit_v6() argument
890 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off, in ip_vs_nat_xmit_v6()
[all …]
ip_vs_proto_udp.c
28 udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
136 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in udp_snat_handler() argument
158 if (!udp_csum_check(cp->af, skb, pp)) in udp_snat_handler()
211 pp->name, udph->check, in udp_snat_handler()
219 udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in udp_dnat_handler() argument
241 if (!udp_csum_check(cp->af, skb, pp)) in udp_dnat_handler()
300 udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) in udp_csum_check() argument
330 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, in udp_csum_check()
341 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, in udp_csum_check()
ip_vs_proto_tcp.c
32 tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
147 tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in tcp_snat_handler() argument
169 if (!tcp_csum_check(cp->af, skb, pp)) in tcp_snat_handler()
217 pp->name, tcph->check, in tcp_snat_handler()
225 tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in tcp_dnat_handler() argument
247 if (!tcp_csum_check(cp->af, skb, pp)) in tcp_dnat_handler()
304 tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) in tcp_csum_check() argument
327 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, in tcp_csum_check()
338 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, in tcp_csum_check()
544 pd->pp->name, in set_tcp_state()
ip_vs_proto_sctp.c
13 sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
91 sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in sctp_snat_handler() argument
111 if (!sctp_csum_check(cp->af, skb, pp)) in sctp_snat_handler()
138 sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, in sctp_dnat_handler() argument
158 if (!sctp_csum_check(cp->af, skb, pp)) in sctp_dnat_handler()
186 sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) in sctp_csum_check() argument
205 IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, in sctp_csum_check()
441 pd->pp->name, in set_sctp_state()
/net/rxrpc/
call_accept.c
107 struct rb_node *parent, **pp; in rxrpc_service_prealloc_one() local
110 pp = &rx->calls.rb_node; in rxrpc_service_prealloc_one()
112 while (*pp) { in rxrpc_service_prealloc_one()
113 parent = *pp; in rxrpc_service_prealloc_one()
116 pp = &(*pp)->rb_left; in rxrpc_service_prealloc_one()
118 pp = &(*pp)->rb_right; in rxrpc_service_prealloc_one()
128 rb_link_node(&call->sock_node, parent, pp); in rxrpc_service_prealloc_one()
467 struct rb_node *parent, **pp; in rxrpc_accept_call() local
484 pp = &rx->calls.rb_node; in rxrpc_accept_call()
486 while (*pp) { in rxrpc_accept_call()
[all …]
conn_service.c
68 struct rb_node **pp, *parent; in rxrpc_publish_service_conn() local
72 pp = &peer->service_conns.rb_node; in rxrpc_publish_service_conn()
74 while (*pp) { in rxrpc_publish_service_conn()
75 parent = *pp; in rxrpc_publish_service_conn()
80 pp = &(*pp)->rb_left; in rxrpc_publish_service_conn()
82 pp = &(*pp)->rb_right; in rxrpc_publish_service_conn()
87 rb_link_node_rcu(&conn->service_node, parent, pp); in rxrpc_publish_service_conn()
call_object.c
227 struct rb_node *parent, **pp; in rxrpc_new_client_call() local
255 pp = &rx->calls.rb_node; in rxrpc_new_client_call()
257 while (*pp) { in rxrpc_new_client_call()
258 parent = *pp; in rxrpc_new_client_call()
262 pp = &(*pp)->rb_left; in rxrpc_new_client_call()
264 pp = &(*pp)->rb_right; in rxrpc_new_client_call()
273 rb_link_node(&call->sock_node, parent, pp); in rxrpc_new_client_call()
conn_client.c
283 struct rb_node *p, **pp, *parent; in rxrpc_get_client_conn() local
370 pp = &local->client_conns.rb_node; in rxrpc_get_client_conn()
372 while (*pp) { in rxrpc_get_client_conn()
373 parent = *pp; in rxrpc_get_client_conn()
383 pp = &(*pp)->rb_left; in rxrpc_get_client_conn()
385 pp = &(*pp)->rb_right; in rxrpc_get_client_conn()
402 rb_link_node(&candidate->client_node, parent, pp); in rxrpc_get_client_conn()
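
Every rxrpc match above (and the inetpeer.c and nexthop.c matches below) uses the same rbtree descent: keep pp aimed at the child slot to descend into, remember its parent, and hand both to rb_link_node() once the slot is empty. Here is a sketch of that descent on a plain binary search tree, with a hypothetical struct node and no rebalancing, so only the link-point search is illustrated:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *parent, *left, *right;
};

/*
 * Walk down from *root keeping pp aimed at the child slot we would descend
 * into; when *pp is NULL that slot is where the new node belongs.  The
 * (parent, pp) pair is the same one rb_link_node() takes in the kernel.
 */
static void insert(struct node **root, struct node *new)
{
	struct node *parent = NULL, **pp = root;

	while (*pp) {
		parent = *pp;
		if (new->key < parent->key)
			pp = &parent->left;
		else
			pp = &parent->right;
	}
	new->parent = parent;   /* analogue of rb_link_node(new, parent, pp)... */
	*pp = new;              /* ...which also writes the new node into *pp */
}

int main(void)
{
	struct node *root = NULL;
	int keys[] = { 5, 2, 8, 1 };

	for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->key = keys[i];
		insert(&root, n);
	}
	printf("root %d, left %d, right %d\n",
	       root->key, root->left->key, root->right->key);
	return 0;
}
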
/net/ipv4/
inetpeer.c
106 struct rb_node **pp, *parent, *next; in lookup() local
109 pp = &base->rb_root.rb_node; in lookup()
114 next = rcu_dereference_raw(*pp); in lookup()
132 pp = &next->rb_left; in lookup()
134 pp = &next->rb_right; in lookup()
137 *pp_p = pp; in lookup()
187 struct rb_node **pp, *parent; in inet_getpeer() local
196 p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp); in inet_getpeer()
214 p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp); in inet_getpeer()
230 rb_link_node(&p->rb_node, parent, pp); in inet_getpeer()
udp_offload.c
353 struct sk_buff *pp = NULL; in udp_gro_receive_segment() local
395 pp = p; in udp_gro_receive_segment()
397 return pp; in udp_gro_receive_segment()
409 struct sk_buff *pp = NULL; in udp_gro_receive() local
423 pp = call_gro_receive(udp_gro_receive_segment, head, skb); in udp_gro_receive()
425 return pp; in udp_gro_receive()
458 pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb); in udp_gro_receive()
462 skb_gro_flush_final(skb, pp, flush); in udp_gro_receive()
463 return pp; in udp_gro_receive()
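
In the GRO receive paths (udp_offload.c here, and gre_offload.c, eth.c and ip6_offload.c below), pp holds the sk_buff returned by the inner gro_receive callback; the caller hands it to skb_gro_flush_final() and returns it up the stack. The following is only a loose userspace analogue of that dispatch shape, with a hypothetical callback table standing in for the kernel's offload lists:

#include <stdio.h>

struct pkt { int proto; const char *data; };

/* Each receive callback may return a packet that should now be flushed. */
typedef struct pkt *(*gro_receive_fn)(struct pkt *head, struct pkt *skb);

static struct pkt *udp_receive(struct pkt *head, struct pkt *skb)
{
	(void)skb;
	/* pretend the new segment completed the held-back packet */
	return head;
}

static gro_receive_fn callbacks[256] = { [17] = udp_receive }; /* 17 = UDP */

static struct pkt *gro_receive(struct pkt *head, struct pkt *skb)
{
	struct pkt *pp = NULL;           /* packet to flush, if any */
	gro_receive_fn cb = callbacks[skb->proto & 0xff];

	if (cb)
		pp = cb(head, skb);      /* analogue of call_gro_receive() */

	/* analogue of skb_gro_flush_final(): finalize the finished packet */
	if (pp)
		printf("flushing packet: %s\n", pp->data);
	return pp;
}

int main(void)
{
	struct pkt held = { .proto = 17, .data = "held-back UDP" };
	struct pkt seg  = { .proto = 17, .data = "new segment" };

	gro_receive(&held, &seg);
	return 0;
}
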
tcp_metrics.c
878 struct tcp_metrics_block __rcu **pp; in tcp_metrics_flush_all() local
882 pp = &hb->chain; in tcp_metrics_flush_all()
883 for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { in tcp_metrics_flush_all()
887 *pp = tm->tcpm_next; in tcp_metrics_flush_all()
890 pp = &tm->tcpm_next; in tcp_metrics_flush_all()
901 struct tcp_metrics_block __rcu **pp; in tcp_metrics_nl_cmd_del() local
922 pp = &hb->chain; in tcp_metrics_nl_cmd_del()
924 for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { in tcp_metrics_nl_cmd_del()
928 *pp = tm->tcpm_next; in tcp_metrics_nl_cmd_del()
932 pp = &tm->tcpm_next; in tcp_metrics_nl_cmd_del()
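
The tcp_metrics.c matches (and the br_mdb.c and br_multicast.c ones below) are the pointer-to-pointer unlink idiom: pp always addresses the slot that links the current entry into the chain, so removal is a single *pp = tm->tcpm_next with no special case for the head. A plain C sketch of that idiom on a hypothetical singly linked list:

#include <stdio.h>

struct item {
	int key;
	struct item *next;
};

/*
 * pp starts at the head slot and is advanced to &cur->next each step, so
 * *pp is always the pointer that links cur into the list; writing through
 * it removes cur whether it is the head or somewhere in the middle.
 */
static void remove_key(struct item **pp, int key)
{
	struct item *cur;

	for (cur = *pp; cur; cur = *pp) {
		if (cur->key == key) {
			*pp = cur->next;        /* unlink, cf. *pp = tm->tcpm_next */
			continue;
		}
		pp = &cur->next;
	}
}

int main(void)
{
	struct item c = { 3, NULL }, b = { 2, &c }, a = { 2, &b };
	struct item *head = &a;

	remove_key(&head, 2);
	for (struct item *it = head; it; it = it->next)
		printf("%d\n", it->key);        /* prints only 3 */
	return 0;
}

The same shape appears with RCU in br_multicast_del_pg() below, where the write through pp becomes rcu_assign_pointer(*pp, p->next).
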
gre_offload.c
110 struct sk_buff *pp = NULL; in gre_gro_receive() local
216 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); in gre_gro_receive()
222 skb_gro_flush_final(skb, pp, flush); in gre_gro_receive()
224 return pp; in gre_gro_receive()
nexthop.c
138 struct rb_node **pp, *parent = NULL, *next; in nexthop_find_by_id() local
140 pp = &net->nexthop.rb_root.rb_node; in nexthop_find_by_id()
144 next = rcu_dereference_raw(*pp); in nexthop_find_by_id()
151 pp = &next->rb_left; in nexthop_find_by_id()
153 pp = &next->rb_right; in nexthop_find_by_id()
996 struct rb_node **pp, *parent = NULL, *next; in insert_nexthop() local
1004 pp = &root->rb_node; in insert_nexthop()
1008 next = rtnl_dereference(*pp); in insert_nexthop()
1016 pp = &next->rb_left; in insert_nexthop()
1018 pp = &next->rb_right; in insert_nexthop()
[all …]
/net/netfilter/
core.c
185 int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp, in nf_hook_entries_insert_raw() argument
191 p = rcu_dereference_raw(*pp); in nf_hook_entries_insert_raw()
198 rcu_assign_pointer(*pp, new_hooks); in nf_hook_entries_insert_raw()
223 struct nf_hook_entries __rcu **pp) in __nf_hook_entries_try_shrink() argument
262 rcu_assign_pointer(*pp, new); in __nf_hook_entries_try_shrink()
318 struct nf_hook_entries __rcu **pp; in __nf_register_net_hook() local
330 pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev); in __nf_register_net_hook()
331 if (!pp) in __nf_register_net_hook()
336 p = nf_entry_dereference(*pp); in __nf_register_net_hook()
340 rcu_assign_pointer(*pp, new_hooks); in __nf_register_net_hook()
[all …]
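
In core.c, pp is the RCU-protected slot (struct nf_hook_entries __rcu **): the writer dereferences the current hook array, builds a replacement, and publishes it through the slot with rcu_assign_pointer(), while readers dereference the same slot locklessly. The sketch below is only a rough analogue of the publish side using C11 release/acquire atomics and hypothetical names; real RCU additionally defers freeing the old array until a grace period has passed, which this sketch simply skips.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hook_entries {
	unsigned int num;
	int hooks[];            /* flexible array of hook ids */
};

/* The "slot": analogue of struct nf_hook_entries __rcu **pp. */
static _Atomic(struct hook_entries *) hooks_slot;

/* Build a copy with one more hook and publish it with release semantics. */
static void insert_hook(int id)
{
	struct hook_entries *old = atomic_load_explicit(&hooks_slot,
							memory_order_acquire);
	unsigned int n = old ? old->num : 0;
	struct hook_entries *new = malloc(sizeof(*new) + (n + 1) * sizeof(int));

	if (old)
		memcpy(new->hooks, old->hooks, n * sizeof(int));
	new->hooks[n] = id;
	new->num = n + 1;

	/* analogue of rcu_assign_pointer(*pp, new_hooks) */
	atomic_store_explicit(&hooks_slot, new, memory_order_release);
	/* NOTE: unlike the kernel, this sketch leaks `old` instead of
	 * waiting for readers to finish before freeing it. */
}

int main(void)
{
	insert_hook(1);
	insert_hook(2);

	struct hook_entries *e = atomic_load_explicit(&hooks_slot,
						      memory_order_acquire);
	for (unsigned int i = 0; i < e->num; i++)
		printf("hook %d\n", e->hooks[i]);
	return 0;
}
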
xt_ipvs.c
56 struct ip_vs_protocol *pp; in ipvs_mt() local
81 pp = ip_vs_proto_get(iph.protocol); in ipvs_mt()
82 if (unlikely(!pp)) { in ipvs_mt()
90 cp = pp->conn_out_get(ipvs, family, skb, &iph); in ipvs_mt()
nf_internals.h
16 void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
18 int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
/net/phonet/
af_phonet.c
29 const struct phonet_protocol *pp; in phonet_proto_get() local
35 pp = rcu_dereference(proto_tab[protocol]); in phonet_proto_get()
36 if (pp && !try_module_get(pp->prot->owner)) in phonet_proto_get()
37 pp = NULL; in phonet_proto_get()
40 return pp; in phonet_proto_get()
43 static inline void phonet_proto_put(const struct phonet_protocol *pp) in phonet_proto_put() argument
45 module_put(pp->prot->owner); in phonet_proto_put()
460 const struct phonet_protocol *pp) in phonet_proto_register() argument
467 err = proto_register(pp->prot, 1); in phonet_proto_register()
475 rcu_assign_pointer(proto_tab[protocol], pp); in phonet_proto_register()
[all …]
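
phonet_proto_get() pairs an RCU table lookup with try_module_get(): the entry is returned only if a reference on its owner can still be taken, otherwise pp is dropped to NULL. A userspace sketch of that lookup-and-pin pattern, with a hypothetical refcount standing in for module references:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct proto {
	const char *name;
	atomic_int refcnt;      /* 0 means the owner is going away */
};

/* Analogue of try_module_get(): pin only while the count is still positive. */
static bool proto_tryget(struct proto *pp)
{
	int old = atomic_load(&pp->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&pp->refcnt, &old, old + 1))
			return true;
	return false;
}

static void proto_put(struct proto *pp)
{
	atomic_fetch_sub(&pp->refcnt, 1);
}

static _Atomic(struct proto *) proto_tab[256];

static struct proto *proto_get(unsigned int protocol)
{
	struct proto *pp = atomic_load(&proto_tab[protocol & 0xff]);

	if (pp && !proto_tryget(pp))
		pp = NULL;      /* entry present, but its owner is unloading */
	return pp;
}

int main(void)
{
	static struct proto phonet = { .name = "phonet" };

	atomic_store(&phonet.refcnt, 1);        /* "module" is loaded */
	atomic_store(&proto_tab[35], &phonet);

	struct proto *pp = proto_get(35);
	if (pp) {
		printf("got %s\n", pp->name);
		proto_put(pp);
	}
	return 0;
}
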
/net/bridge/
br_mdb.c
144 struct net_bridge_port_group __rcu **pp; in br_mdb_fill_info() local
163 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; in br_mdb_fill_info()
164 pp = &p->next) { in br_mdb_fill_info()
317 struct net_bridge_port_group __rcu **pp; in br_mdb_complete() local
330 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; in br_mdb_complete()
331 pp = &p->next) { in br_mdb_complete()
607 struct net_bridge_port_group __rcu **pp; in br_mdb_add_group() local
632 for (pp = &mp->ports; in br_mdb_add_group()
633 (p = mlock_dereference(*pp, br)) != NULL; in br_mdb_add_group()
634 pp = &p->next) { in br_mdb_add_group()
[all …]
br_multicast.c
171 struct net_bridge_port_group __rcu **pp; in br_multicast_del_pg() local
177 for (pp = &mp->ports; in br_multicast_del_pg()
178 (p = mlock_dereference(*pp, br)) != NULL; in br_multicast_del_pg()
179 pp = &p->next) { in br_multicast_del_pg()
183 rcu_assign_pointer(*pp, p->next); in br_multicast_del_pg()
540 struct net_bridge_port_group __rcu **pp; in br_multicast_add_group() local
561 for (pp = &mp->ports; in br_multicast_add_group()
562 (p = mlock_dereference(*pp, br)) != NULL; in br_multicast_add_group()
563 pp = &p->next) { in br_multicast_add_group()
570 p = br_multicast_new_port_group(port, group, *pp, 0, src); in br_multicast_add_group()
[all …]
/net/ethernet/
eth.c
457 struct sk_buff *pp = NULL; in eth_gro_receive() local
496 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); in eth_gro_receive()
501 skb_gro_flush_final(skb, pp, flush); in eth_gro_receive()
503 return pp; in eth_gro_receive()
/net/ipv6/
ip6_offload.c
188 struct sk_buff *pp = NULL; in ipv6_gro_receive() local
280 pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive, in ipv6_gro_receive()
287 skb_gro_flush_final(skb, pp, flush); in ipv6_gro_receive()
289 return pp; in ipv6_gro_receive()
/net/sctp/
socket.c
8161 struct sctp_bind_bucket *pp; in sctp_get_port_local() local
8190 sctp_for_each_hentry(pp, &head->chain) in sctp_get_port_local()
8191 if ((pp->port == rover) && in sctp_get_port_local()
8192 net_eq(sock_net(sk), pp->net)) in sctp_get_port_local()
8218 sctp_for_each_hentry(pp, &head->chain) { in sctp_get_port_local()
8219 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) in sctp_get_port_local()
8223 pp = NULL; in sctp_get_port_local()
8226 if (!hlist_empty(&pp->owner)) { in sctp_get_port_local()
8236 if ((pp->fastreuse && reuse && in sctp_get_port_local()
8238 (pp->fastreuseport && sk->sk_reuseport && in sctp_get_port_local()
[all …]
