/net/l2tp/ |
D | l2tp_debugfs.c |
    48   static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)  in l2tp_dfs_next_tunnel() argument
    50           pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);  in l2tp_dfs_next_tunnel()
    51           pd->tunnel_idx++;  in l2tp_dfs_next_tunnel()
    54   static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)  in l2tp_dfs_next_session() argument
    56           pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);  in l2tp_dfs_next_session()
    57           pd->session_idx++;  in l2tp_dfs_next_session()
    59           if (pd->session == NULL) {  in l2tp_dfs_next_session()
    60                   pd->session_idx = 0;  in l2tp_dfs_next_session()
    61                   l2tp_dfs_next_tunnel(pd);  in l2tp_dfs_next_session()
    68           struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;  in l2tp_dfs_seq_start() local
    [all …]
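The excerpts above show the debugfs seq_file walking a two-level space: an outer tunnel cursor and an inner session cursor, where exhausting a tunnel's sessions resets the inner index and advances the outer one. A minimal userspace sketch of that nested-cursor idiom (the fixed arrays and type names here are invented stand-ins for the kernel's tunnel/session lookups):

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's tunnel/session lookups. */
    #define NTUNNELS 2
    static const char *tunnels[NTUNNELS] = { "tunl0", "tunl1" };
    static const char *sessions[NTUNNELS][3] = {
        { "s0", "s1", NULL },
        { "s2", NULL, NULL },
    };

    struct seq_data {
        int tunnel_idx;      /* next tunnel to fetch (one past the current) */
        int session_idx;     /* next session within the current tunnel */
        const char *tunnel;
        const char *session;
    };

    static void next_tunnel(struct seq_data *pd)
    {
        pd->tunnel = pd->tunnel_idx < NTUNNELS ? tunnels[pd->tunnel_idx] : NULL;
        pd->tunnel_idx++;
    }

    static void next_session(struct seq_data *pd)
    {
        pd->session = pd->tunnel ? sessions[pd->tunnel_idx - 1][pd->session_idx] : NULL;
        pd->session_idx++;

        /* Tunnel exhausted: reset the inner cursor and advance the outer one. */
        if (pd->session == NULL) {
            pd->session_idx = 0;
            next_tunnel(pd);
        }
    }

    int main(void)
    {
        struct seq_data pd = { 0, 0, NULL, NULL };

        next_tunnel(&pd);
        while (pd.tunnel) {
            next_session(&pd);
            if (pd.session)
                printf("%s/%s\n", pd.tunnel, pd.session);
        }
        return 0;
    }

The post-increment convention, where each index always points one past the element just fetched, mirrors what the kernel code does with pd->tunnel_idx and pd->session_idx.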
|
D | l2tp_ppp.c |
    1556 static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)  in pppol2tp_next_tunnel() argument
    1559         pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);  in pppol2tp_next_tunnel()
    1560         pd->tunnel_idx++;  in pppol2tp_next_tunnel()
    1562         if (pd->tunnel == NULL)  in pppol2tp_next_tunnel()
    1566         if (pd->tunnel->version < 3)  in pppol2tp_next_tunnel()
    1571 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)  in pppol2tp_next_session() argument
    1573         pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);  in pppol2tp_next_session()
    1574         pd->session_idx++;  in pppol2tp_next_session()
    1576         if (pd->session == NULL) {  in pppol2tp_next_session()
    1577                 pd->session_idx = 0;  in pppol2tp_next_session()
    [all …]
|
/net/netfilter/ipvs/ |
D | ip_vs_proto.c |
    69           struct ip_vs_proto_data *pd =  in register_ip_vs_proto_netns() local
    72           if (!pd)  in register_ip_vs_proto_netns()
    75           pd->pp = pp;    /* For speed issues */  in register_ip_vs_proto_netns()
    76           pd->next = ipvs->proto_data_table[hash];  in register_ip_vs_proto_netns()
    77           ipvs->proto_data_table[hash] = pd;  in register_ip_vs_proto_netns()
    78           atomic_set(&pd->appcnt, 0);    /* Init app counter */  in register_ip_vs_proto_netns()
    81           int ret = pp->init_netns(ipvs, pd);  in register_ip_vs_proto_netns()
    84           ipvs->proto_data_table[hash] = pd->next;  in register_ip_vs_proto_netns()
    85           kfree(pd);  in register_ip_vs_proto_netns()
    118  unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)  in unregister_ip_vs_proto_netns() argument
    [all …]
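register_ip_vs_proto_netns() links the freshly allocated per-netns proto data at the head of a hash chain keyed by protocol number, then unlinks and frees it again if the protocol's init_netns() hook fails. A simplified model of that head-insert-with-rollback idiom (the hash function, table size, and init hook below are invented for illustration):

    #include <stdlib.h>

    #define PROTO_TAB_SIZE 32
    #define proto_hash(proto) ((proto) & (PROTO_TAB_SIZE - 1))

    struct proto_data {
        int protocol;
        struct proto_data *next;
    };

    static struct proto_data *proto_table[PROTO_TAB_SIZE];

    /* Hypothetical per-namespace init hook; may fail. */
    static int init_netns(struct proto_data *pd) { (void)pd; return 0; }

    static int register_proto(int protocol)
    {
        unsigned int hash = proto_hash(protocol);
        struct proto_data *pd = calloc(1, sizeof(*pd));
        int ret;

        if (!pd)
            return -1;
        pd->protocol = protocol;

        /* Link at the head of the chain before running the init hook. */
        pd->next = proto_table[hash];
        proto_table[hash] = pd;

        ret = init_netns(pd);
        if (ret < 0) {
            /* Roll back: pd is still the chain head, so unlinking is cheap. */
            proto_table[hash] = pd->next;
            free(pd);
        }
        return ret;
    }

Inserting before the init hook runs keeps the failure path a constant-time head removal, which is why the rollback in the excerpt is a single assignment plus kfree().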
|
D | ip_vs_proto_tcp.c |
    36           struct ip_vs_proto_data *pd,  in tcp_conn_schedule() argument
    92           *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);  in tcp_conn_schedule()
    95           *verdict = ip_vs_leave(svc, skb, pd, iph);  in tcp_conn_schedule()
    488  static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)  in tcp_timeout_change() argument
    498          pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);  in tcp_timeout_change()
    515  set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,  in set_tcp_state() argument
    539          pd->tcp_state_table[state_off+state_idx].next_state[cp->state];  in set_tcp_state()
    547          pd->pp->name,  in set_tcp_state()
    577          if (likely(pd))  in set_tcp_state()
    578                  cp->timeout = pd->timeout_table[cp->state = new_state];  in set_tcp_state()
    [all …]
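set_tcp_state() is table-driven: the next state comes from indexing a transition table with the input event and the current state, and the connection timeout is refreshed from a per-state timeout table in the same statement (cp->timeout = pd->timeout_table[cp->state = new_state]). A reduced model with made-up states, events, and timeout values:

    #include <stdio.h>

    enum state { S_NONE, S_ESTABLISHED, S_FIN_WAIT, S_CLOSE, S_MAX };
    enum event { E_SYN, E_FIN, E_RST, E_MAX };

    /* next_state[event][current state]; these transitions are illustrative only. */
    static const enum state next_state[E_MAX][S_MAX] = {
        [E_SYN] = { S_ESTABLISHED, S_ESTABLISHED, S_FIN_WAIT, S_CLOSE },
        [E_FIN] = { S_CLOSE,       S_FIN_WAIT,    S_FIN_WAIT, S_CLOSE },
        [E_RST] = { S_CLOSE,       S_CLOSE,       S_CLOSE,    S_CLOSE },
    };

    /* Per-state timeouts in seconds (standing in for pd->timeout_table). */
    static const int timeout_table[S_MAX] = { 120, 900, 120, 10 };

    struct conn { enum state state; int timeout; };

    static void set_state(struct conn *cp, enum event ev)
    {
        enum state ns = next_state[ev][cp->state];

        /* Same combined idiom as the kernel: assign the state, refresh timeout. */
        cp->timeout = timeout_table[cp->state = ns];
    }

    int main(void)
    {
        struct conn cp = { S_NONE, 0 };

        set_state(&cp, E_SYN);
        printf("state=%d timeout=%ds\n", cp.state, cp.timeout);
        return 0;
    }

Swapping pd->tcp_state_table between tcp_states and tcp_states_dos (line 498 above) changes the whole machine's behavior by substituting one such table for another, without touching the lookup code.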
|
D | ip_vs_proto_udp.c |
    33           struct ip_vs_proto_data *pd,  in udp_conn_schedule() argument
    81           *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);  in udp_conn_schedule()
    84           *verdict = ip_vs_leave(svc, skb, pd, iph);  in udp_conn_schedule()
    371          struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);  in udp_register_app() local
    382          atomic_inc(&pd->appcnt);  in udp_register_app()
    392          struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);  in udp_unregister_app() local
    394          atomic_dec(&pd->appcnt);  in udp_unregister_app()
    462          struct ip_vs_proto_data *pd)  in udp_state_transition() argument
    464          if (unlikely(!pd)) {  in udp_state_transition()
    469          cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];  in udp_state_transition()
    [all …]
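udp_register_app()/udp_unregister_app() maintain an atomic per-protocol helper counter, pd->appcnt; the connection-setup paths (see ip_vs_conn.c below) only attempt the application bind when that counter is non-zero, keeping the common no-helper case cheap. A userspace sketch of the counter-gated fast path, with C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>

    struct conn;    /* opaque here */

    struct proto_data {
        atomic_int appcnt;    /* count of registered application helpers */
    };

    static void bind_app(struct conn *cp) { (void)cp; /* expensive lookup */ }

    static void register_app(struct proto_data *pd)
    {
        atomic_fetch_add(&pd->appcnt, 1);
    }

    static void unregister_app(struct proto_data *pd)
    {
        atomic_fetch_sub(&pd->appcnt, 1);
    }

    static void conn_new(struct conn *cp, struct proto_data *pd)
    {
        /* Fast path: with no helpers registered, skip the bind entirely. */
        if (pd && atomic_load(&pd->appcnt))
            bind_app(cp);
    }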
|
D | ip_vs_proto_sctp.c |
    13           struct ip_vs_proto_data *pd,  in sctp_conn_schedule() argument
    65           *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);  in sctp_conn_schedule()
    68           *verdict = ip_vs_leave(svc, skb, pd, iph);  in sctp_conn_schedule()
    378  set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,  in set_sctp_state() argument
    441          pd->pp->name,  in set_sctp_state()
    465          if (likely(pd))  in set_sctp_state()
    466                  cp->timeout = pd->timeout_table[cp->state = next_state];  in set_sctp_state()
    473          const struct sk_buff *skb, struct ip_vs_proto_data *pd)  in sctp_state_transition() argument
    476          set_sctp_state(pd, cp, direction, skb);  in sctp_state_transition()
    492          struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);  in sctp_register_app() local
    [all …]
|
D | ip_vs_core.c |
    206          struct ip_vs_proto_data *pd)  in ip_vs_set_state() argument
    208          if (likely(pd->pp->state_transition))  in ip_vs_set_state()
    209                  pd->pp->state_transition(cp, direction, skb, pd);  in ip_vs_set_state()
    425          struct ip_vs_proto_data *pd, int *ignored,  in ip_vs_schedule() argument
    428          struct ip_vs_protocol *pp = pd->pp;  in ip_vs_schedule()
    567          struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)  in ip_vs_leave() argument
    609          ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);  in ip_vs_leave()
    612          ret = cp->packet_xmit(skb, cp, pd->pp, iph);  in ip_vs_leave()
    1248 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,  in handle_response() argument
    1252         struct ip_vs_protocol *pp = pd->pp;  in handle_response()
    [all …]
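ip_vs_set_state() itself is just a guarded indirect call: the per-netns proto data caches the protocol ops pointer (pd->pp, commented "for speed issues" in ip_vs_proto.c above), and state_transition is an optional hook that stateless protocols leave NULL. A minimal model of that dispatch (struct and field names are stand-ins):

    struct conn;    /* opaque here */

    struct proto_ops {
        const char *name;
        /* Optional hook: protocols without a state machine leave this NULL. */
        void (*state_transition)(struct conn *cp, int direction);
    };

    struct proto_data {
        struct proto_ops *pp;    /* cached ops pointer, avoids a lookup per packet */
    };

    static void set_state(struct conn *cp, int direction, struct proto_data *pd)
    {
        if (pd->pp->state_transition)
            pd->pp->state_transition(cp, direction);
    }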
|
D | ip_vs_conn.c |
    664          struct ip_vs_proto_data *pd;  in ip_vs_try_bind_dest() local
    690          pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);  in ip_vs_try_bind_dest()
    691          if (pd && atomic_read(&pd->appcnt))  in ip_vs_try_bind_dest()
    692                  ip_vs_bind_app(cp, pd->pp);  in ip_vs_try_bind_dest()
    902          struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,  in ip_vs_conn_new() local
    979          if (unlikely(pd && atomic_read(&pd->appcnt)))  in ip_vs_conn_new()
    980                  ip_vs_bind_app(cp, pd->pp);  in ip_vs_conn_new()
|
D | ip_vs_proto_ah_esp.c |
    108          struct ip_vs_proto_data *pd,  in ah_esp_conn_schedule() argument
|
D | ip_vs_ctl.c |
    2253         struct ip_vs_proto_data *pd;  in ip_vs_set_timeout() local
    2263         pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);  in ip_vs_set_timeout()
    2264         pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]  in ip_vs_set_timeout()
    2269         pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);  in ip_vs_set_timeout()
    2270         pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]  in ip_vs_set_timeout()
    2277         pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);  in ip_vs_set_timeout()
    2278         pd->timeout_table[IP_VS_UDP_S_NORMAL]  in ip_vs_set_timeout()
    2635         struct ip_vs_proto_data *pd;  in __ip_vs_get_timeouts() local
    2641         pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);  in __ip_vs_get_timeouts()
    2642         u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;  in __ip_vs_get_timeouts()
    [all …]
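The set/get pair converts between user-visible seconds and timeout_table entries stored in jiffies: ip_vs_set_timeout() multiplies by HZ on the way in, and __ip_vs_get_timeouts() divides by HZ on the way out (line 2642 above). A standalone illustration of the convention (HZ is hard-coded to 100 here; the real value is a kernel configuration constant):

    #include <stdio.h>

    #define HZ 100    /* assumption: ticks per second; a kernel config constant */

    enum { TCP_S_ESTABLISHED, TCP_S_FIN_WAIT, TCP_S_MAX };

    static unsigned long timeout_table[TCP_S_MAX];    /* stored in jiffies */

    static void set_timeout(int state, unsigned int seconds)
    {
        timeout_table[state] = (unsigned long)seconds * HZ;    /* set: s -> jiffies */
    }

    static unsigned int get_timeout(int state)
    {
        return timeout_table[state] / HZ;                      /* get: jiffies -> s */
    }

    int main(void)
    {
        set_timeout(TCP_S_ESTABLISHED, 900);
        printf("established: %u s = %lu jiffies\n",
               get_timeout(TCP_S_ESTABLISHED), timeout_table[TCP_S_ESTABLISHED]);
        return 0;
    }

Storing jiffies internally means the per-packet timer refresh (cp->timeout = pd->timeout_table[state]) never has to multiply; the conversion cost is paid only at the configuration boundary.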
|
D | ip_vs_sync.c |
    942          struct ip_vs_proto_data *pd;  in ip_vs_proc_conn() local
    944          pd = ip_vs_proto_data_get(ipvs, protocol);  in ip_vs_proc_conn()
    945          if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)  in ip_vs_proc_conn()
    946                  cp->timeout = pd->timeout_table[state];  in ip_vs_proc_conn()
|
/net/dsa/ |
D | dsa.c |
    466          struct dsa_chip_data *cd = dst->pd->chip + index;  in dsa_switch_setup()
    651  static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,  in dsa_of_setup_routing_table() argument
    675          if (link_sw_addr >= pd->nr_chips)  in dsa_of_setup_routing_table()
    683  static int dsa_of_probe_links(struct dsa_platform_data *pd,  in dsa_of_probe_links() argument
    698          if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {  in dsa_of_probe_links()
    699                  ret = dsa_of_setup_routing_table(pd, cd, chip_index,  in dsa_of_probe_links()
    708  static void dsa_of_free_platform_data(struct dsa_platform_data *pd)  in dsa_of_free_platform_data() argument
    713          for (i = 0; i < pd->nr_chips; i++) {  in dsa_of_free_platform_data()
    716                  kfree(pd->chip[i].port_names[port_index]);  in dsa_of_free_platform_data()
    721          if (pd->chip[i].host_dev)  in dsa_of_free_platform_data()
    [all …]
|
/net/9p/ |
D | trans_rdma.c |
    94           struct ib_pd *pd;  member
    363          if (rdma->pd && !IS_ERR(rdma->pd))  in rdma_destroy_trans()
    364                  ib_dealloc_pd(rdma->pd);  in rdma_destroy_trans()
    392          sge.lkey = rdma->pd->local_dma_lkey;  in post_recv()
    484          sge.lkey = rdma->pd->local_dma_lkey;  in rdma_request()
    683          rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);  in rdma_create_trans()
    684          if (IS_ERR(rdma->pd))  in rdma_create_trans()
    699          err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);  in rdma_create_trans()
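Both this code and the RDS code below deal with the ERR_PTR convention: ib_alloc_pd() reports failure by returning an encoded errno rather than NULL, so teardown must either guard with IS_ERR() (as rdma_destroy_trans() does) or normalize the pointer back to NULL right after a failed allocation (as rds_ib_add_one() does). A self-contained model of the convention, using simplified reimplementations of ERR_PTR()/IS_ERR() rather than the kernel headers:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified ERR_PTR encoding: errno values live at the very top of the
     * address space, so a pointer is "an error" if it falls in that range. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)err; }

    static inline bool IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    struct pd { int unused; };

    static struct pd *alloc_pd(bool fail)
    {
        struct pd *pd;

        if (fail)
            return ERR_PTR(-ENOMEM);    /* error path: encoded errno, never NULL */
        pd = malloc(sizeof(*pd));
        return pd ? pd : ERR_PTR(-ENOMEM);
    }

    static void destroy_pd(struct pd *pd)
    {
        /* Teardown can see a valid pointer, NULL, or an encoded errno. */
        if (pd && !IS_ERR(pd))
            free(pd);
    }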
|
/net/rds/ |
D | ib.c |
    106          if (rds_ibdev->pd)  in rds_ib_dev_free()
    107                  ib_dealloc_pd(rds_ibdev->pd);  in rds_ib_dev_free()
    163          rds_ibdev->pd = ib_alloc_pd(device, 0);  in rds_ib_add_one()
    164          if (IS_ERR(rds_ibdev->pd)) {  in rds_ib_add_one()
    165                  rds_ibdev->pd = NULL;  in rds_ib_add_one()
|
D | ib_fmr.c |
    59           fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,  in rds_ib_alloc_fmr()
|
D | ib.h |
    206          struct ib_pd *pd;  member
|
D | ib_frmr.c |
    60           frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,  in rds_ib_alloc_frmr()
|
D | ib_cm.c |
    397          ic->i_pd = rds_ibdev->pd;  in rds_ib_setup_qp()
|
/net/sctp/ |
D | ulpevent.c |
    745          struct sctp_pdapi_event *pd;  in sctp_ulpevent_make_pdapi() local
    754          pd = (struct sctp_pdapi_event *)  in sctp_ulpevent_make_pdapi()
    763          pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;  in sctp_ulpevent_make_pdapi()
    764          pd->pdapi_flags = 0;  in sctp_ulpevent_make_pdapi()
    772          pd->pdapi_length = sizeof(struct sctp_pdapi_event);  in sctp_ulpevent_make_pdapi()
    778          pd->pdapi_indication = indication;  in sctp_ulpevent_make_pdapi()
    785          pd->pdapi_assoc_id = sctp_assoc2id(asoc);  in sctp_ulpevent_make_pdapi()
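sctp_ulpevent_make_pdapi() builds the SCTP_PARTIAL_DELIVERY_EVENT notification by filling a struct sctp_pdapi_event in place: fixed type and flags, the structure length, the indication code, and the association id, per the SCTP sockets-API event format (RFC 6458). A hedged sketch of the same fill, with the struct trimmed to the fields the excerpt touches and the constants assumed (not verified here) to match the Linux uapi header:

    #include <stdint.h>
    #include <string.h>

    /* Assumed to match the Linux uapi values (enum sctp_sn_type and
     * enum sctp_pdapi_type); check <linux/sctp.h> before relying on them. */
    #define SCTP_PARTIAL_DELIVERY_EVENT   0x8006
    #define SCTP_PARTIAL_DELIVERY_ABORTED 0

    /* Trimmed to the fields the excerpt touches; the real uapi struct also
     * carries stream/sequence information in newer kernels. */
    struct sctp_pdapi_event {
        uint16_t pdapi_type;
        uint16_t pdapi_flags;
        uint32_t pdapi_length;
        uint32_t pdapi_indication;
        int32_t  pdapi_assoc_id;
    };

    static void fill_pdapi(struct sctp_pdapi_event *pd, uint32_t indication,
                           int32_t assoc_id)
    {
        memset(pd, 0, sizeof(*pd));
        pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
        pd->pdapi_flags = 0;
        pd->pdapi_length = sizeof(struct sctp_pdapi_event);
        pd->pdapi_indication = indication;    /* e.g. SCTP_PARTIAL_DELIVERY_ABORTED */
        pd->pdapi_assoc_id = assoc_id;        /* kernel derives this via sctp_assoc2id() */
    }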
|
/net/ipv4/ |
D | fou.c |
    89           __be16 *pd = data;  in gue_remcsum() local
    90           size_t start = ntohs(pd[0]);  in gue_remcsum()
    91           size_t offset = ntohs(pd[1]);  in gue_remcsum()
    289          __be16 *pd = data;  in gue_gro_remcsum() local
    290          size_t start = ntohs(pd[0]);  in gue_gro_remcsum()
    291          size_t offset = ntohs(pd[1]);  in gue_gro_remcsum()
    960          __be16 *pd = data;  in __gue_build_header() local
    966          pd[0] = htons(csum_start);  in __gue_build_header()
    967          pd[1] = htons(csum_start + skb->csum_offset);  in __gue_build_header()
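The GUE remote-checksum option payload is just two big-endian 16-bit values packed back to back: the offset where checksum coverage starts, and the offset of the 16-bit checksum field itself (csum_start + skb->csum_offset on the build side). A standalone round-trip of that encoding, with example offsets that are not taken from the source:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Build side: pack the start of checksum coverage, then the offset of
     * the checksum field (start + csum_off), both big-endian. */
    static void put_remcsum(uint16_t *pd, uint16_t csum_start, uint16_t csum_off)
    {
        pd[0] = htons(csum_start);
        pd[1] = htons(csum_start + csum_off);
    }

    /* Receive side: unpack the same two values. */
    static void get_remcsum(const uint16_t *pd, size_t *start, size_t *offset)
    {
        *start = ntohs(pd[0]);
        *offset = ntohs(pd[1]);
    }

    int main(void)
    {
        uint16_t opt[2];    /* the 4-byte GUE remcsum option payload */
        size_t start, offset;

        put_remcsum(opt, 14, 16);    /* example offsets, not from the source */
        get_remcsum(opt, &start, &offset);
        printf("start=%zu offset=%zu\n", start, offset);
        return 0;
    }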
|