/net/l2tp/
l2tp_debugfs.c:

  l2tp_dfs_next_tunnel():
    42  static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
    45          if (pd->tunnel)
    46                  l2tp_tunnel_dec_refcount(pd->tunnel);
    48          pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
    49          pd->tunnel_idx++;

  l2tp_dfs_next_session():
    52  static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
    55          if (pd->session)
    56                  l2tp_session_dec_refcount(pd->session);
    58          pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
    59          pd->session_idx++;

  [all …]

l2tp_ppp.c:

  pppol2tp_next_tunnel():
    1420  static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
    1423          if (pd->tunnel)
    1424                  l2tp_tunnel_dec_refcount(pd->tunnel);
    1427          pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
    1428          pd->tunnel_idx++;
    1431          if (!pd->tunnel || pd->tunnel->version == 2)
    1434          l2tp_tunnel_dec_refcount(pd->tunnel);

  pppol2tp_next_session():
    1438  static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
    1441          if (pd->session)
    1442                  l2tp_session_dec_refcount(pd->session);

  [all …]

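Both iterators above follow the same shape: drop the reference taken on the object returned by the previous step, then look up the next object by index with a fresh reference held (the pppol2tp variant additionally skips tunnels that are not L2TPv2). Below is a minimal userspace sketch of that refcounted get-nth walk; all types and helpers are illustrative stand-ins, not the kernel's l2tp API.

/*
 * Sketch of the refcounted get-nth iteration used by the l2tp
 * seq_file code above. Illustrative stand-ins only.
 */
#include <stdio.h>

#define NR_TUNNELS 4

struct tunnel {
        int id;
        int refcount;
};

static struct tunnel *tunnels[NR_TUNNELS];

/* Analogue of l2tp_tunnel_get_nth(): nth tunnel with a reference held. */
static struct tunnel *tunnel_get_nth(int nth)
{
        if (nth < 0 || nth >= NR_TUNNELS || !tunnels[nth])
                return NULL;
        tunnels[nth]->refcount++;
        return tunnels[nth];
}

static void tunnel_dec_refcount(struct tunnel *t)
{
        t->refcount--;
}

struct seq_cursor {
        struct tunnel *tunnel;
        int tunnel_idx;
};

/* Mirrors l2tp_dfs_next_tunnel(): put the old object, get the next. */
static void cursor_next_tunnel(struct seq_cursor *pd)
{
        if (pd->tunnel)
                tunnel_dec_refcount(pd->tunnel);
        pd->tunnel = tunnel_get_nth(pd->tunnel_idx);
        pd->tunnel_idx++;
}

int main(void)
{
        struct seq_cursor pd = { NULL, 0 };
        struct tunnel a = { 1, 0 }, b = { 2, 0 };

        tunnels[0] = &a;
        tunnels[1] = &b;

        for (cursor_next_tunnel(&pd); pd.tunnel; cursor_next_tunnel(&pd))
                printf("tunnel %d (refcount now %d)\n",
                       pd.tunnel->id, pd.tunnel->refcount);
        return 0;
}
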
/net/netfilter/ipvs/
ip_vs_proto.c:

  register_ip_vs_proto_netns():
    69          struct ip_vs_proto_data *pd =
    72          if (!pd)
    75          pd->pp = pp;                    /* For speed issues */
    76          pd->next = ipvs->proto_data_table[hash];
    77          ipvs->proto_data_table[hash] = pd;
    78          atomic_set(&pd->appcnt, 0);     /* Init app counter */
    81                  int ret = pp->init_netns(ipvs, pd);
    84                  ipvs->proto_data_table[hash] = pd->next;
    85                  kfree(pd);

  unregister_ip_vs_proto_netns():
    118  unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)

  [all …]

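register_ip_vs_proto_netns() inserts the new per-netns protocol data at the head of a hash chain and unlinks it again if the protocol's init_netns hook fails. A compressed sketch of that insert-then-roll-back shape, with simplified stand-in types in place of the IPVS structures:

/*
 * Sketch of head insertion into a hash chain with rollback on a
 * failed init hook, as in register_ip_vs_proto_netns().
 */
#include <stdio.h>
#include <stdlib.h>

#define PROTO_TAB_SIZE 32

struct proto_data {
        int protocol;
        struct proto_data *next;
};

static struct proto_data *proto_data_table[PROTO_TAB_SIZE];

static unsigned int proto_hash(int protocol)
{
        return protocol & (PROTO_TAB_SIZE - 1);
}

/* Stand-in for pp->init_netns(); nonzero means failure. */
static int init_netns(struct proto_data *pd)
{
        (void)pd;
        return 0;
}

static int register_proto(int protocol)
{
        unsigned int hash = proto_hash(protocol);
        struct proto_data *pd = calloc(1, sizeof(*pd));

        if (!pd)
                return -1;

        pd->protocol = protocol;
        pd->next = proto_data_table[hash];      /* head insertion */
        proto_data_table[hash] = pd;

        if (init_netns(pd)) {
                /* roll back: pd is still the chain head */
                proto_data_table[hash] = pd->next;
                free(pd);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("register: %d\n", register_proto(6 /* TCP */));
        return 0;
}
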
ip_vs_proto_tcp.c:

  tcp_conn_schedule():
    36                  struct ip_vs_proto_data *pd,
    90          *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
    93          *verdict = ip_vs_leave(svc, skb, pd, iph);

  tcp_timeout_change():
    485  static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
    495          pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);

  set_tcp_state():
    512  set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
    536                  pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
    544                          pd->pp->name,
    578          if (likely(pd))
    579                  cp->timeout = pd->timeout_table[cp->state = new_state];

  [all …]

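set_tcp_state() drives the connection through a transition table: the observed TCP event plus direction picks a row, the current connection state indexes that row's next_state array, and the new state selects a timeout (with a DoS-mode table swapped in by tcp_timeout_change()). A toy, self-contained version of that table-driven idiom follows; the states, events, and timeout values are invented for illustration.

/*
 * Sketch of a table-driven state machine in the style of
 * set_tcp_state(). Toy state/event sets, illustrative timeouts.
 */
#include <stdio.h>

enum state { S_NONE, S_ESTABLISHED, S_FIN_WAIT, S_NR };
enum event { E_SYN, E_FIN, E_NR };

struct state_entry {
        int next_state[S_NR];
};

static const struct state_entry state_table[E_NR] = {
        [E_SYN] = { { S_ESTABLISHED, S_ESTABLISHED, S_ESTABLISHED } },
        [E_FIN] = { { S_NONE,        S_FIN_WAIT,    S_FIN_WAIT    } },
};

static const int timeout_table[S_NR] = {
        [S_NONE] = 2, [S_ESTABLISHED] = 900, [S_FIN_WAIT] = 120,
};

struct conn {
        int state;
        int timeout;
};

static void set_state(struct conn *cp, enum event ev)
{
        int new_state = state_table[ev].next_state[cp->state];

        /* same idiom as the kernel: assign state and timeout together */
        cp->timeout = timeout_table[cp->state = new_state];
}

int main(void)
{
        struct conn cp = { S_NONE, 0 };

        set_state(&cp, E_SYN);
        printf("state=%d timeout=%d\n", cp.state, cp.timeout);
        set_state(&cp, E_FIN);
        printf("state=%d timeout=%d\n", cp.state, cp.timeout);
        return 0;
}
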
ip_vs_proto_udp.c:

  udp_conn_schedule():
    32                  struct ip_vs_proto_data *pd,
    78          *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
    81          *verdict = ip_vs_leave(svc, skb, pd, iph);

  udp_register_app():
    367         struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
    378         atomic_inc(&pd->appcnt);

  udp_unregister_app():
    388         struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
    390         atomic_dec(&pd->appcnt);

  udp_state_transition():
    454                         struct ip_vs_proto_data *pd)
    456         if (unlikely(!pd)) {
    461         cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];

  [all …]

ip_vs_proto_sctp.c:

  sctp_conn_schedule():
    17                  struct ip_vs_proto_data *pd,
    70          *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
    73          *verdict = ip_vs_leave(svc, skb, pd, iph);

  set_sctp_state():
    378  set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
    441                          pd->pp->name,
    467         if (likely(pd))
    468                 cp->timeout = pd->timeout_table[cp->state = next_state];

  sctp_state_transition():
    475                         const struct sk_buff *skb, struct ip_vs_proto_data *pd)
    478         set_sctp_state(pd, cp, direction, skb);

  sctp_register_app():
    494         struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);

  [all …]

ip_vs_core.c:

  ip_vs_set_state():
    223                         struct ip_vs_proto_data *pd)
    225         if (likely(pd->pp->state_transition))
    226                 pd->pp->state_transition(cp, direction, skb, pd);

  ip_vs_schedule():
    442                 struct ip_vs_proto_data *pd, int *ignored,
    445         struct ip_vs_protocol *pp = pd->pp;

  ip_vs_leave():
    586                 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
    628         ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
    631         ret = cp->packet_xmit(skb, cp, pd->pp, iph);

  handle_response():
    1260  handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
    1264        struct ip_vs_protocol *pp = pd->pp;

  [all …]

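ip_vs_set_state() is pure indirection: pd carries a pointer to the protocol descriptor (pp), and the per-protocol state_transition hook is called only if the protocol installed one. A reduced sketch of that ops-table dispatch, with stand-in structures in place of the IPVS ones:

/*
 * Sketch of guarded function-pointer dispatch through per-protocol
 * data, as in ip_vs_set_state(). Stand-in types only.
 */
#include <stdio.h>

struct conn;
struct proto_data;

struct protocol {
        const char *name;
        void (*state_transition)(struct conn *cp, int direction,
                                 struct proto_data *pd);
};

struct proto_data {
        const struct protocol *pp;
        int timeout_table[4];
};

struct conn {
        int state;
        int timeout;
};

static void tcp_state_transition(struct conn *cp, int direction,
                                 struct proto_data *pd)
{
        (void)direction;
        cp->state = 1;
        cp->timeout = pd->timeout_table[cp->state];
}

/* Mirrors ip_vs_set_state(): guard the hook, then dispatch. */
static void set_state(struct conn *cp, int direction, struct proto_data *pd)
{
        if (pd->pp->state_transition)
                pd->pp->state_transition(cp, direction, pd);
}

int main(void)
{
        const struct protocol tcp = { "tcp", tcp_state_transition };
        struct proto_data pd = { &tcp, { 2, 900, 120, 0 } };
        struct conn cp = { 0, 0 };

        set_state(&cp, 0, &pd);
        printf("%s: state=%d timeout=%d\n", pd.pp->name, cp.state, cp.timeout);
        return 0;
}
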
ip_vs_conn.c:

  ip_vs_try_bind_dest():
    666         struct ip_vs_proto_data *pd;
    692         pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
    693         if (pd && atomic_read(&pd->appcnt))
    694                 ip_vs_bind_app(cp, pd->pp);

  ip_vs_conn_new():
    947         struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
    1024        if (unlikely(pd && atomic_read(&pd->appcnt)))
    1025                ip_vs_bind_app(cp, pd->pp);

ip_vs_ctl.c:

  ip_vs_set_timeout():
    2343        struct ip_vs_proto_data *pd;
    2365        pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
    2366        pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
    2371        pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
    2372        pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
    2379        pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
    2380        pd->timeout_table[IP_VS_UDP_S_NORMAL]

  __ip_vs_get_timeouts():
    2735        struct ip_vs_proto_data *pd;
    2741        pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
    2742        u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;

  [all …]

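The timeout tables store jiffies while the userspace interface speaks seconds, so the setter multiplies by HZ and the getter divides by it (visible in the `/ HZ` on line 2742). A trivial sketch of that conversion, with HZ faked as a constant since the real value is a kernel configuration choice:

/* Sketch of the seconds<->jiffies conversion around the timeout tables. */
#include <stdio.h>

#define HZ 250  /* illustrative; not a fixed value in real kernels */

static long timeout_table[3];

static void set_timeout(int state, long seconds)
{
        timeout_table[state] = seconds * HZ;
}

static long get_timeout(int state)
{
        return timeout_table[state] / HZ;
}

int main(void)
{
        set_timeout(0, 900);    /* e.g. TCP ESTABLISHED */
        printf("stored=%ld jiffies, read back=%ld s\n",
               timeout_table[0], get_timeout(0));
        return 0;
}
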
ip_vs_proto_ah_esp.c:

  ah_esp_conn_schedule():
    104                 struct ip_vs_proto_data *pd,

ip_vs_sync.c:

  ip_vs_proc_conn():
    945         struct ip_vs_proto_data *pd;
    947         pd = ip_vs_proto_data_get(ipvs, protocol);
    948         if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
    949                 cp->timeout = pd->timeout_table[state];

ip_vs_xmit.c:

  ipvs_gue_encap():
    1056        __be16 *pd;
    1066        pd = data;
    1067        pd[0] = htons(csum_start);
    1068        pd[1] = htons(csum_start + skb->csum_offset);

/net/9p/
trans_rdma.c:

  struct member:
    80          struct ib_pd *pd;

  rdma_destroy_trans():
    373         if (rdma->pd && !IS_ERR(rdma->pd))
    374                 ib_dealloc_pd(rdma->pd);

  post_recv():
    403         sge.lkey = rdma->pd->local_dma_lkey;

  rdma_request():
    500         sge.lkey = rdma->pd->local_dma_lkey;

  rdma_create_trans():
    707         rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
    708         if (IS_ERR(rdma->pd))
    723         err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);

/net/rds/
ib.c:

  rds_ib_dev_free():
    108         if (rds_ibdev->pd)
    109                 ib_dealloc_pd(rds_ibdev->pd);

  rds_ib_add_one():
    186         rds_ibdev->pd = ib_alloc_pd(device, 0);
    187         if (IS_ERR(rds_ibdev->pd)) {
    188                 ret = PTR_ERR(rds_ibdev->pd);
    189                 rds_ibdev->pd = NULL;

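Both the 9p and RDS snippets rely on the kernel's encoded-error-pointer convention: ib_alloc_pd() returns either a valid protection domain or an errno packed into the pointer value, tested with IS_ERR() and extracted with PTR_ERR() (never NULL plus a separate error). A userspace re-creation of that convention; the macros are simplified copies of the kernel idiom and alloc_pd() is a made-up stand-in:

/* Sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom seen around ib_alloc_pd(). */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct pd { int dummy; };

static struct pd *alloc_pd(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);
        return calloc(1, sizeof(struct pd));
}

int main(void)
{
        struct pd *pd = alloc_pd(1);

        if (IS_ERR(pd)) {
                /* same shape as rds_ib_add_one(): extract errno, clear ptr */
                long ret = PTR_ERR(pd);

                pd = NULL;
                printf("alloc failed: %ld\n", ret);
                return 1;
        }
        free(pd);
        return 0;
}
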
ib_rdma.c:

  rds_ib_get_mr():
    580         ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
    605         ib_advise_mr(rds_ibdev->pd,

ib.h:

  struct member:
    248         struct ib_pd *pd;

ib_frmr.c:

  rds_ib_alloc_frmr():
    78          frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,

ib_cm.c:

  rds_ib_setup_qp():
    540         ic->i_pd = rds_ibdev->pd;

/net/sctp/
ulpevent.c:

  sctp_ulpevent_make_pdapi():
    777         struct sctp_pdapi_event *pd;
    786         pd = skb_put(skb, sizeof(struct sctp_pdapi_event));
    794         pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
    795         pd->pdapi_flags = flags;
    796         pd->pdapi_stream = sid;
    797         pd->pdapi_seq = seq;
    805         pd->pdapi_length = sizeof(struct sctp_pdapi_event);
    811         pd->pdapi_indication = indication;
    818         pd->pdapi_assoc_id = sctp_assoc2id(asoc);

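The event is built by reserving sizeof(struct sctp_pdapi_event) bytes at the tail of the skb with skb_put() and filling the fixed-layout notification in place. A userspace sketch of that reserve-and-fill pattern follows; the buffer type, the buf_put() helper, the field layout, and the event-type constant are all illustrative, not the kernel's definitions.

/* Sketch of the skb_put()-then-fill pattern of sctp_ulpevent_make_pdapi(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct pdapi_event {
        uint16_t pdapi_type;
        uint16_t pdapi_flags;
        uint32_t pdapi_length;
        uint32_t pdapi_indication;
        uint16_t pdapi_stream;
        uint16_t pdapi_seq;
        int32_t  pdapi_assoc_id;
};

#define PARTIAL_DELIVERY_EVENT 0x0007   /* illustrative value */

struct buf {
        unsigned char data[256];
        size_t len;
};

/* Analogue of skb_put(): extend the buffer tail, return the new space. */
static void *buf_put(struct buf *b, size_t n)
{
        void *p = b->data + b->len;

        b->len += n;
        memset(p, 0, n);
        return p;
}

int main(void)
{
        struct buf b = { .len = 0 };
        struct pdapi_event *pd = buf_put(&b, sizeof(*pd));

        pd->pdapi_type = PARTIAL_DELIVERY_EVENT;
        pd->pdapi_flags = 0;
        pd->pdapi_length = sizeof(*pd);
        pd->pdapi_indication = 1;       /* e.g. aborted partial delivery */
        pd->pdapi_stream = 3;
        pd->pdapi_seq = 42;
        pd->pdapi_assoc_id = 1;

        printf("event of %u bytes queued\n", (unsigned)pd->pdapi_length);
        return 0;
}
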
/net/ipv4/
fou.c:

  gue_remcsum():
    92          __be16 *pd = data;
    93          size_t start = ntohs(pd[0]);
    94          size_t offset = ntohs(pd[1]);

  gue_gro_remcsum():
    292         __be16 *pd = data;
    293         size_t start = ntohs(pd[0]);
    294         size_t offset = ntohs(pd[1]);

  __gue_build_header():
    1036        __be16 *pd = data;
    1042        pd[0] = htons(csum_start);
    1043        pd[1] = htons(csum_start + skb->csum_offset);

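The GUE remote-checksum-offload option is just two big-endian 16-bit offsets: the sender (__gue_build_header() here, ipvs_gue_encap() above) stores where checksumming starts and where the checksum field sits, and the receiver (gue_remcsum()) reads them back. A small standalone sketch of that encode/decode, using plain uint16_t in place of __be16:

/* Sketch of the two-offset GUE remote-checksum-offload field. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* encode: write csum_start and csum_start + csum_offset */
static void remcsum_encode(uint16_t *pd, size_t csum_start,
                           size_t csum_offset)
{
        pd[0] = htons((uint16_t)csum_start);
        pd[1] = htons((uint16_t)(csum_start + csum_offset));
}

/* decode: recover the two offsets on the receive side */
static void remcsum_decode(const uint16_t *pd, size_t *start, size_t *offset)
{
        *start = ntohs(pd[0]);
        *offset = ntohs(pd[1]);
}

int main(void)
{
        uint16_t pd[2];
        size_t start, offset;

        remcsum_encode(pd, 34, 6);      /* checksum field 6 bytes past start */
        remcsum_decode(pd, &start, &offset);
        printf("start=%zu csum field at=%zu\n", start, offset);
        return 0;
}
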
/net/smc/
smc_ib.h:

    84   int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,

smc_ib.c:

  smc_ib_get_memory_region():
    575  int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
    582                 ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
