/net/rds/ |
D | transport.c |
      44  void rds_trans_register(struct rds_transport *trans)  in rds_trans_register() argument
      46  BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ);  in rds_trans_register()
      50  if (transports[trans->t_type])  in rds_trans_register()
      52  trans->t_type);  in rds_trans_register()
      54  transports[trans->t_type] = trans;  in rds_trans_register()
      55  printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name);  in rds_trans_register()
      62  void rds_trans_unregister(struct rds_transport *trans)  in rds_trans_unregister() argument
      66  transports[trans->t_type] = NULL;  in rds_trans_unregister()
      67  printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name);  in rds_trans_unregister()
      73  void rds_trans_put(struct rds_transport *trans)  in rds_trans_put() argument
      [all …]
|
D | connection.c |
      86  struct rds_transport *trans,  in rds_conn_lookup() argument
      94  conn->c_trans == trans &&  in rds_conn_lookup()
     163  struct rds_transport *trans,  in __rds_conn_create() argument
     173  int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);  in __rds_conn_create()
     176  conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if);  in __rds_conn_create()
     243  if (trans->t_prefer_loopback) {  in __rds_conn_create()
     250  trans = &rds_loop_transport;  in __rds_conn_create()
     264  conn->c_trans = trans;  in __rds_conn_create()
     276  ret = trans->conn_alloc(conn, GFP_ATOMIC);  in __rds_conn_create()
     287  strnlen(trans->t_name, sizeof(trans->t_name)) ?  in __rds_conn_create()
     [all …]
|
D | bind.c |
     168  struct rds_transport *trans;  in rds_bind() local
     246  trans = rs->rs_transport;  in rds_bind()
     247  if (!trans->laddr_check ||  in rds_bind()
     248  trans->laddr_check(sock_net(sock->sk),  in rds_bind()
     254  trans = rds_trans_get_preferred(sock_net(sock->sk),  in rds_bind()
     256  if (!trans) {  in rds_bind()
     262  rs->rs_transport = trans;  in rds_bind()
|
D | rdma_transport.c |
      55  struct rds_transport *trans;  in rds_rdma_cm_event_handler_cmn() local
      64  trans = &rds_ib_transport;  in rds_rdma_cm_event_handler_cmn()
      85  ret = trans->cm_handle_connect(cm_id, event, isv6);  in rds_rdma_cm_event_handler_cmn()
     108  ret = trans->cm_initiate_connect(cm_id, isv6);  in rds_rdma_cm_event_handler_cmn()
     117  trans->cm_connect_complete(conn, event);  in rds_rdma_cm_event_handler_cmn()
|
D | stats.c |
     126  goto trans;  in rds_stats_info()
     140  trans:  in rds_stats_info()
|
D | rds.h |
     772  struct rds_transport *trans,
     778  struct rds_transport *trans,
    1004  void rds_trans_register(struct rds_transport *trans);
    1005  void rds_trans_unregister(struct rds_transport *trans);
    1009  void rds_trans_put(struct rds_transport *trans);
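
The transport.c excerpts above show the registration pattern: transports live in a small array indexed by t_type, register fills the slot (refusing a duplicate) and unregister clears it. Below is a minimal, self-contained userspace sketch of that pattern, not the kernel code itself: the struct keeps only the t_type/t_name fields visible in the excerpts, TRANSNAMSIZ and RDS_TRANS_COUNT values are assumptions, and a pthread mutex stands in for the kernel's lock.

/* Simplified userspace model of rds_trans_register()/rds_trans_unregister(). */
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>

#define TRANSNAMSIZ     16   /* assumed value; the excerpt only shows the check */
#define RDS_TRANS_COUNT  3   /* assumed number of transport types */

struct rds_transport {
    unsigned int t_type;              /* index into the registry */
    char         t_name[TRANSNAMSIZ]; /* other fields omitted here */
};

static struct rds_transport *transports[RDS_TRANS_COUNT];
static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;

static int trans_register(struct rds_transport *trans)
{
    int ret = 0;

    assert(strlen(trans->t_name) + 1 <= TRANSNAMSIZ); /* mirrors the BUG_ON */

    pthread_mutex_lock(&trans_lock);
    if (transports[trans->t_type])
        ret = -1;                     /* slot already taken */
    else
        transports[trans->t_type] = trans;
    pthread_mutex_unlock(&trans_lock);

    if (!ret)
        printf("Registered RDS/%s transport\n", trans->t_name);
    return ret;
}

static void trans_unregister(struct rds_transport *trans)
{
    pthread_mutex_lock(&trans_lock);
    transports[trans->t_type] = NULL;
    pthread_mutex_unlock(&trans_lock);
    printf("Unregistered RDS/%s transport\n", trans->t_name);
}

int main(void)
{
    struct rds_transport tcp = { .t_type = 0, .t_name = "tcp" };

    trans_register(&tcp);
    trans_unregister(&tcp);
    return 0;
}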
|
/net/netfilter/ |
D | nf_tables_api.c |
     122  struct nft_trans *trans;  in nft_trans_alloc_gfp() local
     124  trans = kzalloc(sizeof(struct nft_trans) + size, gfp);  in nft_trans_alloc_gfp()
     125  if (trans == NULL)  in nft_trans_alloc_gfp()
     128  INIT_LIST_HEAD(&trans->list);  in nft_trans_alloc_gfp()
     129  INIT_LIST_HEAD(&trans->binding_list);  in nft_trans_alloc_gfp()
     130  trans->msg_type = msg_type;  in nft_trans_alloc_gfp()
     131  trans->ctx = *ctx;  in nft_trans_alloc_gfp()
     133  return trans;  in nft_trans_alloc_gfp()
     142  static void nft_trans_list_del(struct nft_trans *trans)  in nft_trans_list_del() argument
     144  list_del(&trans->list);  in nft_trans_list_del()
     [all …]
|
D | nf_tables_offload.c |
     351  struct nft_trans *trans;  in nft_flow_rule_offload_commit() local
     355  list_for_each_entry(trans, &nft_net->commit_list, list) {  in nft_flow_rule_offload_commit()
     356  if (trans->ctx.family != NFPROTO_NETDEV)  in nft_flow_rule_offload_commit()
     359  switch (trans->msg_type) {  in nft_flow_rule_offload_commit()
     361  if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||  in nft_flow_rule_offload_commit()
     362  nft_trans_chain_update(trans))  in nft_flow_rule_offload_commit()
     365  policy = nft_trans_chain_policy(trans);  in nft_flow_rule_offload_commit()
     366  err = nft_flow_offload_chain(trans->ctx.chain, &policy,  in nft_flow_rule_offload_commit()
     370  if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))  in nft_flow_rule_offload_commit()
     373  policy = nft_trans_chain_policy(trans);  in nft_flow_rule_offload_commit()
     [all …]
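
nft_trans_alloc_gfp() above illustrates how nftables builds a transaction object: one zeroed allocation holds the fixed nft_trans header plus a msg_type-specific payload of `size` bytes, and the object carries its own list node so it can sit on the per-netns commit list that nft_flow_rule_offload_commit() later walks. The sketch below is a simplified userspace rendering of that allocation pattern; the list type and fields are stand-ins, not the kernel's definitions.

/* Userspace sketch of the "header + trailing payload" transaction allocation. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *n) { n->prev = n->next = n; }

struct nft_trans_sketch {
    struct list_node list;       /* linkage on the commit list */
    int              msg_type;   /* e.g. a "new rule" or "del chain" code */
    char             data[];     /* trailing, message-specific payload */
};

static struct nft_trans_sketch *trans_alloc(int msg_type, size_t size)
{
    /* one zeroed allocation for header + payload, as nft_trans_alloc_gfp() does */
    struct nft_trans_sketch *trans = calloc(1, sizeof(*trans) + size);

    if (!trans)
        return NULL;
    list_init(&trans->list);
    trans->msg_type = msg_type;
    return trans;
}

int main(void)
{
    struct nft_trans_sketch *t = trans_alloc(1 /* hypothetical msg type */, 32);

    if (t) {
        memset(t->data, 0, 32);  /* payload area lives right after the header */
        printf("msg_type=%d, payload=32 bytes\n", t->msg_type);
        free(t);
    }
    return 0;
}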
|
/net/switchdev/ |
D | switchdev.c |
     104  struct switchdev_trans *trans)  in switchdev_port_attr_notify() argument
     111  .trans = trans,  in switchdev_port_attr_notify()
     132  struct switchdev_trans trans;  in switchdev_port_attr_set_now() local
     142  trans.ph_prepare = true;  in switchdev_port_attr_set_now()
     144  &trans);  in switchdev_port_attr_set_now()
     153  trans.ph_prepare = false;  in switchdev_port_attr_set_now()
     155  &trans);  in switchdev_port_attr_set_now()
     224  struct switchdev_trans *trans,  in switchdev_port_obj_notify() argument
     232  .trans = trans,  in switchdev_port_obj_notify()
     251  struct switchdev_trans trans;  in switchdev_port_obj_add_now()  local
     [all …]
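
switchdev_port_attr_set_now() above shows the two-phase transaction model: the same notifier is invoked first with trans.ph_prepare = true (drivers only validate and reserve) and, if that succeeded, again with ph_prepare = false to actually apply the change. A minimal model of that flow follows; the attribute struct, the handler, and the STP range check are invented for illustration only.

/* Two-phase (prepare/commit) attribute set, modeled in plain C. */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct switchdev_trans_sketch { bool ph_prepare; };
struct port_attr_sketch        { int stp_state; };

static int port_attr_set(const struct port_attr_sketch *attr,
                         const struct switchdev_trans_sketch *trans)
{
    if (trans->ph_prepare) {
        /* prepare phase: only validate, do not touch hardware state */
        if (attr->stp_state < 0 || attr->stp_state > 4)
            return -EINVAL;
        return 0;
    }
    /* commit phase: the prepare phase promised this cannot fail */
    printf("applying stp_state=%d\n", attr->stp_state);
    return 0;
}

int main(void)
{
    struct port_attr_sketch       attr  = { .stp_state = 3 };
    struct switchdev_trans_sketch trans = { .ph_prepare = true };

    if (port_attr_set(&attr, &trans))        /* phase 1: prepare */
        return 1;

    trans.ph_prepare = false;
    port_attr_set(&attr, &trans);            /* phase 2: commit */
    return 0;
}

The DSA entries that follow consume the same switchdev_trans object: each dsa_port_* helper checks switchdev_trans_ph_prepare() and bails out of the prepare phase early.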
|
/net/dsa/ |
D | dsa_priv.h |
      30  struct switchdev_trans *trans;  member
      52  struct switchdev_trans *trans;  member
      60  struct switchdev_trans *trans;  member
     133  struct switchdev_trans *trans);
     141  struct switchdev_trans *trans);
     143  struct switchdev_trans *trans);
     151  struct switchdev_trans *trans);
     155  struct switchdev_trans *trans);
     157  struct switchdev_trans *trans);
     159  struct switchdev_trans *trans);
     [all …]
|
D | port.c |
      27  struct switchdev_trans *trans)  in dsa_port_set_state() argument
      32  if (switchdev_trans_ph_prepare(trans))  in dsa_port_set_state()
     211  struct switchdev_trans *trans)  in dsa_port_vlan_filtering() argument
     217  if (switchdev_trans_ph_prepare(trans))  in dsa_port_vlan_filtering()
     242  struct switchdev_trans *trans)  in dsa_port_ageing_time() argument
     248  .trans = trans,  in dsa_port_ageing_time()
     251  if (switchdev_trans_ph_prepare(trans))  in dsa_port_ageing_time()
     260  struct switchdev_trans *trans)  in dsa_port_pre_bridge_flags() argument
     272  struct switchdev_trans *trans)  in dsa_port_bridge_flags() argument
     278  if (switchdev_trans_ph_prepare(trans))  in dsa_port_bridge_flags()
     [all …]
|
D | switch.c |
      36  struct switchdev_trans *trans = info->trans;  in dsa_switch_ageing_time() local
      38  if (switchdev_trans_ph_prepare(trans)) {  in dsa_switch_ageing_time()
      99  struct switchdev_trans trans = {0};  in dsa_switch_bridge_leave() local
     102  false, &trans);  in dsa_switch_bridge_leave()
     167  if (switchdev_trans_ph_prepare(info->trans))  in dsa_switch_mdb_add()
     269  if (switchdev_trans_ph_prepare(info->trans))  in dsa_switch_vlan_add()
|
D | slave.c |
     277  struct switchdev_trans *trans)  in dsa_slave_port_attr_set() argument
     284  ret = dsa_port_set_state(dp, attr->u.stp_state, trans);  in dsa_slave_port_attr_set()
     288  trans);  in dsa_slave_port_attr_set()
     291  ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);  in dsa_slave_port_attr_set()
     295  trans);  in dsa_slave_port_attr_set()
     298  ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);  in dsa_slave_port_attr_set()
     301  ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);  in dsa_slave_port_attr_set()
     313  struct switchdev_trans *trans)  in dsa_slave_vlan_add() argument
     327  err = dsa_port_vlan_add(dp, &vlan, trans);  in dsa_slave_vlan_add()
     337  err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);  in dsa_slave_vlan_add()
     [all …]
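
dsa_switch_ageing_time() in switch.c above is a concrete use of the prepare phase: the new ageing time is range-checked against the switch's limits while ph_prepare is set, and the hardware is only programmed in the commit pass. The sketch below mirrors that shape with invented limits and a plain boolean in place of the switchdev_trans object; it is an illustration of the pattern, not the DSA code.

/* Prepare-phase validation, commit-phase programming (illustrative values). */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct dsa_switch_sketch {
    unsigned int ageing_time_min;   /* ms, assumed hardware limits */
    unsigned int ageing_time_max;
    unsigned int programmed;        /* what the "hardware" currently holds */
};

static int switch_set_ageing_time(struct dsa_switch_sketch *ds,
                                  unsigned int msecs, bool ph_prepare)
{
    if (ph_prepare) {
        if (ds->ageing_time_min && msecs < ds->ageing_time_min)
            return -ERANGE;
        if (ds->ageing_time_max && msecs > ds->ageing_time_max)
            return -ERANGE;
        return 0;                   /* nothing programmed yet */
    }

    ds->programmed = msecs;         /* commit phase: apply for real */
    return 0;
}

int main(void)
{
    struct dsa_switch_sketch ds = {
        .ageing_time_min = 1000, .ageing_time_max = 3600000, /* assumed */
    };
    unsigned int msecs = 300000;

    if (!switch_set_ageing_time(&ds, msecs, true))   /* prepare */
        switch_set_ageing_time(&ds, msecs, false);   /* commit */

    printf("ageing time now %u ms\n", ds.programmed);
    return 0;
}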
|
/net/sctp/ |
D | associola.c |
    1102  struct sctp_transport *trans;  in sctp_assoc_update() local
    1118  trans = list_entry(pos, struct sctp_transport, transports);  in sctp_assoc_update()
    1119  if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {  in sctp_assoc_update()
    1120  sctp_assoc_rm_peer(asoc, trans);  in sctp_assoc_update()
    1125  sctp_transport_reset(trans);  in sctp_assoc_update()
    1157  list_for_each_entry(trans, &new->peer.transport_addr_list,  in sctp_assoc_update()
    1159  if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,  in sctp_assoc_update()
    1160  GFP_ATOMIC, trans->state))  in sctp_assoc_update()
    1220  static u8 sctp_trans_score(const struct sctp_transport *trans)  in sctp_trans_score() argument
    1222  switch (trans->state) {  in sctp_trans_score()
    [all …]
|
D | socket.c |
     642  struct sctp_transport *trans;  in sctp_send_asconf_add_ip() local
     644  list_for_each_entry(trans,  in sctp_send_asconf_add_ip()
     646  trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,  in sctp_send_asconf_add_ip()
     648  trans->ssthresh = asoc->peer.i.a_rwnd;  in sctp_send_asconf_add_ip()
     649  trans->rto = asoc->rto_initial;  in sctp_send_asconf_add_ip()
     650  sctp_max_rto(asoc, trans);  in sctp_send_asconf_add_ip()
     651  trans->rtt = trans->srtt = trans->rttvar = 0;  in sctp_send_asconf_add_ip()
     653  sctp_transport_route(trans, NULL,  in sctp_send_asconf_add_ip()
    2459  struct sctp_transport *trans,  in sctp_apply_peer_addr_params() argument
    2468  if (params->spp_flags & SPP_HB_DEMAND && trans) {  in sctp_apply_peer_addr_params()
    [all …]
|
D | tsnmap.c |
      96  struct sctp_transport *trans)  in sctp_tsnmap_mark() argument
     115  if (trans)  in sctp_tsnmap_mark()
     116  trans->sack_generation =  in sctp_tsnmap_mark()
     117  trans->asoc->peer.sack_generation;  in sctp_tsnmap_mark()
|
D | transport.c |
     625  unsigned long sctp_transport_timeout(struct sctp_transport *trans)  in sctp_transport_timeout() argument
     628  unsigned long timeout = trans->rto >> 1;  in sctp_transport_timeout()
     630  if (trans->state != SCTP_UNCONFIRMED &&  in sctp_transport_timeout()
     631  trans->state != SCTP_PF)  in sctp_transport_timeout()
     632  timeout += trans->hbinterval;  in sctp_transport_timeout()
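
The sctp_transport_timeout() excerpt above is small enough to restate as a worked example: the timer starts at half the transport's RTO and, unless the peer address is still unconfirmed or in the potentially-failed (PF) state, the heartbeat interval is added on top. The standalone sketch below assumes millisecond units and invents the enum values; only the arithmetic mirrors the excerpt.

/* Heartbeat-timeout computation, modeled after the excerpt above. */
#include <stdio.h>

enum trans_state { SCTP_UNCONFIRMED, SCTP_PF, SCTP_ACTIVE };

struct sctp_transport_sketch {
    enum trans_state state;
    unsigned long    rto;         /* retransmission timeout, ms (assumed unit) */
    unsigned long    hbinterval;  /* heartbeat interval, ms (assumed unit) */
};

static unsigned long transport_timeout(const struct sctp_transport_sketch *trans)
{
    unsigned long timeout = trans->rto >> 1;   /* half the current RTO */

    if (trans->state != SCTP_UNCONFIRMED && trans->state != SCTP_PF)
        timeout += trans->hbinterval;          /* add the HB interval when active */

    return timeout;
}

int main(void)
{
    struct sctp_transport_sketch t = {
        .state = SCTP_ACTIVE, .rto = 3000, .hbinterval = 30000,
    };

    printf("next heartbeat in ~%lu ms\n", transport_timeout(&t)); /* 31500 */
    return 0;
}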
|
/net/vmw_vsock/ |
D | hyperv_transport.c |
     238  struct hvsock *hvs = vsk->trans;  in hvs_channel_cb()
     330  hvs_new = vnew->trans;  in hvs_open_connection()
     333  hvs = vsock_sk(sk)->trans;  in hvs_open_connection()
     430  vsk->trans = hvs;  in hvs_sock_init()
     440  struct hvsock *h = vsk->trans;  in hvs_connect()
     470  hvs_shutdown_lock_held(vsk->trans, mode);  in hvs_shutdown()
     500  hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);  in hvs_close_lock_held()
     527  struct hvsock *hvs = vsk->trans;  in hvs_destruct()
     582  struct hvsock *hvs = vsk->trans;  in hvs_stream_dequeue()
     622  struct hvsock *hvs = vsk->trans;  in hvs_stream_enqueue()
     [all …]
|
D | virtio_transport_common.c |
     185  vvs = vsk->trans;  in virtio_transport_send_pkt_info()
     279  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_stream_do_dequeue()
     370  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_stream_has_data()
     383  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_has_space()
     395  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_stream_has_space()
     415  vsk->trans = vvs;  in virtio_transport_do_socket_init()
     418  struct virtio_vsock_sock *ptrans = psk->trans;  in virtio_transport_do_socket_init()
     442  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_get_buffer_size()
     450  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_get_min_buffer_size()
     458  struct virtio_vsock_sock *vvs = vsk->trans;  in virtio_transport_get_max_buffer_size()
     [all …]
|
D | vmci_transport.c |
     855  struct vmci_transport *trans = client_data;  in vmci_transport_peer_detach_cb() local
     864  !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))  in vmci_transport_peer_detach_cb()
     873  spin_lock_bh(&trans->lock);  in vmci_transport_peer_detach_cb()
     874  if (!trans->sk)  in vmci_transport_peer_detach_cb()
     880  bh_lock_sock(trans->sk);  in vmci_transport_peer_detach_cb()
     882  vmci_transport_handle_detach(trans->sk);  in vmci_transport_peer_detach_cb()
     884  bh_unlock_sock(trans->sk);  in vmci_transport_peer_detach_cb()
     886  spin_unlock_bh(&trans->lock);  in vmci_transport_peer_detach_cb()
    1577  vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);  in vmci_transport_socket_init()
    1578  if (!vsk->trans)  in vmci_transport_socket_init()
     [all …]
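
Across the three vsock transports above, vsk->trans is the same idea: the generic vsock socket keeps one opaque per-socket pointer, each transport allocates its own private state (hvsock, virtio_vsock_sock, vmci_transport) in its socket-init hook, and every later callback starts by reading it back. A minimal userspace sketch of that pattern follows; the struct names and the single counter field are stand-ins invented for the example.

/* Per-socket, transport-private state hung off an opaque pointer. */
#include <stdlib.h>
#include <stdio.h>

struct vsock_sock_sketch {
    void *trans;                 /* transport-private state, opaque to the core */
};

struct my_transport_state {      /* hypothetical transport's private data */
    unsigned long bytes_queued;
};

static int my_socket_init(struct vsock_sock_sketch *vsk)
{
    struct my_transport_state *st = calloc(1, sizeof(*st));

    if (!st)
        return -1;
    vsk->trans = st;             /* as the *_socket_init() hooks do above */
    return 0;
}

static void my_socket_destruct(struct vsock_sock_sketch *vsk)
{
    free(vsk->trans);
    vsk->trans = NULL;
}

int main(void)
{
    struct vsock_sock_sketch vsk = { 0 };

    if (my_socket_init(&vsk) == 0) {
        struct my_transport_state *st = vsk.trans;  /* callbacks read it back */

        st->bytes_queued = 42;
        printf("queued %lu bytes\n", st->bytes_queued);
        my_socket_destruct(&vsk);
    }
    return 0;
}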
|
/net/llc/ |
D | llc_sap.c |
     145  struct llc_sap_state_trans *trans,  in llc_exec_sap_trans_actions() argument
     149  const llc_sap_action_t *next_action = trans->ev_actions;  in llc_exec_sap_trans_actions()
     169  struct llc_sap_state_trans *trans;  in llc_sap_next_state() local
     173  trans = llc_find_sap_trans(sap, skb);  in llc_sap_next_state()
     174  if (!trans)  in llc_sap_next_state()
     181  rc = llc_exec_sap_trans_actions(sap, trans, skb);  in llc_sap_next_state()
     187  sap->state = trans->next_state;  in llc_sap_next_state()
|
D | llc_conn.c |
      36  struct llc_conn_state_trans *trans,
     360  struct llc_conn_state_trans *trans;  in llc_conn_service() local
     365  trans = llc_qualify_conn_ev(sk, skb);  in llc_conn_service()
     366  if (trans) {  in llc_conn_service()
     367  rc = llc_exec_conn_trans_actions(sk, trans, skb);  in llc_conn_service()
     368  if (!rc && trans->next_state != NO_STATE_CHANGE) {  in llc_conn_service()
     369  llc->state = trans->next_state;  in llc_conn_service()
     434  struct llc_conn_state_trans *trans,  in llc_exec_conn_trans_actions() argument
     440  for (next_action = trans->ev_actions;  in llc_exec_conn_trans_actions()
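
In LLC, "trans" is a state-machine transition: llc_sap_next_state() and llc_conn_service() look up the transition that matches the incoming event, run its NULL-terminated list of action callbacks, and move to next_state only if every action succeeded (NO_STATE_CHANGE leaves the state alone). The table-driven sketch below follows that shape with invented events, states, and actions; it is not the LLC tables themselves.

/* Table-driven state machine with per-transition action lists. */
#include <stdio.h>

#define NO_STATE_CHANGE 100

struct machine { int state; };

typedef int (*action_t)(struct machine *m);

struct state_trans {
    int            event;        /* event this transition fires on (0 = sentinel) */
    int            next_state;   /* state to enter on success */
    const action_t *ev_actions;  /* NULL-terminated action list */
};

static int act_ack(struct machine *m) { printf("state %d: ack\n", m->state); return 0; }

static const action_t connect_actions[] = { act_ack, NULL };

static const struct state_trans transitions[] = {
    { .event = 1, .next_state = 2, .ev_actions = connect_actions },
    { .event = 0 },              /* end of table */
};

static int run_event(struct machine *m, int event)
{
    const struct state_trans *trans;
    const action_t *next_action;
    int rc;

    for (trans = transitions; trans->event; trans++)   /* find the transition */
        if (trans->event == event)
            break;
    if (!trans->event)
        return 1;                /* no transition for this event */

    for (next_action = trans->ev_actions, rc = 0;      /* run the action list */
         *next_action && !rc; next_action++)
        rc = (*next_action)(m);

    if (!rc && trans->next_state != NO_STATE_CHANGE)
        m->state = trans->next_state;
    return rc;
}

int main(void)
{
    struct machine m = { .state = 1 };

    run_event(&m, 1);
    printf("new state = %d\n", m.state);   /* 2 */
    return 0;
}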
|
/net/xfrm/ |
D | xfrm_input.c |
     760  struct xfrm_trans_tasklet *trans = (void *)data;  in xfrm_trans_reinject() local
     765  skb_queue_splice_init(&trans->queue, &queue);  in xfrm_trans_reinject()
     775  struct xfrm_trans_tasklet *trans;  in xfrm_trans_queue() local
     777  trans = this_cpu_ptr(&xfrm_trans_tasklet);  in xfrm_trans_queue()
     779  if (skb_queue_len(&trans->queue) >= netdev_max_backlog)  in xfrm_trans_queue()
     783  __skb_queue_tail(&trans->queue, skb);  in xfrm_trans_queue()
     784  tasklet_schedule(&trans->tasklet);  in xfrm_trans_queue()
     800  struct xfrm_trans_tasklet *trans;  in xfrm_input_init() local
     802  trans = &per_cpu(xfrm_trans_tasklet, i);  in xfrm_input_init()
     803  __skb_queue_head_init(&trans->queue);  in xfrm_input_init()
     [all …]
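
xfrm_trans_queue() and xfrm_trans_reinject() above show a deferral pattern: packets to be re-injected are appended to a per-CPU queue (dropped once the backlog limit is reached), the tasklet is scheduled, and the tasklet later splices the whole queue off and processes it. The sketch below reduces that to a single bounded queue drained by one function; a plain array stands in for the kernel's sk_buff lists, per-CPU data, and tasklets, and the limit value is invented.

/* Bounded deferral queue: enqueue with a backlog cap, splice-and-drain later. */
#include <stdio.h>

#define MAX_BACKLOG 4            /* stand-in for netdev_max_backlog */

struct trans_queue {
    int packets[MAX_BACKLOG];
    int len;
};

static int trans_enqueue(struct trans_queue *q, int pkt)
{
    if (q->len >= MAX_BACKLOG)
        return -1;               /* over the backlog limit: drop */
    q->packets[q->len++] = pkt;
    return 0;                    /* the kernel would tasklet_schedule() here */
}

static void trans_drain(struct trans_queue *q)
{
    struct trans_queue local;
    int i;

    /* splice everything off in one step, then work on the local copy */
    local = *q;
    q->len = 0;

    for (i = 0; i < local.len; i++)
        printf("reinjecting packet %d\n", local.packets[i]);
}

int main(void)
{
    struct trans_queue q = { .len = 0 };
    int pkt;

    for (pkt = 1; pkt <= 6; pkt++)
        if (trans_enqueue(&q, pkt))
            printf("dropped packet %d\n", pkt);

    trans_drain(&q);
    return 0;
}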
|
/net/9p/ |
D | trans_rdma.c |
     152  struct p9_trans_rdma *rdma = clnt->trans;  in p9_rdma_show_options()
     242  struct p9_trans_rdma *rdma = c->trans;  in p9_cm_event_handler()
     293  struct p9_trans_rdma *rdma = client->trans;  in recv_done()
     344  struct p9_trans_rdma *rdma = client->trans;  in send_done()
     385  struct p9_trans_rdma *rdma = client->trans;  in post_recv()
     420  struct p9_trans_rdma *rdma = client->trans;  in rdma_request()
     558  rdma = client->trans;  in rdma_close()
     606  struct p9_trans_rdma *rdma = client->trans;  in rdma_cancelled()
     663  client->trans = rdma;  in rdma_create_trans()
|
D | trans_fd.c |
     228  ts = client->trans;  in p9_fd_poll()
     257  ts = client->trans;  in p9_fd_read()
     423  ts = client->trans;  in p9_fd_write()
     585  struct p9_trans_fd *ts = client->trans;  in p9_conn_create()
     666  struct p9_trans_fd *ts = client->trans;  in p9_fd_request()
     835  client->trans = ts;  in p9_fd_open()
     871  client->trans = p;  in p9_socket_open()
     921  ts = client->trans;  in p9_fd_close()
|