/net/bluetooth/ |
D | hci_conn.c |
      63  static void hci_connect_le_scan_cleanup(struct hci_conn *conn)   in hci_connect_le_scan_cleanup() argument
      66  struct hci_dev *hdev = conn->hdev;   in hci_connect_le_scan_cleanup()
      71  bdaddr = &conn->dst;   in hci_connect_le_scan_cleanup()
      72  bdaddr_type = conn->dst_type;   in hci_connect_le_scan_cleanup()
     114  static void hci_conn_cleanup(struct hci_conn *conn)   in hci_conn_cleanup() argument
     116  struct hci_dev *hdev = conn->hdev;   in hci_conn_cleanup()
     118  if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))   in hci_conn_cleanup()
     119  hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);   in hci_conn_cleanup()
     121  hci_chan_list_flush(conn);   in hci_conn_cleanup()
     123  hci_conn_hash_del(hdev, conn);   in hci_conn_cleanup()
     [all …]
|
D | hci_event.c |
     104  struct hci_conn *conn;   in hci_cc_role_discovery() local
     113  conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));   in hci_cc_role_discovery()
     114  if (conn)   in hci_cc_role_discovery()
     115  conn->role = rp->role;   in hci_cc_role_discovery()
     123  struct hci_conn *conn;   in hci_cc_read_link_policy() local
     132  conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));   in hci_cc_read_link_policy()
     133  if (conn)   in hci_cc_read_link_policy()
     134  conn->link_policy = __le16_to_cpu(rp->policy);   in hci_cc_read_link_policy()
     142  struct hci_conn *conn;   in hci_cc_write_link_policy() local
     156  conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));   in hci_cc_write_link_policy()
     [all …]
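All three hci_event.c matches share one shape: the command-complete handler converts the little-endian handle from the controller's reply, looks the connection up in the per-device hash, and updates a single field only if the connection is still there. Below is a minimal, self-contained userspace sketch of that lookup-then-update step; the toy_conn, toy_dev and toy_lookup_handle names are invented for illustration and are not Bluetooth kernel APIs.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct toy_conn {
    uint16_t handle;      /* connection handle assigned by the controller */
    uint8_t  role;        /* role reported by the controller */
    uint16_t link_policy; /* link policy bits */
};

struct toy_dev {
    struct toy_conn conns[8]; /* stand-in for the per-device connection hash */
    size_t          nconns;
};

/* Walk the device's connections and return the one with this handle, or NULL. */
static struct toy_conn *toy_lookup_handle(struct toy_dev *dev, uint16_t handle)
{
    for (size_t i = 0; i < dev->nconns; i++)
        if (dev->conns[i].handle == handle)
            return &dev->conns[i];
    return NULL;
}

/* Lookup-then-update, as in hci_cc_role_discovery(): the connection may
 * already be gone, so the write is guarded by the NULL check. */
static void toy_cc_role_discovery(struct toy_dev *dev, uint16_t handle, uint8_t role)
{
    struct toy_conn *conn = toy_lookup_handle(dev, handle);

    if (conn)
        conn->role = role;
}

int main(void)
{
    struct toy_dev dev = { .conns = { { .handle = 0x0042 } }, .nconns = 1 };

    toy_cc_role_discovery(&dev, 0x0042, 1);
    printf("role for handle 0x0042: %u\n", dev.conns[0].role);
    return 0;
}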
|
D | hci_sysfs.c |
      13  struct hci_conn *conn = to_hci_conn(dev);   in bt_link_release() local
      14  kfree(conn);   in bt_link_release()
      32  void hci_conn_init_sysfs(struct hci_conn *conn)   in hci_conn_init_sysfs() argument
      34  struct hci_dev *hdev = conn->hdev;   in hci_conn_init_sysfs()
      36  BT_DBG("conn %p", conn);   in hci_conn_init_sysfs()
      38  conn->dev.type = &bt_link;   in hci_conn_init_sysfs()
      39  conn->dev.class = bt_class;   in hci_conn_init_sysfs()
      40  conn->dev.parent = &hdev->dev;   in hci_conn_init_sysfs()
      42  device_initialize(&conn->dev);   in hci_conn_init_sysfs()
      45  void hci_conn_add_sysfs(struct hci_conn *conn)   in hci_conn_add_sysfs() argument
      [all …]
|
D | l2cap_core.c |
      57  static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
      59  static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
      91  static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,   in __l2cap_get_chan_by_dcid() argument
      96  list_for_each_entry(c, &conn->chan_l, list) {   in __l2cap_get_chan_by_dcid()
     103  static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,   in __l2cap_get_chan_by_scid() argument
     108  list_for_each_entry(c, &conn->chan_l, list) {   in __l2cap_get_chan_by_scid()
     117  static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,   in l2cap_get_chan_by_scid() argument
     122  mutex_lock(&conn->chan_lock);   in l2cap_get_chan_by_scid()
     123  c = __l2cap_get_chan_by_scid(conn, cid);   in l2cap_get_chan_by_scid()
     126  mutex_unlock(&conn->chan_lock);   in l2cap_get_chan_by_scid()
     [all …]
|
D | sco.c |
      69  struct sco_conn *conn;   member
     107  struct sco_conn *conn = hcon->sco_data;   in sco_conn_add() local
     109  if (conn)   in sco_conn_add()
     110  return conn;   in sco_conn_add()
     112  conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);   in sco_conn_add()
     113  if (!conn)   in sco_conn_add()
     116  spin_lock_init(&conn->lock);   in sco_conn_add()
     118  hcon->sco_data = conn;   in sco_conn_add()
     119  conn->hcon = hcon;   in sco_conn_add()
     122  conn->mtu = hdev->sco_mtu;   in sco_conn_add()
     [all …]
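sco_conn_add() is a get-or-create helper: reuse the conn already attached to the link if there is one, otherwise zero-allocate a new one, initialise it, and attach it to the link, taking the MTU from the device. A rough userspace sketch of the same pattern follows, using calloc in place of kzalloc and omitting the spinlock initialisation; the toy_* names are invented for illustration.

#include <stdlib.h>
#include <stdio.h>

struct toy_link {                      /* stands in for the underlying hci_conn */
    struct toy_sco_conn *sco_data;     /* lazily created SCO state */
    unsigned int sco_mtu;
};

struct toy_sco_conn {
    struct toy_link *link;             /* back-pointer, like conn->hcon */
    unsigned int mtu;
};

/* Get-or-create, mirroring the shape of sco_conn_add(): return the existing
 * conn if the link already has one, otherwise allocate, initialise and attach
 * a new one.  (The kernel version also initialises a spinlock here.) */
static struct toy_sco_conn *toy_sco_conn_add(struct toy_link *link)
{
    struct toy_sco_conn *conn = link->sco_data;

    if (conn)
        return conn;

    conn = calloc(1, sizeof(*conn));   /* zeroed, like kzalloc() */
    if (!conn)
        return NULL;

    link->sco_data = conn;
    conn->link = link;
    conn->mtu = link->sco_mtu;         /* conn->mtu = hdev->sco_mtu */
    return conn;
}

int main(void)
{
    struct toy_link link = { .sco_mtu = 64 };
    struct toy_sco_conn *a = toy_sco_conn_add(&link);
    struct toy_sco_conn *b = toy_sco_conn_add(&link);

    printf("same conn returned twice: %s, mtu=%u\n", a == b ? "yes" : "no", a->mtu);
    free(a);
    return 0;
}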
|
D | smp.c |
      98  struct l2cap_conn *conn;   member
     605  static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)   in smp_send_cmd() argument
     607  struct l2cap_chan *chan = conn->smp;   in smp_send_cmd()
     663  static void build_pairing_cmd(struct l2cap_conn *conn,   in build_pairing_cmd() argument
     667  struct l2cap_chan *chan = conn->smp;   in build_pairing_cmd()
     669  struct hci_conn *hcon = conn->hcon;   in build_pairing_cmd()
     718  req->io_capability = conn->hcon->io_capability;   in build_pairing_cmd()
     729  rsp->io_capability = conn->hcon->io_capability;   in build_pairing_cmd()
     739  static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)   in check_enc_key_size() argument
     741  struct l2cap_chan *chan = conn->smp;   in check_enc_key_size()
     [all …]
|
/net/rxrpc/ |
D | conn_client.c |
     105  static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,   in rxrpc_get_client_connection_id() argument
     108  struct rxrpc_net *rxnet = conn->params.local->rxnet;   in rxrpc_get_client_connection_id()
     116  id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,   in rxrpc_get_client_connection_id()
     124  conn->proto.epoch = rxnet->epoch;   in rxrpc_get_client_connection_id()
     125  conn->proto.cid = id << RXRPC_CIDSHIFT;   in rxrpc_get_client_connection_id()
     126  set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);   in rxrpc_get_client_connection_id()
     127  _leave(" [CID %x]", conn->proto.cid);   in rxrpc_get_client_connection_id()
     140  static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)   in rxrpc_put_client_connection_id()
     142  if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {   in rxrpc_put_client_connection_id()
     145  conn->proto.cid >> RXRPC_CIDSHIFT);   in rxrpc_put_client_connection_id()
     [all …]
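rxrpc_get_client_connection_id() relies on idr_alloc_cyclic(), which hands out IDs starting just after the most recently allocated one so that freshly freed IDs are not reused immediately; the resulting ID is then shifted into the connection's CID field. The sketch below imitates that cyclic behaviour with a fixed-size array rather than the kernel IDR; everything named toy_* (and the shift value) is invented for illustration.

#include <stdio.h>

#define TOY_MAX_IDS  8
#define TOY_CIDSHIFT 8               /* stand-in for RXRPC_CIDSHIFT */

static void *toy_slots[TOY_MAX_IDS]; /* slot -> owner, NULL when free */
static int toy_next;                 /* where the next search starts */

/* Allocate the first free ID at or after the cursor, wrapping once.  This is
 * the "cyclic" property of idr_alloc_cyclic(): recently freed IDs are skipped
 * until the cursor comes back around. */
static int toy_alloc_cyclic(void *owner)
{
    for (int n = 0; n < TOY_MAX_IDS; n++) {
        int id = (toy_next + n) % TOY_MAX_IDS;

        if (!toy_slots[id]) {
            toy_slots[id] = owner;
            toy_next = (id + 1) % TOY_MAX_IDS;
            return id;
        }
    }
    return -1; /* no free IDs */
}

static void toy_free_id(int id)
{
    toy_slots[id] = NULL;
}

int main(void)
{
    int conn_a, conn_b;
    int a = toy_alloc_cyclic(&conn_a);
    int b = toy_alloc_cyclic(&conn_b);

    toy_free_id(a);
    /* Even though slot 'a' is free again, the next allocation moves on. */
    int c = toy_alloc_cyclic(&conn_a);

    printf("a=%d b=%d c=%d  cid(c)=0x%x\n", a, b, c, (unsigned)c << TOY_CIDSHIFT);
    return 0;
}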
|
D | conn_object.c |
      33  struct rxrpc_connection *conn;   in rxrpc_alloc_connection() local
      37  conn = kzalloc(sizeof(struct rxrpc_connection), gfp);   in rxrpc_alloc_connection()
      38  if (conn) {   in rxrpc_alloc_connection()
      39  INIT_LIST_HEAD(&conn->cache_link);   in rxrpc_alloc_connection()
      40  spin_lock_init(&conn->channel_lock);   in rxrpc_alloc_connection()
      41  INIT_LIST_HEAD(&conn->waiting_calls);   in rxrpc_alloc_connection()
      42  INIT_WORK(&conn->processor, &rxrpc_process_connection);   in rxrpc_alloc_connection()
      43  INIT_LIST_HEAD(&conn->proc_link);   in rxrpc_alloc_connection()
      44  INIT_LIST_HEAD(&conn->link);   in rxrpc_alloc_connection()
      45  skb_queue_head_init(&conn->rx_queue);   in rxrpc_alloc_connection()
      [all …]
|
D | conn_event.c |
      26  static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,   in rxrpc_conn_retransmit_call() argument
      49  _enter("%d", conn->debug_id);   in rxrpc_conn_retransmit_call()
      51  chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];   in rxrpc_conn_retransmit_call()
      62  msg.msg_name = &conn->params.peer->srx.transport;   in rxrpc_conn_retransmit_call()
      63  msg.msg_namelen = conn->params.peer->srx.transport_len;   in rxrpc_conn_retransmit_call()
      73  pkt.whdr.flags = conn->out_clientflag;   in rxrpc_conn_retransmit_call()
      75  pkt.whdr.securityIndex = conn->security_ix;   in rxrpc_conn_retransmit_call()
      77  pkt.whdr.serviceId = htons(conn->service_id);   in rxrpc_conn_retransmit_call()
      87  mtu = conn->params.peer->if_mtu;   in rxrpc_conn_retransmit_call()
      88  mtu -= conn->params.peer->hdrsize;   in rxrpc_conn_retransmit_call()
      [all …]
|
D | conn_service.c |
      28  struct rxrpc_connection *conn = NULL;   in rxrpc_find_service_conn_rcu() local
      46  conn = rb_entry(p, struct rxrpc_connection, service_node);   in rxrpc_find_service_conn_rcu()
      48  if (conn->proto.index_key < k.index_key)   in rxrpc_find_service_conn_rcu()
      50  else if (conn->proto.index_key > k.index_key)   in rxrpc_find_service_conn_rcu()
      54  conn = NULL;   in rxrpc_find_service_conn_rcu()
      59  _leave(" = %d", conn ? conn->debug_id : -1);   in rxrpc_find_service_conn_rcu()
      60  return conn;   in rxrpc_find_service_conn_rcu()
      68  struct rxrpc_connection *conn)   in rxrpc_publish_service_conn() argument
      71  struct rxrpc_conn_proto k = conn->proto;   in rxrpc_publish_service_conn()
      91  rb_link_node_rcu(&conn->service_node, parent, pp);   in rxrpc_publish_service_conn()
      [all …]
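rxrpc_find_service_conn_rcu() walks an rb-tree keyed by proto.index_key, descending one way or the other until the key matches, and rxrpc_publish_service_conn() inserts new connections with the same comparison. Below is a plain binary-search-tree version of that descend-by-key loop, with no RCU and no rebalancing; toy_node and toy_find are illustrative names only.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct toy_node {
    uint64_t index_key;              /* like conn->proto.index_key */
    struct toy_node *left, *right;
};

/* Descend by key: go right when the stored key is smaller than the one we
 * want, left when it is larger, and stop on an exact match. */
static struct toy_node *toy_find(struct toy_node *root, uint64_t key)
{
    struct toy_node *p = root;

    while (p) {
        if (p->index_key < key)
            p = p->right;
        else if (p->index_key > key)
            p = p->left;
        else
            return p;
    }
    return NULL;                     /* nothing published under this key */
}

int main(void)
{
    struct toy_node b = { .index_key = 20 };
    struct toy_node c = { .index_key = 40 };
    struct toy_node root = { .index_key = 30, .left = &b, .right = &c };

    struct toy_node *hit = toy_find(&root, 40);
    printf("found key: %llu\n", hit ? (unsigned long long)hit->index_key : 0ULL);
    return 0;
}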
|
D | rxkad.c |
      55  static int rxkad_init_connection_security(struct rxrpc_connection *conn)   in rxkad_init_connection_security() argument
      61  _enter("{%d},{%x}", conn->debug_id, key_serial(conn->params.key));   in rxkad_init_connection_security()
      63  token = conn->params.key->payload.data[0];   in rxkad_init_connection_security()
      64  conn->security_ix = token->security_index;   in rxkad_init_connection_security()
      77  switch (conn->params.security_level) {   in rxkad_init_connection_security()
      81  conn->size_align = 8;   in rxkad_init_connection_security()
      82  conn->security_size = sizeof(struct rxkad_level1_hdr);   in rxkad_init_connection_security()
      85  conn->size_align = 8;   in rxkad_init_connection_security()
      86  conn->security_size = sizeof(struct rxkad_level2_hdr);   in rxkad_init_connection_security()
      93  conn->cipher = ci;   in rxkad_init_connection_security()
      [all …]
|
D | security.c |
      75  int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)   in rxrpc_init_client_conn_security() argument
      79  struct key *key = conn->params.key;   in rxrpc_init_client_conn_security()
      82  _enter("{%d},{%x}", conn->debug_id, key_serial(key));   in rxrpc_init_client_conn_security()
      98  conn->security = sec;   in rxrpc_init_client_conn_security()
     100  ret = conn->security->init_connection_security(conn);   in rxrpc_init_client_conn_security()
     102  conn->security = &rxrpc_no_security;   in rxrpc_init_client_conn_security()
     113  int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)   in rxrpc_init_server_conn_security() argument
     116  struct rxrpc_local *local = conn->params.local;   in rxrpc_init_server_conn_security()
     124  sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);   in rxrpc_init_server_conn_security()
     126  sec = rxrpc_security_lookup(conn->security_ix);   in rxrpc_init_server_conn_security()
     [all …]
|
/net/smc/ |
D | smc_tx.c |
      43  if (atomic_read(&smc->conn.sndbuf_space) && sock) {   in smc_tx_write_space()
      71  struct smc_connection *conn = &smc->conn;   in smc_tx_wait_memory() local
      83  conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {   in smc_tx_wait_memory()
      87  if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {   in smc_tx_wait_memory()
     102  if (atomic_read(&conn->sndbuf_space))   in smc_tx_wait_memory()
     109  smc_cdc_rxed_any_close_or_senddone(conn) ||   in smc_tx_wait_memory()
     110  atomic_read(&conn->sndbuf_space),   in smc_tx_wait_memory()
     125  struct smc_connection *conn = &smc->conn;   in smc_tx_sendmsg() local
     146  conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)   in smc_tx_sendmsg()
     148  if (smc_cdc_rxed_any_close(conn))   in smc_tx_sendmsg()
     [all …]
|
D | smc_cdc.c |
      25  struct smc_connection *conn; /* socket connection */   member
      40  if (!cdcpend->conn)   in smc_cdc_tx_handler()
      44  smc = container_of(cdcpend->conn, struct smc_sock, conn);   in smc_cdc_tx_handler()
      47  diff = smc_curs_diff(cdcpend->conn->sndbuf_size,   in smc_cdc_tx_handler()
      48  &cdcpend->conn->tx_curs_fin,   in smc_cdc_tx_handler()
      52  atomic_add(diff, &cdcpend->conn->sndbuf_space);   in smc_cdc_tx_handler()
      55  smc_curs_write(&cdcpend->conn->tx_curs_fin,   in smc_cdc_tx_handler()
      56  smc_curs_read(&cdcpend->cursor, cdcpend->conn),   in smc_cdc_tx_handler()
      57  cdcpend->conn);   in smc_cdc_tx_handler()
      74  static inline void smc_cdc_add_pending_send(struct smc_connection *conn,   in smc_cdc_add_pending_send() argument
      [all …]
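The smc_cdc_tx_handler() lines compute how far the transmit-finished cursor has moved since the completed CDC message was queued, credit that many bytes back to sndbuf_space, and then advance tx_curs_fin. The sketch below shows one plausible wrap-aware way to compute such a cursor difference over a ring buffer; it follows the idea visible here (a wrap counter plus a byte count), not the exact kernel implementation, and all toy_* names are invented.

#include <stdint.h>
#include <stdio.h>

/* A cursor into a ring buffer: how many times it has wrapped plus the byte
 * offset within the current lap, mirroring the wrap/count pair used by the
 * SMC CDC code. */
struct toy_curs {
    uint16_t wrap;   /* number of complete laps around the buffer */
    uint32_t count;  /* byte offset within the current lap */
};

/* Bytes the cursor 'new' is ahead of 'old' in a ring of 'size' bytes,
 * assuming 'new' never laps 'old' by more than one full buffer. */
static uint32_t toy_curs_diff(uint32_t size, const struct toy_curs *old,
                              const struct toy_curs *new)
{
    if (old->wrap == new->wrap)
        return new->count - old->count;
    /* one wrap in between: remainder of the old lap plus the new offset */
    return size - old->count + new->count;
}

int main(void)
{
    struct toy_curs fin  = { .wrap = 3, .count = 900 };
    struct toy_curs sent = { .wrap = 4, .count = 100 };
    uint32_t sndbuf_size = 1024, sndbuf_space = 0;

    /* Completion handler: credit the freed bytes back to the send buffer
     * and advance the "finished" cursor to the acknowledged position. */
    sndbuf_space += toy_curs_diff(sndbuf_size, &fin, &sent);
    fin = sent;

    printf("freed %u bytes of send buffer\n", sndbuf_space);
    return 0;
}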
|
D | smc_core.c |
      41  static void smc_lgr_add_alert_token(struct smc_connection *conn)   in smc_lgr_add_alert_token() argument
      44  u32 token = conn->alert_token_local;   in smc_lgr_add_alert_token()
      46  link = &conn->lgr->conns_all.rb_node;   in smc_lgr_add_alert_token()
      58  rb_link_node(&conn->alert_node, parent, link);   in smc_lgr_add_alert_token()
      59  rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);   in smc_lgr_add_alert_token()
      67  static void smc_lgr_register_conn(struct smc_connection *conn)   in smc_lgr_register_conn() argument
      69  struct smc_sock *smc = container_of(conn, struct smc_sock, conn);   in smc_lgr_register_conn()
      76  while (!conn->alert_token_local) {   in smc_lgr_register_conn()
      77  conn->alert_token_local = atomic_inc_return(&nexttoken);   in smc_lgr_register_conn()
      78  if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))   in smc_lgr_register_conn()
      [all …]
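smc_lgr_register_conn() picks the connection's alert token by bumping a shared counter and retrying while the candidate collides with a token already registered in the link group's rb-tree; a token of 0 means "not assigned yet", so a colliding candidate is simply reset to 0 and the loop draws another value. A compact userspace sketch of that pick-and-retry idea follows, with a flat array standing in for the rb-tree and plain (non-atomic) arithmetic standing in for atomic_inc_return(); the toy_* names are invented.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_CONNS 4

static uint32_t toy_tokens[TOY_MAX_CONNS]; /* tokens already registered */
static uint32_t toy_nexttoken;             /* shared counter (atomic in the kernel) */

static bool toy_token_in_use(uint32_t token)
{
    for (int i = 0; i < TOY_MAX_CONNS; i++)
        if (toy_tokens[i] == token)
            return true;
    return false;
}

/* Pick-and-retry: 0 means "no token yet", so keep drawing candidates from the
 * counter until one is neither 0 nor already registered. */
static uint32_t toy_pick_alert_token(void)
{
    uint32_t token = 0;

    while (!token) {
        token = ++toy_nexttoken;
        if (token && toy_token_in_use(token))
            token = 0;               /* collision: try the next value */
    }
    return token;
}

int main(void)
{
    toy_tokens[0] = 1;               /* pretend token 1 is already taken */
    for (int i = 1; i < TOY_MAX_CONNS; i++)
        toy_tokens[i] = toy_pick_alert_token();

    for (int i = 0; i < TOY_MAX_CONNS; i++)
        printf("conn %d -> alert token %u\n", i, toy_tokens[i]);
    return 0;
}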
|
D | smc_close.c |
      45  !smc_cdc_tx_has_pending(&smc->conn),   in smc_close_wait_tx_pends()
      62  if (!smc_tx_prepared_sends(&smc->conn))   in smc_close_stream_wait()
      71  !smc_tx_prepared_sends(&smc->conn) ||   in smc_close_stream_wait()
      89  static int smc_close_wr(struct smc_connection *conn)   in smc_close_wr() argument
      91  conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1;   in smc_close_wr()
      93  return smc_cdc_get_slot_and_msg_send(conn);   in smc_close_wr()
      96  static int smc_close_final(struct smc_connection *conn)   in smc_close_final() argument
      98  if (atomic_read(&conn->bytes_to_rcv))   in smc_close_final()
      99  conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;   in smc_close_final()
     101  conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1;   in smc_close_final()
     [all …]
|
D | smc_cdc.h |
      53  static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)   in smc_cdc_rxed_any_close() argument
      55  return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||   in smc_cdc_rxed_any_close()
      56  conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;   in smc_cdc_rxed_any_close()
      60  struct smc_connection *conn)   in smc_cdc_rxed_any_close_or_senddone() argument
      62  return smc_cdc_rxed_any_close(conn) ||   in smc_cdc_rxed_any_close_or_senddone()
      63  conn->local_rx_ctrl.conn_state_flags.peer_done_writing;   in smc_cdc_rxed_any_close_or_senddone()
      78  struct smc_connection *conn)   in smc_curs_read() argument
      84  spin_lock_irqsave(&conn->acurs_lock, flags);   in smc_curs_read()
      86  spin_unlock_irqrestore(&conn->acurs_lock, flags);   in smc_curs_read()
      94  struct smc_connection *conn)   in smc_curs_read_net() argument
      [all …]
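These smc_cdc.h helpers are small inlines: the first two simply OR connection-state flag bits together, while smc_curs_read() copies a multi-field cursor under conn->acurs_lock so the wrap/count pair is always seen consistently. A sketch of that locked-snapshot idea using a pthread mutex (the kernel uses a spinlock with interrupts disabled; the toy_* names are invented):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_curs {
    uint16_t wrap;
    uint32_t count;
};

struct toy_conn {
    struct toy_curs prod;              /* updated by another context */
    pthread_mutex_t acurs_lock;        /* guards cursor reads and writes */
};

/* Take a consistent snapshot of the cursor: both fields are copied while the
 * lock is held, so a reader never sees a new wrap paired with an old count. */
static struct toy_curs toy_curs_read(struct toy_conn *conn)
{
    struct toy_curs snapshot;

    pthread_mutex_lock(&conn->acurs_lock);
    snapshot = conn->prod;
    pthread_mutex_unlock(&conn->acurs_lock);
    return snapshot;
}

int main(void)
{
    struct toy_conn conn = {
        .prod = { .wrap = 2, .count = 512 },
        .acurs_lock = PTHREAD_MUTEX_INITIALIZER,
    };
    struct toy_curs c = toy_curs_read(&conn);

    printf("cursor snapshot: wrap=%u count=%u\n", c.wrap, c.count);
    return 0;
}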
|
D | smc_rx.c |
      57  struct smc_connection *conn = &smc->conn;   in smc_rx_wait_data() local
      61  if (atomic_read(&conn->bytes_to_rcv))   in smc_rx_wait_data()
      69  atomic_read(&conn->bytes_to_rcv) ||   in smc_rx_wait_data()
      70  smc_cdc_rxed_any_close_or_senddone(conn),   in smc_rx_wait_data()
      85  struct smc_connection *conn = &smc->conn;   in smc_rx_recvmsg() local
     107  rcvbuf_base = conn->rmb_desc->cpu_addr;   in smc_rx_recvmsg()
     113  if (atomic_read(&conn->bytes_to_rcv))   in smc_rx_recvmsg()
     122  smc_cdc_rxed_any_close_or_senddone(conn) ||   in smc_rx_recvmsg()
     123  conn->local_tx_ctrl.conn_state_flags.   in smc_rx_recvmsg()
     134  smc_cdc_rxed_any_close_or_senddone(conn) ||   in smc_rx_recvmsg()
     [all …]
|
D | smc_diag.c |
      88  if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.lgr) {   in __smc_diag_dump()
      89  struct smc_connection *conn = &smc->conn;   in __smc_diag_dump() local
      91  .token = conn->alert_token_local,   in __smc_diag_dump()
      92  .sndbuf_size = conn->sndbuf_size,   in __smc_diag_dump()
      93  .rmbe_size = conn->rmbe_size,   in __smc_diag_dump()
      94  .peer_rmbe_size = conn->peer_rmbe_size,   in __smc_diag_dump()
      96  .rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,   in __smc_diag_dump()
      97  .rx_prod.count = conn->local_rx_ctrl.prod.count,   in __smc_diag_dump()
      98  .rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,   in __smc_diag_dump()
      99  .rx_cons.count = conn->local_rx_ctrl.cons.count,   in __smc_diag_dump()
      [all …]
|
/net/rds/ |
D | connection.c |
      78  struct rds_connection *conn, *ret = NULL;   in rds_conn_lookup() local
      80  hlist_for_each_entry_rcu(conn, head, c_hash_node) {   in rds_conn_lookup()
      81  if (conn->c_faddr == faddr && conn->c_laddr == laddr &&   in rds_conn_lookup()
      82  conn->c_trans == trans && net == rds_conn_net(conn)) {   in rds_conn_lookup()
      83  ret = conn;   in rds_conn_lookup()
     100  struct rds_connection *conn = cp->cp_conn;   in rds_conn_path_reset() local
     103  &conn->c_laddr, &conn->c_faddr);   in rds_conn_path_reset()
     115  static void __rds_conn_path_init(struct rds_connection *conn,   in __rds_conn_path_init() argument
     124  cp->cp_conn = conn;   in __rds_conn_path_init()
     149  struct rds_connection *conn, *parent = NULL;   in __rds_conn_create() local
     [all …]
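rds_conn_lookup() walks one RCU hash bucket and only accepts an entry when every part of the key matches: local address, foreign address, transport, and network namespace. The sketch below reproduces that multi-field bucket walk with a plain singly linked list and no RCU; the rds-specific types are replaced by invented toy_* ones.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct toy_conn {
    uint32_t laddr, faddr;   /* local/foreign IPv4 addresses */
    int      trans;          /* which transport owns this conn */
    int      netns;          /* network-namespace id */
    struct toy_conn *next;   /* next entry in the same hash bucket */
};

/* Walk one bucket and return the conn only if *every* key field matches,
 * just as rds_conn_lookup() compares laddr, faddr, transport and netns. */
static struct toy_conn *toy_conn_lookup(struct toy_conn *bucket,
                                        uint32_t laddr, uint32_t faddr,
                                        int trans, int netns)
{
    for (struct toy_conn *conn = bucket; conn; conn = conn->next)
        if (conn->faddr == faddr && conn->laddr == laddr &&
            conn->trans == trans && conn->netns == netns)
            return conn;
    return NULL;
}

int main(void)
{
    struct toy_conn b = { .laddr = 0x0a000001, .faddr = 0x0a000002, .trans = 1, .netns = 0 };
    struct toy_conn a = { .laddr = 0x0a000001, .faddr = 0x0a000003, .trans = 1, .netns = 0, .next = &b };

    struct toy_conn *hit = toy_conn_lookup(&a, 0x0a000001, 0x0a000002, 1, 0);
    printf("lookup %s\n", hit ? "found the matching conn" : "missed");
    return 0;
}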
|
D | ib_cm.c |
      46  static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)   in rds_ib_set_protocol() argument
      48  conn->c_version = version;   in rds_ib_set_protocol()
      54  static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)   in rds_ib_set_flow_control() argument
      56  struct rds_ib_connection *ic = conn->c_transport_data;   in rds_ib_set_flow_control()
      61  rds_ib_send_add_credits(conn, credits);   in rds_ib_set_flow_control()
      96  void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)   in rds_ib_cm_connect_complete() argument
      99  struct rds_ib_connection *ic = conn->c_transport_data;   in rds_ib_cm_connect_complete()
     103  if (event->param.conn.private_data_len >= sizeof(*dp)) {   in rds_ib_cm_connect_complete()
     104  dp = event->param.conn.private_data;   in rds_ib_cm_connect_complete()
     108  rds_ib_set_protocol(conn,   in rds_ib_cm_connect_complete()
     [all …]
|
D | rdma_transport.c |
      46  struct rds_connection *conn = cm_id->context;   in rds_rdma_cm_event_handler() local
      50  rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,   in rds_rdma_cm_event_handler()
      58  if (conn) {   in rds_rdma_cm_event_handler()
      59  mutex_lock(&conn->c_cm_lock);   in rds_rdma_cm_event_handler()
      64  if (rds_conn_state(conn) == RDS_CONN_DISCONNECTING) {   in rds_rdma_cm_event_handler()
      88  if (conn) {   in rds_rdma_cm_event_handler()
      91  ibic = conn->c_transport_data;   in rds_rdma_cm_event_handler()
      95  rds_conn_drop(conn);   in rds_rdma_cm_event_handler()
     100  trans->cm_connect_complete(conn, event);   in rds_rdma_cm_event_handler()
     113  if (conn)   in rds_rdma_cm_event_handler()
     [all …]
|
D | loop.c |
      61  static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,   in rds_loop_xmit() argument
      71  rds_cong_map_updated(conn->c_fcong, ~(u64) 0);   in rds_loop_xmit()
      72  ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);   in rds_loop_xmit()
      78  rds_inc_init(&rm->m_inc, conn, conn->c_laddr);   in rds_loop_xmit()
      82  rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,   in rds_loop_xmit()
      85  rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),   in rds_loop_xmit()
     112  struct rds_connection *conn;   member
     121  static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)   in rds_loop_conn_alloc() argument
     131  lc->conn = conn;   in rds_loop_conn_alloc()
     132  conn->c_transport_data = lc;   in rds_loop_conn_alloc()
     [all …]
|
/net/wireless/ |
D | sme.c |
      56  if (!wdev->conn)   in cfg80211_sme_free()
      59  kfree(wdev->conn->ie);   in cfg80211_sme_free()
      60  kfree(wdev->conn);   in cfg80211_sme_free()
      61  wdev->conn = NULL;   in cfg80211_sme_free()
      76  if (wdev->conn->params.channel)   in cfg80211_conn_scan()
      87  if (wdev->conn->params.channel) {   in cfg80211_conn_scan()
      88  enum nl80211_band band = wdev->conn->params.channel->band;   in cfg80211_conn_scan()
      96  request->channels[0] = wdev->conn->params.channel;   in cfg80211_conn_scan()
     122  memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,   in cfg80211_conn_scan()
     123  wdev->conn->params.ssid_len);   in cfg80211_conn_scan()
     [all …]
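The cfg80211_conn_scan() matches show the connect-time scan request being built from what the SME already knows: when the connection parameters pin a channel, that single channel goes into the request, and the target SSID is copied into the request's one SSID slot. The small sketch below illustrates that narrowing decision with invented toy_* types; the fallback branch (scan every known channel when none is pinned) is an assumption for illustration, not taken from the matches above.

#include <string.h>
#include <stdio.h>

#define TOY_MAX_CHANNELS 4
#define TOY_MAX_SSID_LEN 32

struct toy_conn_params {
    int    channel;                  /* 0 means "channel not known yet" */
    char   ssid[TOY_MAX_SSID_LEN];
    size_t ssid_len;
};

struct toy_scan_request {
    int    channels[TOY_MAX_CHANNELS];
    size_t n_channels;
    char   ssid[TOY_MAX_SSID_LEN];
    size_t ssid_len;
};

/* If the channel is already known, scan only that channel; otherwise fall
 * back to every channel we have.  Either way, scan for the target SSID. */
static void toy_build_conn_scan(const struct toy_conn_params *params,
                                const int *all_channels, size_t n_all,
                                struct toy_scan_request *req)
{
    if (params->channel) {
        req->channels[0] = params->channel;
        req->n_channels = 1;
    } else {
        req->n_channels = n_all < TOY_MAX_CHANNELS ? n_all : TOY_MAX_CHANNELS;
        for (size_t i = 0; i < req->n_channels; i++)
            req->channels[i] = all_channels[i];
    }
    memcpy(req->ssid, params->ssid, params->ssid_len);
    req->ssid_len = params->ssid_len;
}

int main(void)
{
    const int all[] = { 1, 6, 11 };
    struct toy_conn_params params = { .channel = 6, .ssid = "example", .ssid_len = 7 };
    struct toy_scan_request req = { 0 };

    toy_build_conn_scan(&params, all, 3, &req);
    printf("scanning %zu channel(s), first=%d, ssid=%.*s\n",
           req.n_channels, req.channels[0], (int)req.ssid_len, req.ssid);
    return 0;
}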
|
/net/netfilter/ |
D | xt_connlimit.c |
      90  static inline bool already_closed(const struct nf_conn *conn)   in already_closed() argument
      92  if (nf_ct_protonum(conn) == IPPROTO_TCP)   in already_closed()
      93  return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||   in already_closed()
      94  conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;   in already_closed()
     124  struct xt_connlimit_conn *conn;   in nf_conncount_add() local
     126  conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);   in nf_conncount_add()
     127  if (conn == NULL)   in nf_conncount_add()
     129  conn->tuple = *tuple;   in nf_conncount_add()
     130  conn->zone = *zone;   in nf_conncount_add()
     131  conn->cpu = raw_smp_processor_id();   in nf_conncount_add()
     [all …]
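already_closed() treats TCP conntrack entries in TIME_WAIT or CLOSE as no longer counting toward the limit, and nf_conncount_add() appends a small per-connection record (tuple, zone, cpu) to the tracked set. A rough userspace sketch of counting the still-live connections for one key while skipping entries that are already closed; the states and helpers are simplified stand-ins, not the conntrack API.

#include <stdbool.h>
#include <stdio.h>

enum toy_tcp_state { TOY_ESTABLISHED, TOY_TIME_WAIT, TOY_CLOSE };

struct toy_tracked_conn {
    int proto;                       /* 6 for TCP in this toy model */
    enum toy_tcp_state state;
};

/* Mirror of the already_closed() idea: TCP entries that reached TIME_WAIT or
 * CLOSE no longer count against the per-key connection limit. */
static bool toy_already_closed(const struct toy_tracked_conn *conn)
{
    if (conn->proto == 6)
        return conn->state == TOY_TIME_WAIT || conn->state == TOY_CLOSE;
    return false;
}

/* Count the connections for this key that are still live. */
static int toy_count_live(const struct toy_tracked_conn *conns, int n)
{
    int live = 0;

    for (int i = 0; i < n; i++)
        if (!toy_already_closed(&conns[i]))
            live++;
    return live;
}

int main(void)
{
    struct toy_tracked_conn conns[] = {
        { .proto = 6, .state = TOY_ESTABLISHED },
        { .proto = 6, .state = TOY_TIME_WAIT },
        { .proto = 6, .state = TOY_CLOSE },
    };

    printf("live connections: %d of 3\n", toy_count_live(conns, 3));
    return 0;
}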
|