/net/rds/ |
D | tcp.c |
     96  u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)  in rds_tcp_snd_nxt() argument
     98      return tcp_sk(tc->t_sock->sk)->snd_nxt;  in rds_tcp_snd_nxt()
    101  u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)  in rds_tcp_snd_una() argument
    103      return tcp_sk(tc->t_sock->sk)->snd_una;  in rds_tcp_snd_una()
    107      struct rds_tcp_connection *tc)  in rds_tcp_restore_callbacks() argument
    109      rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);  in rds_tcp_restore_callbacks()
    114      list_del_init(&tc->t_list_item);  in rds_tcp_restore_callbacks()
    118      tc->t_sock = NULL;  in rds_tcp_restore_callbacks()
    120      sock->sk->sk_write_space = tc->t_orig_write_space;  in rds_tcp_restore_callbacks()
    121      sock->sk->sk_data_ready = tc->t_orig_data_ready;  in rds_tcp_restore_callbacks()
    [all …]
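The pattern behind the rds_tcp_restore_callbacks() hits: RDS takes over a kernel TCP socket by swapping its sk_data_ready/sk_write_space callbacks and remembering the originals in the t_orig_* fields, then puts them back on teardown. A minimal user-space sketch of that save/override/restore shape (all names below are stand-ins, not the kernel API):

```c
#include <stdio.h>

/* Stand-ins for the two socket callback slots RDS overrides. */
struct fake_sock {
    void (*data_ready)(struct fake_sock *sk);
    void (*write_space)(struct fake_sock *sk);
};

/* Mirrors the rds_tcp_connection t_orig_* fields: saved callbacks. */
struct conn {
    struct fake_sock *sock;
    void (*orig_data_ready)(struct fake_sock *sk);
    void (*orig_write_space)(struct fake_sock *sk);
};

static void tcp_data_ready(struct fake_sock *sk)  { puts("default: data ready"); }
static void tcp_write_space(struct fake_sock *sk) { puts("default: write space"); }
static void rds_data_ready(struct fake_sock *sk)  { puts("RDS: parse frames"); }
static void rds_write_space(struct fake_sock *sk) { puts("RDS: push send queue"); }

/* Take over the socket, remembering the original callbacks. */
static void set_callbacks(struct conn *c, struct fake_sock *sk)
{
    c->sock = sk;
    c->orig_data_ready = sk->data_ready;
    c->orig_write_space = sk->write_space;
    sk->data_ready = rds_data_ready;
    sk->write_space = rds_write_space;
}

/* Counterpart of rds_tcp_restore_callbacks(): undo the takeover. */
static void restore_callbacks(struct conn *c)
{
    c->sock->data_ready = c->orig_data_ready;
    c->sock->write_space = c->orig_write_space;
    c->sock = NULL;                /* like tc->t_sock = NULL */
}

int main(void)
{
    struct fake_sock sk = { tcp_data_ready, tcp_write_space };
    struct conn c;

    set_callbacks(&c, &sk);
    sk.data_ready(&sk);            /* RDS: parse frames */
    restore_callbacks(&c);
    sk.data_ready(&sk);            /* default: data ready */
    return 0;
}
```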
|
D | tcp_recv.c |
    159  struct rds_tcp_connection *tc = cp->cp_transport_data;  in rds_tcp_data_recv() local
    160  struct rds_tcp_incoming *tinc = tc->t_tinc;  in rds_tcp_data_recv()
    164  rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,  in rds_tcp_data_recv()
    179  tc->t_tinc = tinc;  in rds_tcp_data_recv()
    190  if (left && tc->t_tinc_hdr_rem) {  in rds_tcp_data_recv()
    191  to_copy = min(tc->t_tinc_hdr_rem, left);  in rds_tcp_data_recv()
    197  tc->t_tinc_hdr_rem,  in rds_tcp_data_recv()
    199  tc->t_tinc_hdr_rem -= to_copy;  in rds_tcp_data_recv()
    203  if (tc->t_tinc_hdr_rem == 0) {  in rds_tcp_data_recv()
    205  tc->t_tinc_data_rem =  in rds_tcp_data_recv()
    [all …]
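rds_tcp_data_recv() parses the TCP byte stream incrementally: t_tinc_hdr_rem counts header bytes still owed, and once it reaches zero the header's length field seeds t_tinc_data_rem for the payload phase. A self-contained model of that two-counter bookkeeping, assuming an invented 8-byte demo header rather than the real RDS header:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define HDR_LEN 8  /* demo header: 4-byte magic + 4-byte payload length */

struct reasm {
    uint8_t hdr[HDR_LEN];
    size_t  hdr_rem;   /* like tc->t_tinc_hdr_rem  */
    size_t  data_rem;  /* like tc->t_tinc_data_rem */
};

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Feed one chunk of stream data, as rds_tcp_data_recv() is fed skb fragments. */
static void feed(struct reasm *r, const uint8_t *p, size_t left)
{
    while (left) {
        if (r->hdr_rem) {                 /* phase 1: finish the header */
            size_t to_copy = min_sz(r->hdr_rem, left);
            memcpy(r->hdr + (HDR_LEN - r->hdr_rem), p, to_copy);
            r->hdr_rem -= to_copy;
            p += to_copy;
            left -= to_copy;
            if (r->hdr_rem == 0) {        /* header complete: learn payload size */
                uint32_t len;
                memcpy(&len, r->hdr + 4, 4);
                r->data_rem = len;
                printf("header done, expecting %u payload bytes\n", len);
            }
        } else {                          /* phase 2: consume payload */
            size_t to_copy = min_sz(r->data_rem, left);
            printf("payload chunk of %zu bytes\n", to_copy);
            r->data_rem -= to_copy;
            p += to_copy;
            left -= to_copy;
            if (r->data_rem == 0)
                r->hdr_rem = HDR_LEN;     /* next message starts with a header */
        }
    }
}

int main(void)
{
    uint8_t msg[HDR_LEN + 5] = "MAGC";
    uint32_t len = 5;

    memcpy(msg + 4, &len, 4);
    memcpy(msg + HDR_LEN, "hello", 5);

    struct reasm r = { .hdr_rem = HDR_LEN };
    feed(&r, msg, 3);                     /* deliberately split mid-header */
    feed(&r, msg + 3, sizeof(msg) - 3);
    return 0;
}
```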
|
D | tcp_send.c |
     54  struct rds_tcp_connection *tc = cp->cp_transport_data;  in rds_tcp_xmit_path_prepare() local
     56  rds_tcp_cork(tc->t_sock, 1);  in rds_tcp_xmit_path_prepare()
     61  struct rds_tcp_connection *tc = cp->cp_transport_data;  in rds_tcp_xmit_path_complete() local
     63  rds_tcp_cork(tc->t_sock, 0);  in rds_tcp_xmit_path_complete()
     85  struct rds_tcp_connection *tc = cp->cp_transport_data;  in rds_tcp_xmit() local
     95  tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);  in rds_tcp_xmit()
     96  rm->m_ack_seq = tc->t_last_sent_nxt +  in rds_tcp_xmit()
    101  tc->t_last_expected_una = rm->m_ack_seq + 1;  in rds_tcp_xmit()
    104  rm, rds_tcp_snd_nxt(tc),  in rds_tcp_xmit()
    110  set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);  in rds_tcp_xmit()
    [all …]
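rds_tcp_xmit() gets RDS acknowledgements for free from TCP: it snapshots snd_nxt into t_last_sent_nxt before queueing a message and derives m_ack_seq from it, so the message is known delivered once snd_una advances past that point (the real code widens this into 64-bit space via rds_tcp_map_seq()). A hedged sketch of the arithmetic, with invented lengths and a plain 32-bit sequence space:

```c
#include <stdio.h>
#include <stdint.h>

/* Models the TCP sequence fields rds_tcp_xmit() reads; values invented. */
struct demo_conn {
    uint32_t snd_nxt;   /* next TCP sequence to send (tcp_sk()->snd_nxt)  */
    uint32_t snd_una;   /* oldest unacked TCP sequence (tcp_sk()->snd_una) */
};

int main(void)
{
    struct demo_conn tc = { .snd_nxt = 1000, .snd_una = 1000 };
    uint32_t hdr_len = 48, payload = 200;

    /* rm->m_ack_seq = t_last_sent_nxt + total length - 1 */
    uint32_t last_sent_nxt = tc.snd_nxt;
    uint32_t m_ack_seq = last_sent_nxt + hdr_len + payload - 1;

    /* t_last_expected_una: the message is delivered once snd_una reaches it */
    uint32_t expected_una = m_ack_seq + 1;

    tc.snd_nxt += hdr_len + payload;      /* TCP consumed the bytes */
    tc.snd_una = tc.snd_nxt;              /* peer acked everything  */

    printf("m_ack_seq=%u, delivered=%s\n", m_ack_seq,
           tc.snd_una >= expected_una ? "yes" : "no");
    return 0;
}
```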
|
D | tcp_connect.c |
     44  struct rds_tcp_connection *tc;  in rds_tcp_state_change() local
     52  tc = cp->cp_transport_data;  in rds_tcp_state_change()
     53  state_change = tc->t_orig_state_change;  in rds_tcp_state_change()
     55  rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);  in rds_tcp_state_change()
     82  struct rds_tcp_connection *tc = cp->cp_transport_data;  in rds_tcp_conn_path_connect() local
     90  mutex_lock(&tc->t_conn_path_lock);  in rds_tcp_conn_path_connect()
     93  mutex_unlock(&tc->t_conn_path_lock);  in rds_tcp_conn_path_connect()
    138  mutex_unlock(&tc->t_conn_path_lock);  in rds_tcp_conn_path_connect()
    155  struct rds_tcp_connection *tc = cp->cp_transport_data;  in rds_tcp_conn_path_shutdown() local
    156  struct socket *sock = tc->t_sock;  in rds_tcp_conn_path_shutdown()
    [all …]
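rds_tcp_conn_path_connect() wraps the whole connect attempt in t_conn_path_lock, so concurrent reconnect workers serialize and a late arrival bails out once the path is no longer down. A hedged pthread model of that guard (states and names invented):

```c
#include <pthread.h>
#include <stdio.h>

enum path_state { PATH_DOWN, PATH_CONNECTING, PATH_UP };

struct path {
    pthread_mutex_t conn_lock;   /* like tc->t_conn_path_lock */
    enum path_state state;
};

/* Only one caller at a time may drive the connect attempt. */
static int path_connect(struct path *p)
{
    pthread_mutex_lock(&p->conn_lock);
    if (p->state != PATH_DOWN) {         /* someone else got here first */
        pthread_mutex_unlock(&p->conn_lock);
        return 0;
    }
    p->state = PATH_CONNECTING;
    /* ... create socket, set callbacks, connect ... */
    p->state = PATH_UP;                  /* pretend the connect succeeded */
    pthread_mutex_unlock(&p->conn_lock);
    return 0;
}

int main(void)
{
    struct path p = { PTHREAD_MUTEX_INITIALIZER, PATH_DOWN };

    path_connect(&p);
    path_connect(&p);   /* second attempt is a no-op: already up */
    printf("state=%d\n", p.state);
    return 0;
}
```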
|
D | tcp.h |
     56  struct rds_tcp_connection *tc);
     57  u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
     58  u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
     59  u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
|
/net/sched/ |
D | sch_mqprio.c |
     31  struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO};  in mqprio_destroy() local
     43  dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);  in mqprio_destroy()
    142  struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO,  in mqprio_init() local
    143  { .tc = qopt->num_tc }};  in mqprio_init()
    146  err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);  in mqprio_init()
    301  struct netdev_tc_txq tc = dev->tc_to_txq[i];  in mqprio_dump_class() local
    304  if (q_idx > tc.offset &&  in mqprio_dump_class()
    305  q_idx <= tc.offset + tc.count) {  in mqprio_dump_class()
    331  struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];  in mqprio_dump_class_stats() local
    341  for (i = tc.offset; i < tc.offset + tc.count; i++) {  in mqprio_dump_class_stats()
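mqprio's central structure is dev->tc_to_txq[]: each traffic class owns a contiguous (offset, count) window of TX queues, which is what mqprio_dump_class() is probing with its offset/count comparisons. A small sketch of the mapping, with an invented 3-class, 8-queue layout:

```c
#include <stdio.h>

/* Mirrors struct netdev_tc_txq: each traffic class owns a contiguous
 * block of TX queues, described by an offset and a count. */
struct tc_txq { int offset, count; };

int main(void)
{
    /* demo layout: 3 classes over 8 queues (values invented) */
    struct tc_txq tc_to_txq[3] = { {0, 4}, {4, 2}, {6, 2} };

    for (int q = 0; q < 8; q++) {
        for (int t = 0; t < 3; t++) {
            struct tc_txq tc = tc_to_txq[t];

            if (q >= tc.offset && q < tc.offset + tc.count) {
                printf("txq %d -> tc %d\n", q, t);
                break;
            }
        }
    }
    return 0;
}
```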
|
D | cls_flower.c |
    213  struct tc_to_netdev tc;  in fl_hw_destroy_filter() local
    221  tc.type = TC_SETUP_CLSFLOWER;  in fl_hw_destroy_filter()
    222  tc.cls_flower = &offload;  in fl_hw_destroy_filter()
    224  dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);  in fl_hw_destroy_filter()
    236  struct tc_to_netdev tc;  in fl_hw_replace_filter() local
    249  tc.type = TC_SETUP_CLSFLOWER;  in fl_hw_replace_filter()
    250  tc.cls_flower = &offload;  in fl_hw_replace_filter()
    253  &tc);  in fl_hw_replace_filter()
    265  struct tc_to_netdev tc;  in fl_hw_update_stats() local
    274  tc.type = TC_SETUP_CLSFLOWER;  in fl_hw_update_stats()
    [all …]
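The fl_hw_* helpers all follow one shape: fill a struct tc_to_netdev with a type tag plus a type-specific payload, then hand it to the driver's single ndo_setup_tc() hook. A hedged model of that tagged-union dispatch (the enum values, payload struct, and handler below are illustrative, not the kernel definitions):

```c
#include <stdio.h>

/* Models the tc_to_netdev tagged union handed to ndo_setup_tc(). */
enum setup_type { SETUP_MQPRIO, SETUP_CLSFLOWER };

struct cls_flower_offload { int command; };

struct tc_to_netdev {
    enum setup_type type;
    union {
        unsigned char num_tc;                  /* MQPRIO payload    */
        struct cls_flower_offload *cls_flower; /* CLSFLOWER payload */
    };
};

/* Stands in for a driver's ndo_setup_tc() implementation. */
static int setup_tc(struct tc_to_netdev *tc)
{
    switch (tc->type) {
    case SETUP_MQPRIO:
        printf("program %u hardware traffic classes\n", tc->num_tc);
        return 0;
    case SETUP_CLSFLOWER:
        printf("flower offload, command %d\n", tc->cls_flower->command);
        return 0;
    }
    return -1;
}

int main(void)
{
    struct cls_flower_offload offload = { .command = 1 };
    struct tc_to_netdev mq = { .type = SETUP_MQPRIO, .num_tc = 4 };
    struct tc_to_netdev fl = { .type = SETUP_CLSFLOWER, .cls_flower = &offload };

    setup_tc(&mq);
    setup_tc(&fl);
    return 0;
}
```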
|
D | Kconfig |
     25  from the package iproute2+tc at
|
/net/mpls/ |
D | internal.h |
      8  u8 tc;  member
     93  static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)  in mpls_entry_encode() argument
     98  (tc << MPLS_LS_TC_SHIFT) |  in mpls_entry_encode()
    111  result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;  in mpls_entry_decode()
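mpls_entry_encode()/mpls_entry_decode() pack and unpack the 32-bit MPLS label-stack entry: a 20-bit label, the 3-bit TC field (formerly EXP, renamed by RFC 5462), the bottom-of-stack bit, and an 8-bit TTL. A self-contained sketch using that standard layout; the helper names mirror the kernel's, but the kernel wraps the word in struct mpls_shim_hdr rather than returning a bare u32:

```c
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define MPLS_LS_LABEL_MASK  0xFFFFF000u
#define MPLS_LS_LABEL_SHIFT 12
#define MPLS_LS_TC_MASK     0x00000E00u
#define MPLS_LS_TC_SHIFT    9
#define MPLS_LS_S_MASK      0x00000100u
#define MPLS_LS_S_SHIFT     8
#define MPLS_LS_TTL_MASK    0x000000FFu

struct mpls_entry_decoded { uint32_t label; uint8_t ttl, tc, bos; };

/* Pack label/ttl/tc/bos into one network-order shim word. */
static uint32_t mpls_entry_encode(uint32_t label, unsigned ttl, unsigned tc, int bos)
{
    return htonl((label << MPLS_LS_LABEL_SHIFT) |
                 (tc << MPLS_LS_TC_SHIFT) |
                 ((bos ? 1u : 0u) << MPLS_LS_S_SHIFT) |
                 ttl);
}

/* Inverse: pull the fields back out of a shim word. */
static struct mpls_entry_decoded mpls_entry_decode(uint32_t hdr)
{
    uint32_t entry = ntohl(hdr);
    struct mpls_entry_decoded d = {
        .label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT,
        .ttl   =  entry & MPLS_LS_TTL_MASK,
        .tc    = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT,
        .bos   = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT,
    };
    return d;
}

int main(void)
{
    uint32_t hdr = mpls_entry_encode(16000, 64, 5, 1);
    struct mpls_entry_decoded d = mpls_entry_decode(hdr);

    printf("label=%u tc=%u bos=%u ttl=%u\n", d.label, d.tc, d.bos, d.ttl);
    return 0;
}
```

The af_mpls.c hit below is the flip side of this encoding: nla_get_labels() rejects netlink-supplied label-stack entries whose TC or TTL bits are set, since those fields are filled in at forwarding time.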
|
D | af_mpls.c |
   1159  if ((dec.bos != bos) || dec.ttl || dec.tc)  in nla_get_labels()
|
/net/9p/ |
D | trans_virtio.c |
    275  VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);  in p9_virtio_request()
    416  memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);  in p9_virtio_zc_request()
    427  memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);  in p9_virtio_zc_request()
    439  VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);  in p9_virtio_zc_request()
|
D | client.c |
    261  c->reqs[row][col].tc = NULL;  in p9_tag_alloc()
    278  if (!req->tc)  in p9_tag_alloc()
    279  req->tc = p9_fcall_alloc(alloc_msize);  in p9_tag_alloc()
    282  if (!req->tc || !req->rc)  in p9_tag_alloc()
    285  p9pdu_reset(req->tc);  in p9_tag_alloc()
    288  req->tc->tag = tag-1;  in p9_tag_alloc()
    295  kfree(req->tc);  in p9_tag_alloc()
    298  req->tc = req->rc = NULL;  in p9_tag_alloc()
    388  kfree(c->reqs[row][col].tc);  in p9_tag_cleanup()
    405  int tag = r->tc->tag;  in p9_free_req()
    [all …]
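In /net/9p/, tc has nothing to do with traffic control: req->tc is the transmit fcall of a request and req->rc its receive twin, and p9_tag_alloc() must end up with both buffers or neither. A hedged miniature of that allocate-pair-or-fail pattern (struct layout and sizes invented):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Miniature of struct p9_fcall: a sized protocol buffer. */
struct fcall {
    size_t   size;      /* bytes currently used */
    size_t   capacity;  /* bytes allocated      */
    uint16_t tag;
    char     sdata[];   /* wire data follows    */
};

/* Miniature of struct p9_req_t: paired transmit/receive buffers. */
struct req {
    struct fcall *tc;   /* transmit call (the "tc" of /net/9p/) */
    struct fcall *rc;   /* receive call */
};

static struct fcall *fcall_alloc(size_t msize)
{
    struct fcall *f = malloc(sizeof(*f) + msize);

    if (f) {
        f->size = 0;
        f->capacity = msize;
    }
    return f;
}

/* Like p9_tag_alloc(): both buffers must exist, or neither does. */
static int req_alloc(struct req *r, uint16_t tag, size_t msize)
{
    if (!r->tc)
        r->tc = fcall_alloc(msize);
    if (!r->rc)
        r->rc = fcall_alloc(msize);
    if (!r->tc || !r->rc) {
        free(r->tc);
        free(r->rc);
        r->tc = r->rc = NULL;
        return -1;
    }
    r->tc->tag = tag;
    r->rc->tag = tag;
    return 0;
}

int main(void)
{
    struct req r = { NULL, NULL };

    if (req_alloc(&r, 0, 8192) == 0)
        printf("tag %u: tc %zu bytes, rc %zu bytes\n",
               r.tc->tag, r.tc->capacity, r.rc->capacity);
    free(r.tc);
    free(r.rc);
    return 0;
}
```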
|
D | trans_rdma.c |
    343  c->busa, c->req->tc->size,  in send_done()
    473  c->req->tc->sdata, c->req->tc->size,  in rdma_request()
    483  sge.length = c->req->tc->size;  in rdma_request()
|
D | trans_fd.c |
    476  m->wbuf = req->tc->sdata;  in p9_write_work()
    477  m->wsize = req->tc->size;  in p9_write_work()
    671  m, current, req->tc, req->tc->id);  in p9_fd_request()
|
/net/6lowpan/ |
D | iphc.c |
    984  u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val;  in lowpan_iphc_tf_compress() local
    987  pr_debug("tc 0x%02x\n", tc);  in lowpan_iphc_tf_compress()
    990  if (!tc) {  in lowpan_iphc_tf_compress()
   1001  lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc));  in lowpan_iphc_tf_compress()
   1006  if (!(tc & 0x3f)) {  in lowpan_iphc_tf_compress()
   1019  tf[0] |= (tc & 0xc0);  in lowpan_iphc_tf_compress()
   1032  memcpy(&tf[0], &tc, sizeof(tc));  in lowpan_iphc_tf_compress()
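lowpan_iphc_tf_compress() chooses among RFC 6282's four TF encodings by testing which parts of the traffic class and flow label are zero; tc here is already ECN-rotated by lowpan_iphc_get_tc(), so 0xc0 masks ECN and 0x3f masks DSCP. A sketch of just the format selection; the precedence below is a plausible reading of the hits, not a line-for-line copy, and the byte packing is omitted:

```c
#include <stdio.h>
#include <stdint.h>

/* RFC 6282 TF values: which parts of traffic class / flow label travel inline. */
enum tf_format {
    TF_ECN_DSCP_FL = 0x00,  /* 4 bytes: full traffic class + flow label */
    TF_ECN_FL      = 0x08,  /* 3 bytes: DSCP elided                     */
    TF_ECN_DSCP    = 0x10,  /* 1 byte:  flow label elided               */
    TF_ELIDED      = 0x18,  /* 0 bytes: everything elided               */
};

/* tc is ECN-rotated as in lowpan_iphc_get_tc(): ECN in the top two bits
 * (0xc0), DSCP in the low six (0x3f). */
static enum tf_format pick_tf(uint8_t tc, uint32_t flow_label)
{
    if (!tc && !flow_label)
        return TF_ELIDED;
    if (!flow_label)
        return TF_ECN_DSCP;
    if (!(tc & 0x3f))        /* DSCP zero: ECN + flow label suffice */
        return TF_ECN_FL;
    return TF_ECN_DSCP_FL;
}

int main(void)
{
    printf("0x%02x\n", pick_tf(0x00, 0));       /* everything elided       */
    printf("0x%02x\n", pick_tf(0xc0, 12345));   /* ECN only + flow label   */
    printf("0x%02x\n", pick_tf(0x2c, 0));       /* DSCP set, no flow label */
    printf("0x%02x\n", pick_tf(0x2c, 12345));   /* full form               */
    return 0;
}
```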
|
/net/ipv6/ |
D | datagram.c |
    965  int tc;  in ip6_datagram_send_ctl() local
    971  tc = *(int *)CMSG_DATA(cmsg);  in ip6_datagram_send_ctl()
    972  if (tc < -1 || tc > 0xff)  in ip6_datagram_send_ctl()
    976  ipc6->tclass = tc;  in ip6_datagram_send_ctl()
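ip6_datagram_send_ctl() reads an int from an IPV6_TCLASS ancillary message and accepts only the range -1 (use the socket default) through 0xff. A user-space counterpart that sends a datagram carrying that cmsg; the destination and tclass value are placeholders:

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET6, SOCK_DGRAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
                                .sin6_port = htons(9) }; /* discard port */
    dst.sin6_addr = in6addr_loopback;

    int tclass = 0x28;       /* must lie in [-1, 0xff], as the kernel checks */
    char payload[] = "hi";
    char cbuf[CMSG_SPACE(sizeof(tclass))];

    struct iovec iov = { payload, sizeof(payload) };
    struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst),
                          .msg_iov = &iov, .msg_iovlen = 1,
                          .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };

    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = IPPROTO_IPV6;
    cmsg->cmsg_type = IPV6_TCLASS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(tclass));
    memcpy(CMSG_DATA(cmsg), &tclass, sizeof(tclass));

    if (sendmsg(fd, &msg, 0) < 0)
        perror("sendmsg");   /* EINVAL if tclass is out of range */
    close(fd);
    return 0;
}
```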
|
/net/core/ |
D | dev.c |
   1949  struct netdev_tc_txq *tc = &dev->tc_to_txq[0];  in netif_setup_tc() local
   1952  if (tc->offset + tc->count > txq) {  in netif_setup_tc()
   1962  tc = &dev->tc_to_txq[q];  in netif_setup_tc()
   1963  if (tc->offset + tc->count > txq) {  in netif_setup_tc()
   2444  u8 tc = netdev_get_prio_tc_map(dev, skb->priority);  in __skb_tx_hash() local
   2445  qoffset = dev->tc_to_txq[tc].offset;  in __skb_tx_hash()
   2446  qcount = dev->tc_to_txq[tc].count;  in __skb_tx_hash()
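__skb_tx_hash() shows why netif_setup_tc() validates each (offset, count) window against the real queue count: the skb priority picks a traffic class via netdev_get_prio_tc_map(), and the flow hash then selects a queue only inside that class's window. A hedged model of the two steps, with modulo standing in for the kernel's reciprocal_scale():

```c
#include <stdio.h>
#include <stdint.h>

struct tc_txq { uint16_t offset, count; };

/* Demo device: 8 TX queues split into 2 traffic classes (values invented). */
static struct tc_txq tc_to_txq[2] = { {0, 6}, {6, 2} };
static uint8_t prio_tc_map[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Mirrors the shape of __skb_tx_hash(): pick a queue inside the class. */
static uint16_t tx_hash(uint8_t priority, uint32_t hash)
{
    uint8_t tc = prio_tc_map[priority & 7];  /* netdev_get_prio_tc_map() */
    uint16_t qoffset = tc_to_txq[tc].offset;
    uint16_t qcount = tc_to_txq[tc].count;

    return qoffset + (uint16_t)(hash % qcount);
}

int main(void)
{
    printf("prio 0 -> txq %u\n", tx_hash(0, 0x9e3779b9));  /* lands in 0..5 */
    printf("prio 5 -> txq %u\n", tx_hash(5, 0x9e3779b9));  /* lands in 6..7 */
    return 0;
}
```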
|
/net/netfilter/ |
D | Kconfig |
   1422  in tc world.
|