/net/rds/ |
D | tcp.c |
92  u32 rds_tcp_write_seq(struct rds_tcp_connection *tc) in rds_tcp_write_seq() argument
95  	return tcp_sk(tc->t_sock->sk)->write_seq; in rds_tcp_write_seq()
98  u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) in rds_tcp_snd_una() argument
100 	return tcp_sk(tc->t_sock->sk)->snd_una; in rds_tcp_snd_una()
104 	struct rds_tcp_connection *tc) in rds_tcp_restore_callbacks() argument
106 	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc); in rds_tcp_restore_callbacks()
111 	list_del_init(&tc->t_list_item); in rds_tcp_restore_callbacks()
115 	if (!tc->t_cpath->cp_conn->c_isv6) in rds_tcp_restore_callbacks()
119 	tc->t_sock = NULL; in rds_tcp_restore_callbacks()
121 	sock->sk->sk_write_space = tc->t_orig_write_space; in rds_tcp_restore_callbacks()
[all …]
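The two accessors above expose the TCP socket's send-side sequence state (write_seq: the next byte to be queued; snd_una: the oldest unacked byte) so RDS can map its message acknowledgments onto TCP's. A minimal sketch of how such a pair can be used, assuming a connected kernel TCP socket; the helper name is hypothetical, not part of RDS:

#include <net/tcp.h>

/* Hypothetical helper, not in the RDS sources: true once the peer has
 * acked every byte that was queued before the write_seq snapshot was
 * taken. before() does wrap-safe u32 sequence comparison. */
static bool example_all_sent_bytes_acked(struct sock *sk, u32 write_seq_snapshot)
{
	return !before(tcp_sk(sk)->snd_una, write_seq_snapshot);
}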
|
D | tcp_recv.c |
159 	struct rds_tcp_connection *tc = cp->cp_transport_data; in rds_tcp_data_recv() local
160 	struct rds_tcp_incoming *tinc = tc->t_tinc; in rds_tcp_data_recv()
164 	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset, in rds_tcp_data_recv()
179 	tc->t_tinc = tinc; in rds_tcp_data_recv()
193 	if (left && tc->t_tinc_hdr_rem) { in rds_tcp_data_recv()
194 		to_copy = min(tc->t_tinc_hdr_rem, left); in rds_tcp_data_recv()
200 		tc->t_tinc_hdr_rem, in rds_tcp_data_recv()
202 		tc->t_tinc_hdr_rem -= to_copy; in rds_tcp_data_recv()
206 	if (tc->t_tinc_hdr_rem == 0) { in rds_tcp_data_recv()
208 		tc->t_tinc_data_rem = in rds_tcp_data_recv()
[all …]
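rds_tcp_data_recv() is the read actor RDS hands to tcp_read_sock(): TCP walks its receive queue and invokes the actor for each skb region, and the actor peels off the RDS header (t_tinc_hdr_rem) and then the payload (t_tinc_data_rem). A sketch of that callback shape, with illustrative names rather than the RDS implementation:

#include <net/tcp.h>

/* Sketch of a tcp_read_sock() actor. Returning 'len' consumes the
 * region; clearing desc->count would stop the walk early. */
static int example_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	/* parse protocol header/payload out of skb at 'offset' here */
	return len;
}

static void example_drain_socket(struct sock *sk)
{
	read_descriptor_t desc = { .count = 1 };	/* nonzero: keep going */

	tcp_read_sock(sk, &desc, example_read_actor);
}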
|
D | tcp_send.c |
43 	struct rds_tcp_connection *tc = cp->cp_transport_data; in rds_tcp_xmit_path_prepare() local
45 	tcp_sock_set_cork(tc->t_sock->sk, true); in rds_tcp_xmit_path_prepare()
50 	struct rds_tcp_connection *tc = cp->cp_transport_data; in rds_tcp_xmit_path_complete() local
52 	tcp_sock_set_cork(tc->t_sock->sk, false); in rds_tcp_xmit_path_complete()
74 	struct rds_tcp_connection *tc = cp->cp_transport_data; in rds_tcp_xmit() local
84 	tc->t_last_sent_nxt = rds_tcp_write_seq(tc); in rds_tcp_xmit()
85 	rm->m_ack_seq = tc->t_last_sent_nxt + in rds_tcp_xmit()
90 	tc->t_last_expected_una = rm->m_ack_seq + 1; in rds_tcp_xmit()
96 		rm, rds_tcp_write_seq(tc), in rds_tcp_xmit()
102 	set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags); in rds_tcp_xmit()
[all …]
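The prepare/complete pair brackets each transmit batch with TCP_CORK so small RDS fragments coalesce into full-sized segments. A sketch of the same bracket, assuming a connected kernel socket; the batch body is elided:

#include <linux/net.h>
#include <net/tcp.h>

/* Sketch of the cork bracket rds_tcp_xmit_path_prepare()/_complete()
 * place around a batch: hold partial frames while queueing, then
 * release so TCP pushes out whatever is pending. */
static void example_xmit_batch(struct socket *sock)
{
	tcp_sock_set_cork(sock->sk, true);
	/* ... queue a batch of kernel_sendmsg() calls here ... */
	tcp_sock_set_cork(sock->sk, false);
}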
|
D | tcp_connect.c |
44 	struct rds_tcp_connection *tc; in rds_tcp_state_change() local
52 	tc = cp->cp_transport_data; in rds_tcp_state_change()
53 	state_change = tc->t_orig_state_change; in rds_tcp_state_change()
55 	rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); in rds_tcp_state_change()
100 	struct rds_tcp_connection *tc = cp->cp_transport_data; in rds_tcp_conn_path_connect() local
108 	mutex_lock(&tc->t_conn_path_lock); in rds_tcp_conn_path_connect()
111 	mutex_unlock(&tc->t_conn_path_lock); in rds_tcp_conn_path_connect()
186 	mutex_unlock(&tc->t_conn_path_lock); in rds_tcp_conn_path_connect()
203 	struct rds_tcp_connection *tc = cp->cp_transport_data; in rds_tcp_conn_path_shutdown() local
204 	struct socket *sock = tc->t_sock; in rds_tcp_conn_path_shutdown()
[all …]
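rds_tcp_state_change() is installed in place of the socket's sk_state_change and chains to the saved original (t_orig_state_change). A sketch of that save/replace/chain pattern; RDS keeps the saved pointer per connection, while a global is used here only to keep the sketch short:

#include <net/sock.h>

static void (*example_orig_state_change)(struct sock *sk);

static void example_state_change(struct sock *sk)
{
	/* inspect sk->sk_state here, then chain to the displaced handler */
	example_orig_state_change(sk);
}

/* Install under sk_callback_lock so concurrent readers see a
 * consistent callback pointer. Illustrative only. */
static void example_hook_state_change(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	example_orig_state_change = sk->sk_state_change;
	sk->sk_state_change = example_state_change;
	write_unlock_bh(&sk->sk_callback_lock);
}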
|
D | tcp.h |
56 	struct rds_tcp_connection *tc);
57 u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
58 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
59 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
|
/net/sched/ |
D | sch_mqprio.c |
454 	unsigned int ntx, tc; in mqprio_dump() local
496 	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) { in mqprio_dump()
497 		opt.count[tc] = dev->tc_to_txq[tc].count; in mqprio_dump()
498 		opt.offset[tc] = dev->tc_to_txq[tc].offset; in mqprio_dump()
558 	int tc = netdev_txq_to_tc(dev, cl - 1); in mqprio_dump_class() local
560 	tcm->tcm_parent = (tc < 0) ? 0 : in mqprio_dump_class()
562 		TC_H_MIN(tc + TC_H_MIN_PRIORITY)); in mqprio_dump_class()
583 	struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK]; in mqprio_dump_class_stats() local
593 	for (i = tc.offset; i < tc.offset + tc.count; i++) { in mqprio_dump_class_stats()
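mqprio_dump() serializes the device's tc_to_txq table, in which each traffic class owns a contiguous [offset, offset + count) slice of the transmit queues. A sketch of walking that table; the function name is hypothetical:

#include <linux/netdevice.h>

/* Sketch: print each traffic class's txq slice the same way
 * mqprio_dump() copies it into struct tc_mqprio_qopt. */
static void example_dump_tc_map(struct net_device *dev)
{
	int tc;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++)
		pr_info("tc %d -> txq [%u, %u)\n", tc,
			dev->tc_to_txq[tc].offset,
			dev->tc_to_txq[tc].offset + dev->tc_to_txq[tc].count);
}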
|
D | sch_taprio.c |
203 	int tc, n; in find_entry_to_transmit() local
205 	tc = netdev_get_prio_tc_map(dev, skb->priority); in find_entry_to_transmit()
227 	if (!(entry->gate_mask & BIT(tc)) || in find_entry_to_transmit()
514 	u8 tc; in taprio_peek_soft() local
527 	tc = netdev_get_prio_tc_map(dev, prio); in taprio_peek_soft()
529 	if (!(gate_mask & BIT(tc))) in taprio_peek_soft()
585 	u8 tc; in taprio_dequeue_soft() local
602 	tc = netdev_get_prio_tc_map(dev, prio); in taprio_dequeue_soft()
604 	if (!(gate_mask & BIT(tc))) { in taprio_dequeue_soft()
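taprio admits a packet only while the gate bit for its traffic class is open in the current schedule entry; the class comes from skb->priority via the prio-to-tc map. A sketch of the gating test the peek/dequeue paths share, with a hypothetical helper name:

#include <linux/bits.h>
#include <linux/netdevice.h>

/* Sketch of the check taprio's soft peek/dequeue paths make: is the
 * gate for this skb's traffic class open in the schedule entry? */
static bool example_gate_open(struct net_device *dev,
			      const struct sk_buff *skb, u32 gate_mask)
{
	int tc = netdev_get_prio_tc_map(dev, skb->priority);

	return gate_mask & BIT(tc);
}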
|
D | Kconfig |
26 	  from the package iproute2+tc at
726 	  Say Y here to allow packet sampling tc action. The packet sample
897 	tristate "connection tracking tc action"
908 	tristate "Frame gate entry list control tc action"
937 	  Say Y here to allow tc chain misses to continue in OvS datapath in
939 	  the correct chain in tc software datapath.
941 	  Say N here if you won't be using tc<->ovs offload or tc chains offload.
|
D | cls_u32.c |
342 	struct tc_u_common *tc; in tc_u_common_find() local
343 	hlist_for_each_entry(tc, tc_u_hash(key), hnode) { in tc_u_common_find()
344 		if (tc->ptr == key) in tc_u_common_find()
345 			return tc; in tc_u_common_find()
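tc_u_common_find() scans a hash bucket for the tc_u_common whose key pointer matches, so u32 filter blocks attached to the same place share one common structure. A generic sketch of that pointer-keyed bucket scan; the struct and names are stand-ins for tc_u_common and tc_u_hash():

#include <linux/list.h>
#include <linux/types.h>

struct example_common {
	void *ptr;			/* lookup key */
	struct hlist_node hnode;	/* bucket linkage */
};

/* Sketch: walk one hash bucket and return the entry whose key matches. */
static struct example_common *example_find(struct hlist_head *bucket,
					   void *key)
{
	struct example_common *c;

	hlist_for_each_entry(c, bucket, hnode)
		if (c->ptr == key)
			return c;
	return NULL;
}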
|
D | cls_flower.c |
875 	u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); in fl_set_key_mpls_lse() local
877 	if (tc & ~MPLS_TC_MASK) { in fl_set_key_mpls_lse()
883 	lse_val->mpls_tc = tc; in fl_set_key_mpls_lse()
984 	u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); in fl_set_key_mpls() local
986 	if (tc & ~MPLS_TC_MASK) { in fl_set_key_mpls()
992 	lse_val->mpls_tc = tc; in fl_set_key_mpls()
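The MPLS Traffic Class field is only 3 bits wide, so flower rejects any netlink-supplied value with bits set outside MPLS_TC_MASK (0x7). A sketch of the same validation as a hypothetical wrapper:

#include <linux/errno.h>
#include <linux/mpls.h>

/* Sketch of the range check flower applies to a user-supplied MPLS TC
 * value: anything above MPLS_TC_MASK cannot fit the 3-bit field. */
static int example_validate_mpls_tc(u8 tc)
{
	if (tc & ~MPLS_TC_MASK)
		return -EINVAL;
	return 0;
}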
|
D | cls_api.c |
3685 	entry->mpls_push.tc = tcf_mpls_tc(act); in tc_setup_flow_action()
3696 	entry->mpls_mangle.tc = tcf_mpls_tc(act); in tc_setup_flow_action()
|
/net/9p/ |
D | trans_virtio.c |
273 		VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); in p9_virtio_request()
419 	memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); in p9_virtio_zc_request()
426 	sz = cpu_to_le32(req->tc.size + outlen); in p9_virtio_zc_request()
427 	memcpy(&req->tc.sdata[0], &sz, sizeof(sz)); in p9_virtio_zc_request()
438 	memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); in p9_virtio_zc_request()
450 		VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); in p9_virtio_zc_request()
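p9_virtio_request() hands the marshalled transmit buffer (req->tc.sdata, req->tc.size) to the virtqueue as out-scatterlists. A simplified sketch that queues one buffer as a single out-sg, whereas the real code packs it page by page; the function name and token are illustrative:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Sketch: describe a transmit buffer with one scatterlist entry and
 * queue it as a driver-to-device (out) transfer. */
static int example_queue_request(struct virtqueue *vq, void *data, u32 len,
				 void *token)
{
	struct scatterlist sg;
	struct scatterlist *sgs[1] = { &sg };

	sg_init_one(&sg, data, len);
	return virtqueue_add_sgs(vq, sgs, 1, 0, token, GFP_ATOMIC);
}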
|
D | client.c |
275 	if (p9_fcall_init(c, &req->tc, alloc_msize)) in p9_tag_alloc()
280 	p9pdu_reset(&req->tc); in p9_tag_alloc()
299 	req->tc.tag = tag; in p9_tag_alloc()
320 	p9_fcall_fini(&req->tc); in p9_tag_alloc()
350 	if (req->tc.tag != tag) { in p9_tag_lookup()
371 	u16 tag = r->tc.tag; in p9_tag_remove()
383 	p9_fcall_fini(&r->tc); in p9_req_put()
409 		req->tc.tag); in p9_tag_cleanup()
423 	p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc.tag); in p9_client_cb()
432 	p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag); in p9_client_cb()
[all …]
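Every 9p request embeds a transmit fcall (tc) and a reply fcall (rc); the tag stamped into req->tc.tag is what lets the transport match a server reply back to its request, as p9_tag_lookup() does above. A sketch of the matching predicate, as a hypothetical helper:

#include <net/9p/client.h>

/* Sketch of the tag check done on reply arrival: a reply is only
 * accepted if the tag the server echoed matches the tag stamped into
 * the request's transmit fcall. */
static bool example_reply_matches(struct p9_req_t *req, u16 reply_tag)
{
	return req->tc.tag == reply_tag;
}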
|
D | trans_rdma.c |
352 		c->busa, c->req->tc.size, in send_done()
489 		c->req->tc.sdata, c->req->tc.size, in rdma_request()
499 	sge.length = c->req->tc.size; in rdma_request()
528 		c->req->tc.size, DMA_TO_DEVICE); in rdma_request()
|
D | trans_xen.c |
144 	u32 size = p9_req->tc.size; in p9_xen_request()
157 	num = p9_req->tc.tag % priv->num_rings; in p9_xen_request()
179 	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size, in p9_xen_request()
|
D | trans_fd.c |
476 	m->wbuf = req->tc.sdata; in p9_write_work()
477 	m->wsize = req->tc.size; in p9_write_work()
676 	m, current, &req->tc, req->tc.id); in p9_fd_request()
|
/net/core/ |
D | net-sysfs.c |
1229 	int num_tc, tc; in traffic_class_show() local
1244 	tc = netdev_txq_to_tc(dev, index); in traffic_class_show()
1248 	if (tc < 0) in traffic_class_show()
1258 	return num_tc < 0 ? sprintf(buf, "%d%d\n", tc, num_tc) : in traffic_class_show()
1259 		sprintf(buf, "%d\n", tc); in traffic_class_show()
1421 	int tc, char *buf, enum xps_map_type type) in xps_queue_show() argument
1443 	if (!dev_maps || tc >= dev_maps->num_tc) in xps_queue_show()
1447 	int i, tci = j * dev_maps->num_tc + tc; in xps_queue_show()
1474 	int len, tc; in xps_cpus_show() local
1487 	tc = netdev_txq_to_tc(dev, index); in xps_cpus_show()
[all …]
|
D | dev.c |
2373 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; in netif_setup_tc() local
2376 	if (tc->offset + tc->count > txq) { in netif_setup_tc()
2386 	tc = &dev->tc_to_txq[q]; in netif_setup_tc()
2387 	if (tc->offset + tc->count > txq) { in netif_setup_tc()
2398 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; in netdev_txq_to_tc() local
2402 	for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { in netdev_txq_to_tc()
2403 		if ((txq - tc->offset) < tc->count) in netdev_txq_to_tc()
2576 	int tc, bool skip_tc) in xps_copy_dev_maps() argument
2583 	if (i == tc && skip_tc) in xps_copy_dev_maps()
2600 	int maps_sz, num_tc = 1, tc = 0; in __netif_set_xps_queue() local
[all …]
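netdev_txq_to_tc() inverts the tc_to_txq table: it scans the per-class ranges and returns the class whose slice contains the queue index. A sketch of that reverse lookup; the real function additionally bails out early when the device has no traffic classes:

#include <linux/netdevice.h>

/* Sketch of the reverse mapping: find the traffic class whose
 * [offset, offset + count) range covers txq. The unsigned subtraction
 * wraps for txq < offset, so the '< count' test fails, as in dev.c.
 * Returns -1 when no class owns the queue. */
static int example_txq_to_tc(const struct net_device *dev, unsigned int txq)
{
	int tc;

	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
		const struct netdev_tc_txq *range = &dev->tc_to_txq[tc];

		if (txq - range->offset < range->count)
			return tc;
	}
	return -1;
}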
|
/net/6lowpan/ |
D | iphc.c |
1020 	u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val; in lowpan_iphc_tf_compress() local
1023 	pr_debug("tc 0x%02x\n", tc); in lowpan_iphc_tf_compress()
1026 	if (!tc) { in lowpan_iphc_tf_compress()
1037 	lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc)); in lowpan_iphc_tf_compress()
1042 	if (!(tc & 0x3f)) { in lowpan_iphc_tf_compress()
1055 	tf[0] |= (tc & 0xc0); in lowpan_iphc_tf_compress()
1068 	memcpy(&tf[0], &tc, sizeof(tc)); in lowpan_iphc_tf_compress()
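lowpan_iphc_tf_compress() picks one of RFC 6282's four traffic class/flow label (TF) encodings from the rotated traffic-class byte (ECN in bits 7-6, DSCP in bits 5-0) and the flow label. A sketch of just the mode selection under those assumptions; the enum names are illustrative, not kernel constants:

#include <linux/types.h>

enum example_tf_mode {
	EX_TF_FULL = 0,		/* 00: ECN + DSCP + flow label, 4 bytes inline */
	EX_TF_ECN_FL = 1,	/* 01: ECN + flow label, DSCP elided, 3 bytes */
	EX_TF_TC_ONLY = 2,	/* 10: ECN + DSCP, flow label elided, 1 byte */
	EX_TF_ELIDED = 3,	/* 11: everything elided */
};

/* Sketch of the decision tree: elide the flow label when it is zero,
 * and elide the DSCP (tc & 0x3f) whenever it is zero. */
static enum example_tf_mode example_pick_tf(u8 tc, u32 flow_lbl)
{
	if (!flow_lbl)
		return tc ? EX_TF_TC_ONLY : EX_TF_ELIDED;
	return (tc & 0x3f) ? EX_TF_FULL : EX_TF_ECN_FL;
}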
|
/net/mpls/ |
D | internal.h |
14 	u8 tc; member
182 	result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; in mpls_entry_decode()
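mpls_entry_decode() splits a 32-bit label stack entry into label (20 bits), TC (3), bottom-of-stack (1) and TTL (8) using the uapi masks. A sketch of the same extraction with a stand-in struct:

#include <linux/mpls.h>
#include <linux/types.h>

struct example_mpls_entry {
	u32 label;	/* 20 bits */
	u8 ttl;		/* 8 bits */
	u8 tc;		/* 3 bits */
	u8 bos;		/* bottom-of-stack flag */
};

/* Sketch of the field extraction on one label stack entry, given in
 * host byte order (i.e. after ntohl). */
static struct example_mpls_entry example_decode(u32 entry)
{
	return (struct example_mpls_entry) {
		.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT,
		.ttl   = (entry & MPLS_LS_TTL_MASK)   >> MPLS_LS_TTL_SHIFT,
		.tc    = (entry & MPLS_LS_TC_MASK)    >> MPLS_LS_TC_SHIFT,
		.bos   = (entry & MPLS_LS_S_MASK)     >> MPLS_LS_S_SHIFT,
	};
}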
|
D | af_mpls.c |
1774 	if (dec.tc) { in nla_get_labels()
|
/net/ipv6/ |
D | datagram.c |
996 	int tc; in ip6_datagram_send_ctl() local
1002 	tc = *(int *)CMSG_DATA(cmsg); in ip6_datagram_send_ctl()
1003 	if (tc < -1 || tc > 0xff) in ip6_datagram_send_ctl()
1007 	ipc6->tclass = tc; in ip6_datagram_send_ctl()
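ip6_datagram_send_ctl() accepts an IPV6_TCLASS ancillary int in the range -1 to 255, where -1 means "fall back to the socket default". A userspace sketch of attaching that cmsg to a sendmsg() call; the helper is hypothetical, and ctl must be at least CMSG_SPACE(sizeof(int)) bytes:

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch: populate msg_control with a single IPV6_TCLASS cmsg.
 * Values outside -1..255 are rejected by the kernel with EINVAL. */
static void example_set_tclass(struct msghdr *msg, char *ctl, size_t ctlsz,
			       int tclass)
{
	struct cmsghdr *cmsg;

	memset(ctl, 0, ctlsz);
	msg->msg_control = ctl;
	msg->msg_controllen = ctlsz;

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = IPPROTO_IPV6;
	cmsg->cmsg_type = IPV6_TCLASS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &tclass, sizeof(int));

	msg->msg_controllen = cmsg->cmsg_len;
}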
|
/net/dsa/ |
D | Kconfig |
121 	  use with tc-flower.
|
/net/netfilter/ |
D | Kconfig |
1555 	  in tc world.
|