/net/caif/ |
D | cfpkt_skbuff.c |
17  #define PKT_ERROR(pkt, errmsg) \  argument
19  cfpkt_priv(pkt)->erronous = true; \
20  skb_reset_tail_pointer(&pkt->skb); \
45  static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)  in cfpkt_priv() argument
47  return (struct cfpkt_priv_data *) pkt->skb.cb;  in cfpkt_priv()
50  static inline bool is_erronous(struct cfpkt *pkt)  in is_erronous() argument
52  return cfpkt_priv(pkt)->erronous;  in is_erronous()
55  static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)  in pkt_to_skb() argument
57  return &pkt->skb;  in pkt_to_skb()
67  struct cfpkt *pkt = skb_to_pkt(nativepkt);  in cfpkt_fromnative() local
[all …]
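The fragments above show the core trick in cfpkt_skbuff.c: a struct cfpkt is a thin wrapper around struct sk_buff, with per-packet CAIF state kept in the skb's control buffer (cb). The following is a minimal sketch of that pattern; the struct layouts are inferred from the lines shown here, not copied from the file, and may omit fields.

        /* Sketch of the cfpkt/sk_buff container pattern; fields beyond those
         * visible in the listing (erronous, skb, cb) are assumptions. */
        struct cfpkt_priv_data {
                bool erronous;          /* set by PKT_ERROR() on parse failures */
        };

        struct cfpkt {
                struct sk_buff skb;     /* the packet *is* an sk_buff underneath */
        };

        static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
        {
                /* per-packet scratch space lives in skb->cb, as line 47 shows */
                return (struct cfpkt_priv_data *)pkt->skb.cb;
        }

        static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
        {
                return &pkt->skb;
        }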
|
D | cfctrl.c |
23  int cmd, struct cfpkt *pkt){  in handle_loop() argument
28  int cmd, struct cfpkt *pkt);
30  static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
178  struct cfpkt *pkt;  in cfctrl_enum_req() local
186  pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);  in cfctrl_enum_req()
187  if (!pkt)  in cfctrl_enum_req()
190  init_info(cfpkt_info(pkt), cfctrl);  in cfctrl_enum_req()
191  cfpkt_info(pkt)->dev_info->id = physlinkid;  in cfctrl_enum_req()
193  cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);  in cfctrl_enum_req()
194  cfpkt_addbdy(pkt, physlinkid);  in cfctrl_enum_req()
[all …]
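cfctrl_enum_req() illustrates the usual way a CAIF control PDU is put together: allocate a fixed-size packet, initialise its info block, then append command and parameter bytes. Below is a hedged sketch of just that sequence; the wrapper function name is hypothetical, init_info() and CFPKT_CTRL_PKT_LEN are taken on trust from the listing, and error handling plus the transmit step are omitted.

        /* Sketch only: build an ENUM request the way the listed lines suggest. */
        static struct cfpkt *build_enum_req(struct cfctrl *cfctrl, u8 physlinkid)
        {
                struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);

                if (!pkt)
                        return NULL;

                init_info(cfpkt_info(pkt), cfctrl);
                cfpkt_info(pkt)->dev_info->id = physlinkid;

                cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);     /* command byte */
                cfpkt_addbdy(pkt, physlinkid);          /* parameter byte */
                return pkt;
        }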
|
D | cfserl.c |
29  static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
30  static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
58  struct cfpkt *pkt = NULL;  in cfserl_receive() local
72  pkt = layr->incomplete_frm;  in cfserl_receive()
73  if (pkt == NULL) {  in cfserl_receive()
78  pkt = newpkt;  in cfserl_receive()
85  cfpkt_extr_head(pkt, &tmp8, 1);  in cfserl_receive()
87  while (cfpkt_more(pkt)  in cfserl_receive()
89  cfpkt_extr_head(pkt, &tmp8, 1);  in cfserl_receive()
91  if (!cfpkt_more(pkt)) {  in cfserl_receive()
[all …]
|
D | cfdgml.c |
24  static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
25  static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
40  static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)  in cfdgml_receive() argument
49  if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {  in cfdgml_receive()
51  cfpkt_destroy(pkt);  in cfdgml_receive()
56  if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {  in cfdgml_receive()
58  cfpkt_destroy(pkt);  in cfdgml_receive()
61  ret = layr->up->receive(layr->up, pkt);  in cfdgml_receive()
68  cfpkt_destroy(pkt);  in cfdgml_receive()
72  cfpkt_destroy(pkt);  in cfdgml_receive()
[all …]
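cfdgml_receive() — and the very similar cfveil.c and cfutill.c handlers further down this listing — follows one pattern: pull a one-byte command off the head of the packet, optionally strip a small header, then either hand the packet to the layer above or destroy it. A minimal sketch of that shape, assuming only the cfpkt helpers named in the listing; the function name is hypothetical and the real command values and branches are not reproduced.

        /* Sketch of the "strip command byte, then pass up or destroy" pattern. */
        static int payload_receive_sketch(struct cflayer *layr, struct cfpkt *pkt)
        {
                u8 cmd;

                if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
                        /* truncated packet: this layer owns and must free it */
                        cfpkt_destroy(pkt);
                        return -EPROTO;
                }

                /* a data command is forwarded; ownership moves to the upper layer */
                return layr->up->receive(layr->up, pkt);
        }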
|
D | cfrfml.c |
21  static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
22  static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
72  struct cfpkt *pkt, int *err)  in rfm_append() argument
78  if (cfpkt_extr_head(pkt, seghead, 6) < 0)  in rfm_append()
85  tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,  in rfm_append()
97  static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)  in cfrfml_receive() argument
112  if (cfpkt_extr_head(pkt, &tmp, 1) < 0)  in cfrfml_receive()
119  if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0)  in cfrfml_receive()
124  if (cfpkt_erroneous(pkt))  in cfrfml_receive()
126  rfml->incomplete_frm = pkt;  in cfrfml_receive()
[all …]
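cfrfml.c handles re-segmented frames: rfm_append() strips the 6-byte segmentation header from each fragment and splices the remainder onto the buffered incomplete frame with cfpkt_append(). Below is a hedged sketch of only that append step; the helper name and the handling of the expected-length argument are assumptions, and error reporting is simplified away.

        /* Sketch of fragment reassembly via cfpkt_append(), per rfm_append() above. */
        static struct cfpkt *append_frag_sketch(struct cfpkt *incomplete,
                                                struct cfpkt *frag, u16 expectlen)
        {
                u8 seghead[6];

                /* drop the per-fragment segmentation header first */
                if (cfpkt_extr_head(frag, seghead, 6) < 0)
                        return NULL;

                /* returns the (possibly reallocated) combined packet, or NULL */
                return cfpkt_append(incomplete, frag, expectlen);
        }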
|
D | cffrml.c |
28  static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
29  static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
82  static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)  in cffrml_receive() argument
91  cfpkt_extr_head(pkt, &tmp, 2);  in cffrml_receive()
98  if (cfpkt_setlen(pkt, len) < 0) {  in cffrml_receive()
101  cfpkt_destroy(pkt);  in cffrml_receive()
109  cfpkt_extr_trail(pkt, &tmp, 2);  in cffrml_receive()
111  pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);  in cffrml_receive()
113  cfpkt_add_trail(pkt, &tmp, 2);  in cffrml_receive()
121  if (cfpkt_erroneous(pkt)) {  in cffrml_receive()
[all …]
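The cffrml_receive() lines sketch the CAIF framing layer's frame check: a 16-bit length is pulled from the head, the packet is trimmed to that length, and a 16-bit FCS is popped off the tail and compared against a checksum computed over the remaining bytes. A hedged reconstruction of just the checksum comparison follows, reusing the helpers named above; the wrapper function, the byte-order handling, and cffrml_checksum() itself are assumptions taken from the listing.

        /* Sketch: verify a trailing 16-bit checksum the way the listing implies. */
        static bool frame_checksum_ok(struct cfpkt *pkt)
        {
                u16 tmp, pktchks;

                /* pop the transmitted FCS off the tail ... */
                cfpkt_extr_trail(pkt, &tmp, 2);

                /* ... recompute over what is left ... */
                pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);

                /* ... and put the trailer back so the packet is unchanged */
                cfpkt_add_trail(pkt, &tmp, 2);

                return pktchks == tmp;
        }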
|
D | cfveil.c |
23  static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
24  static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
39  static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)  in cfvei_receive() argument
48  if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {  in cfvei_receive()
50  cfpkt_destroy(pkt);  in cfvei_receive()
55  ret = layr->up->receive(layr->up, pkt);  in cfvei_receive()
59  cfpkt_destroy(pkt);  in cfvei_receive()
63  cfpkt_destroy(pkt);  in cfvei_receive()
66  cfpkt_destroy(pkt);  in cfvei_receive()
70  cfpkt_destroy(pkt);  in cfvei_receive()
[all …]
|
D | cfutill.c |
24  static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
25  static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
40  static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)  in cfutill_receive() argument
48  if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {  in cfutill_receive()
50  cfpkt_destroy(pkt);  in cfutill_receive()
56  return layr->up->receive(layr->up, pkt);  in cfutill_receive()
59  cfpkt_destroy(pkt);  in cfutill_receive()
63  cfpkt_destroy(pkt);  in cfutill_receive()
69  cfpkt_destroy(pkt);  in cfutill_receive()
72  cfpkt_destroy(pkt);  in cfutill_receive()
[all …]
|
/net/vmw_vsock/ |
D | virtio_transport_common.c |
52  struct virtio_vsock_pkt *pkt;  in virtio_transport_alloc_pkt() local
55  pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);  in virtio_transport_alloc_pkt()
56  if (!pkt)  in virtio_transport_alloc_pkt()
59  pkt->hdr.type = cpu_to_le16(info->type);  in virtio_transport_alloc_pkt()
60  pkt->hdr.op = cpu_to_le16(info->op);  in virtio_transport_alloc_pkt()
61  pkt->hdr.src_cid = cpu_to_le64(src_cid);  in virtio_transport_alloc_pkt()
62  pkt->hdr.dst_cid = cpu_to_le64(dst_cid);  in virtio_transport_alloc_pkt()
63  pkt->hdr.src_port = cpu_to_le32(src_port);  in virtio_transport_alloc_pkt()
64  pkt->hdr.dst_port = cpu_to_le32(dst_port);  in virtio_transport_alloc_pkt()
65  pkt->hdr.flags = cpu_to_le32(info->flags);  in virtio_transport_alloc_pkt()
[all …]
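virtio_transport_alloc_pkt() shows the basic recipe for a vsock packet on the virtio transports: allocate the descriptor, then fill the wire header with little-endian conversions of the addressing fields. Below is a trimmed sketch of only that header setup; the function name is hypothetical, and the payload buffer allocation and error unwinding present in the real function are left out.

        /* Sketch: header initialisation only, as suggested by lines 55-65 above. */
        static struct virtio_vsock_pkt *
        alloc_pkt_sketch(struct virtio_vsock_pkt_info *info,
                         u64 src_cid, u32 src_port, u64 dst_cid, u32 dst_port)
        {
                struct virtio_vsock_pkt *pkt;

                pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
                if (!pkt)
                        return NULL;

                /* everything on the wire is little-endian */
                pkt->hdr.type     = cpu_to_le16(info->type);
                pkt->hdr.op       = cpu_to_le16(info->op);
                pkt->hdr.src_cid  = cpu_to_le64(src_cid);
                pkt->hdr.dst_cid  = cpu_to_le64(dst_cid);
                pkt->hdr.src_port = cpu_to_le32(src_port);
                pkt->hdr.dst_port = cpu_to_le32(dst_port);
                pkt->hdr.flags    = cpu_to_le32(info->flags);

                return pkt;
        }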
|
D | vmci_transport.c |
40  struct vmci_transport_packet *pkt);
44  struct vmci_transport_packet *pkt);
47  struct vmci_transport_packet *pkt);
50  struct vmci_transport_packet *pkt);
53  struct vmci_transport_packet *pkt);
55  struct vmci_transport_packet *pkt);
65  struct vmci_transport_packet pkt;  member
112  vmci_transport_packet_init(struct vmci_transport_packet *pkt,  in vmci_transport_packet_init() argument
125  pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,  in vmci_transport_packet_init()
127  pkt->dg.dst = vmci_make_handle(dst->svm_cid,  in vmci_transport_packet_init()
[all …]
|
D | virtio_transport.c |
104  struct virtio_vsock_pkt *pkt;  in virtio_transport_send_pkt_work() local
115  pkt = list_first_entry(&vsock->send_pkt_list,  in virtio_transport_send_pkt_work()
117  list_del_init(&pkt->list);  in virtio_transport_send_pkt_work()
120  virtio_transport_deliver_tap_pkt(pkt);  in virtio_transport_send_pkt_work()
122  reply = pkt->reply;  in virtio_transport_send_pkt_work()
124  sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));  in virtio_transport_send_pkt_work()
126  if (pkt->buf) {  in virtio_transport_send_pkt_work()
127  sg_init_one(&buf, pkt->buf, pkt->len);  in virtio_transport_send_pkt_work()
131  ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);  in virtio_transport_send_pkt_work()
137  list_add(&pkt->list, &vsock->send_pkt_list);  in virtio_transport_send_pkt_work()
[all …]
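The send-work fragments show how each queued packet becomes a small scatterlist array (header, then optional payload) handed to virtqueue_add_sgs(); on failure the packet is pushed back onto send_pkt_list for a later retry. Below is a hedged sketch of only the scatterlist setup, under the assumption that the caller already owns the TX virtqueue; the helper name is illustrative and the retry logic is omitted.

        /* Sketch of the sg setup around virtqueue_add_sgs(), per lines 124-131 above. */
        static int queue_pkt_sketch(struct virtqueue *vq, struct virtio_vsock_pkt *pkt)
        {
                struct scatterlist hdr, buf, *sgs[2];
                int out_sg = 0;

                sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
                sgs[out_sg++] = &hdr;

                if (pkt->buf) {
                        sg_init_one(&buf, pkt->buf, pkt->len);
                        sgs[out_sg++] = &buf;
                }

                /* both entries are driver->device here, so in_sg is 0 */
                return virtqueue_add_sgs(vq, sgs, out_sg, 0, pkt, GFP_KERNEL);
        }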
|
D | vsock_loopback.c |
30  static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)  in vsock_loopback_send_pkt() argument
33  int len = pkt->len;  in vsock_loopback_send_pkt()
36  list_add_tail(&pkt->list, &vsock->pkt_list);  in vsock_loopback_send_pkt()
47  struct virtio_vsock_pkt *pkt, *n;  in vsock_loopback_cancel_pkt() local
51  list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {  in vsock_loopback_cancel_pkt()
52  if (pkt->vsk != vsk)  in vsock_loopback_cancel_pkt()
54  list_move(&pkt->list, &freeme);  in vsock_loopback_cancel_pkt()
58  list_for_each_entry_safe(pkt, n, &freeme, list) {  in vsock_loopback_cancel_pkt()
59  list_del(&pkt->list);  in vsock_loopback_cancel_pkt()
60  virtio_transport_free_pkt(pkt);  in vsock_loopback_cancel_pkt()
[all …]
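vsock_loopback_cancel_pkt() uses a common kernel idiom: while holding the list lock, move every packet owned by the socket onto a private "freeme" list, then free them after the lock is dropped. A minimal sketch of that idiom with the same list and free helpers follows; the function name and the lock parameter are assumptions, since the real code uses the transport's own spinlock.

        /* Sketch: collect-then-free cancellation, as in lines 47-60 above. */
        static void cancel_pkts_sketch(struct vsock_sock *vsk,
                                       struct list_head *pkt_list, spinlock_t *lock)
        {
                struct virtio_vsock_pkt *pkt, *n;
                LIST_HEAD(freeme);

                spin_lock_bh(lock);
                list_for_each_entry_safe(pkt, n, pkt_list, list) {
                        if (pkt->vsk != vsk)
                                continue;
                        list_move(&pkt->list, &freeme);  /* unlink, park on local list */
                }
                spin_unlock_bh(lock);

                /* no lock held: safe to free the parked packets */
                list_for_each_entry_safe(pkt, n, &freeme, list) {
                        list_del(&pkt->list);
                        virtio_transport_free_pkt(pkt);
                }
        }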
|
/net/netfilter/ |
D | nft_set_pipapo_avx2.c |
216  const u8 *pkt, bool first, bool last)  in nft_pipapo_avx2_lookup_4b_2() argument
219  u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };  in nft_pipapo_avx2_lookup_4b_2()
278  const u8 *pkt, bool first, bool last)  in nft_pipapo_avx2_lookup_4b_4() argument
281  u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };  in nft_pipapo_avx2_lookup_4b_4()
354  const u8 *pkt, bool first, bool last)  in nft_pipapo_avx2_lookup_4b_8() argument
356  u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,  in nft_pipapo_avx2_lookup_4b_8()
357  pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,  in nft_pipapo_avx2_lookup_4b_8()
449  const u8 *pkt, bool first, bool last)  in nft_pipapo_avx2_lookup_4b_12() argument
451  u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,  in nft_pipapo_avx2_lookup_4b_12()
452  pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,  in nft_pipapo_avx2_lookup_4b_12()
[all …]
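All the AVX2 lookup variants above start the same way: the relevant packet bytes are exploded into 4-bit groups, high nibble first, and those groups then index the per-field lookup tables. A small sketch of that nibble split follows; the helper is hypothetical and the table walk itself (the AVX2 part) is deliberately omitted.

        /* Sketch: split packet bytes into 4-bit groups, as the pg[] arrays are built above. */
        static void split_nibbles(const u8 *pkt, u8 *pg, unsigned int bytes)
        {
                unsigned int i;

                for (i = 0; i < bytes; i++) {
                        pg[2 * i]     = pkt[i] >> 4;    /* high nibble first */
                        pg[2 * i + 1] = pkt[i] & 0xf;   /* then low nibble */
                }
        }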
|
D | nft_reject_netdev.c |
85  const struct nft_pktinfo *pkt)  in nft_reject_netdev_eval() argument
87  struct ethhdr *eth = eth_hdr(pkt->skb);  in nft_reject_netdev_eval()
99  nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,  in nft_reject_netdev_eval()
100  nft_in(pkt),  in nft_reject_netdev_eval()
101  nft_hook(pkt),  in nft_reject_netdev_eval()
105  nft_reject_netdev_send_v4_tcp_reset(nft_net(pkt), pkt->skb,  in nft_reject_netdev_eval()
106  nft_in(pkt),  in nft_reject_netdev_eval()
107  nft_hook(pkt));  in nft_reject_netdev_eval()
110  nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,  in nft_reject_netdev_eval()
111  nft_in(pkt),  in nft_reject_netdev_eval()
[all …]
|
D | nft_reject_inet.c |
19  const struct nft_pktinfo *pkt)  in nft_reject_inet_eval() argument
23  switch (nft_pf(pkt)) {  in nft_reject_inet_eval()
27  nf_send_unreach(pkt->skb, priv->icmp_code,  in nft_reject_inet_eval()
28  nft_hook(pkt));  in nft_reject_inet_eval()
31  nf_send_reset(nft_net(pkt), nft_sk(pkt),  in nft_reject_inet_eval()
32  pkt->skb, nft_hook(pkt));  in nft_reject_inet_eval()
35  nf_send_unreach(pkt->skb,  in nft_reject_inet_eval()
37  nft_hook(pkt));  in nft_reject_inet_eval()
44  nf_send_unreach6(nft_net(pkt), pkt->skb,  in nft_reject_inet_eval()
45  priv->icmp_code, nft_hook(pkt));  in nft_reject_inet_eval()
[all …]
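nft_reject_inet_eval() — like the netdev and bridge variants elsewhere in this listing — boils down to a switch on the address family followed by the matching reject primitive: an ICMP(v6) unreachable for NFT_REJECT_ICMP_UNREACH, or a TCP RST. A hedged sketch of the IPv4 arm only, using the nf_send_* helpers named above; the wrapper function is hypothetical and the priv layout is assumed from context.

        /* Sketch of the IPv4 reject dispatch, per lines 23-32 above. */
        static void reject_v4_sketch(const struct nft_reject *priv,
                                     const struct nft_pktinfo *pkt)
        {
                switch (priv->type) {
                case NFT_REJECT_ICMP_UNREACH:
                        nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt));
                        break;
                case NFT_REJECT_TCP_RST:
                        nf_send_reset(nft_net(pkt), nft_sk(pkt), pkt->skb,
                                      nft_hook(pkt));
                        break;
                }
        }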
|
D | nft_chain_filter.c |
18  struct nft_pktinfo pkt;  in nft_do_chain_ipv4() local
20  nft_set_pktinfo(&pkt, skb, state);  in nft_do_chain_ipv4()
21  nft_set_pktinfo_ipv4(&pkt);  in nft_do_chain_ipv4()
23  return nft_do_chain(&pkt, priv);  in nft_do_chain_ipv4()
62  struct nft_pktinfo pkt;  in nft_do_chain_arp() local
64  nft_set_pktinfo(&pkt, skb, state);  in nft_do_chain_arp()
65  nft_set_pktinfo_unspec(&pkt);  in nft_do_chain_arp()
67  return nft_do_chain(&pkt, priv);  in nft_do_chain_arp()
102  struct nft_pktinfo pkt;  in nft_do_chain_ipv6() local
104  nft_set_pktinfo(&pkt, skb, state);  in nft_do_chain_ipv6()
[all …]
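Every chain-filter hook in this file has the same three-step body: build an nft_pktinfo from the skb and hook state, refine it for the family (ipv4, ipv6, or unspec for ARP), and run the chain. A sketch of one such hook follows; the function name is hypothetical and the standard netfilter hook prototype is assumed.

        /* Sketch of a filter-chain hook body, matching lines 18-23 above. */
        static unsigned int do_chain_sketch(void *priv, struct sk_buff *skb,
                                            const struct nf_hook_state *state)
        {
                struct nft_pktinfo pkt;

                nft_set_pktinfo(&pkt, skb, state);      /* generic part: skb + state */
                nft_set_pktinfo_ipv4(&pkt);             /* family-specific header setup */

                return nft_do_chain(&pkt, priv);        /* evaluate the rules */
        }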
|
D | nft_meta.c |
80  nft_meta_get_eval_pkttype_lo(const struct nft_pktinfo *pkt,  in nft_meta_get_eval_pkttype_lo() argument
83  const struct sk_buff *skb = pkt->skb;  in nft_meta_get_eval_pkttype_lo()
85  switch (nft_pf(pkt)) {  in nft_meta_get_eval_pkttype_lo()
132  const struct nft_pktinfo *pkt)  in nft_meta_get_eval_skugid() argument
134  struct sock *sk = skb_to_full_sk(pkt->skb);  in nft_meta_get_eval_skugid()
137  if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))  in nft_meta_get_eval_skugid()
166  nft_meta_get_eval_cgroup(u32 *dest, const struct nft_pktinfo *pkt)  in nft_meta_get_eval_cgroup() argument
168  struct sock *sk = skb_to_full_sk(pkt->skb);  in nft_meta_get_eval_cgroup()
170  if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))  in nft_meta_get_eval_cgroup()
180  const struct nft_pktinfo *pkt)  in nft_meta_get_eval_kind() argument
[all …]
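The skuid and cgroup getters above share one guard: resolve the skb to its full socket and bail out unless that socket exists, is a full socket, and belongs to the same network namespace as the packet. A hedged sketch of that guard on its own; the helper name is illustrative and the value actually written into the destination register is out of scope here.

        /* Sketch of the socket sanity check used by the meta getters above. */
        static struct sock *meta_full_sk_sketch(const struct nft_pktinfo *pkt)
        {
                struct sock *sk = skb_to_full_sk(pkt->skb);

                /* request/timewait sockets and foreign-netns sockets are skipped */
                if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
                        return NULL;

                return sk;
        }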
|
D | nf_tables_trace.c |
110  const struct nft_pktinfo *pkt)  in nf_trace_fill_pkt_info() argument
112  const struct sk_buff *skb = pkt->skb;  in nf_trace_fill_pkt_info()
116  nh_end = pkt->flags & NFT_PKTINFO_L4PROTO ? nft_thoff(pkt) : skb->len;  in nf_trace_fill_pkt_info()
122  if (pkt->flags & NFT_PKTINFO_L4PROTO) {  in nf_trace_fill_pkt_info()
123  len = min_t(unsigned int, skb->len - nft_thoff(pkt),  in nf_trace_fill_pkt_info()
126  nft_thoff(pkt), len))  in nf_trace_fill_pkt_info()
185  const struct nft_pktinfo *pkt = info->pkt;  in nft_trace_notify() local
191  if (!nfnetlink_has_listeners(nft_net(pkt), NFNLGRP_NFTRACE))  in nft_trace_notify()
226  if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(nft_pf(pkt))))  in nft_trace_notify()
232  if (trace_fill_id(skb, pkt->skb))  in nft_trace_notify()
[all …]
|
D | nft_exthdr.c |
48  const struct nft_pktinfo *pkt)  in nft_exthdr_ipv6_eval() argument
55  if (pkt->skb->protocol != htons(ETH_P_IPV6))  in nft_exthdr_ipv6_eval()
58  err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);  in nft_exthdr_ipv6_eval()
67  if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)  in nft_exthdr_ipv6_eval()
143  const struct nft_pktinfo *pkt)  in nft_exthdr_ipv4_eval() argument
147  struct sk_buff *skb = pkt->skb;  in nft_exthdr_ipv4_eval()
154  err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);  in nft_exthdr_ipv4_eval()
163  if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)  in nft_exthdr_ipv4_eval()
171  nft_tcp_header_pointer(const struct nft_pktinfo *pkt,  in nft_tcp_header_pointer() argument
176  if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)  in nft_tcp_header_pointer()
[all …]
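nft_exthdr_ipv6_eval() locates an IPv6 extension header by walking the header chain with ipv6_find_hdr() and then copies priv->len bytes from the located offset into the destination register. The sketch below shows only that locate-and-copy step; nft_skb_copy_to_reg() is reused exactly as named in the listing (it is a file-local helper there), the off_in_hdr parameter is an assumption, and register error signalling is omitted.

        /* Sketch: find an IPv6 extension header and copy part of it, per lines 48-67 above. */
        static int exthdr6_copy_sketch(const struct sk_buff *skb, u8 target,
                                       u32 *dest, unsigned int off_in_hdr,
                                       unsigned int len)
        {
                unsigned int offset = 0;
                int err;

                if (skb->protocol != htons(ETH_P_IPV6))
                        return -ENOENT;

                /* returns the header type found (>= 0) and sets offset to its start */
                err = ipv6_find_hdr(skb, &offset, target, NULL, NULL);
                if (err < 0)
                        return err;

                return nft_skb_copy_to_reg(skb, offset + off_in_hdr, dest, len);
        }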
|
/net/qrtr/ |
D | ns.c |
110  struct qrtr_ctrl_pkt pkt;  in service_announce_new() local
117  iv.iov_base = &pkt;  in service_announce_new()
118  iv.iov_len = sizeof(pkt);  in service_announce_new()
120  memset(&pkt, 0, sizeof(pkt));  in service_announce_new()
121  pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);  in service_announce_new()
122  pkt.server.service = cpu_to_le32(srv->service);  in service_announce_new()
123  pkt.server.instance = cpu_to_le32(srv->instance);  in service_announce_new()
124  pkt.server.node = cpu_to_le32(srv->node);  in service_announce_new()
125  pkt.server.port = cpu_to_le32(srv->port);  in service_announce_new()
130  return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));  in service_announce_new()
[all …]
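service_announce_new() is a compact example of sending a fixed-size control structure from kernel space: zero a struct qrtr_ctrl_pkt, fill it with little-endian fields, point a kvec at it and push it out with kernel_sendmsg(). A sketch of that send path follows, assuming the caller has already prepared the destination msghdr as the surrounding code does; the function name is hypothetical and the qrtr_server field names are taken from the listing.

        /* Sketch: announce a server the way lines 110-130 above suggest. */
        static int announce_sketch(struct socket *sock, struct msghdr *msg,
                                   const struct qrtr_server *srv)
        {
                struct qrtr_ctrl_pkt pkt;
                struct kvec iv = { .iov_base = &pkt, .iov_len = sizeof(pkt) };

                memset(&pkt, 0, sizeof(pkt));
                pkt.cmd             = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
                pkt.server.service  = cpu_to_le32(srv->service);
                pkt.server.instance = cpu_to_le32(srv->instance);
                pkt.server.node     = cpu_to_le32(srv->node);
                pkt.server.port     = cpu_to_le32(srv->port);

                /* one kvec element, sizeof(pkt) bytes total */
                return kernel_sendmsg(sock, msg, &iv, 1, sizeof(pkt));
        }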
|
/net/bridge/netfilter/ |
D | nft_reject_bridge.c |
112  const struct nft_pktinfo *pkt)  in nft_reject_bridge_eval() argument
115  const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;  in nft_reject_bridge_eval()
121  switch (eth_hdr(pkt->skb)->h_proto) {  in nft_reject_bridge_eval()
125  nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,  in nft_reject_bridge_eval()
126  nft_in(pkt),  in nft_reject_bridge_eval()
127  nft_hook(pkt),  in nft_reject_bridge_eval()
131  nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,  in nft_reject_bridge_eval()
132  nft_in(pkt),  in nft_reject_bridge_eval()
133  nft_hook(pkt));  in nft_reject_bridge_eval()
136  nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,  in nft_reject_bridge_eval()
[all …]
|
/net/rxrpc/ |
D | output.c |
71  struct rxrpc_ack_buffer *pkt,  in rxrpc_fill_out_ack() argument
81  u8 *ackp = pkt->acks;  in rxrpc_fill_out_ack()
96  pkt->ack.bufferSpace = htons(0);  in rxrpc_fill_out_ack()
97  pkt->ack.maxSkew = htons(0);  in rxrpc_fill_out_ack()
98  pkt->ack.firstPacket = htonl(hard_ack + 1);  in rxrpc_fill_out_ack()
99  pkt->ack.previousPacket = htonl(call->ackr_highest_seq);  in rxrpc_fill_out_ack()
100  pkt->ack.serial = htonl(serial);  in rxrpc_fill_out_ack()
101  pkt->ack.reason = reason;  in rxrpc_fill_out_ack()
102  pkt->ack.nAcks = top - hard_ack;  in rxrpc_fill_out_ack()
105  pkt->whdr.flags |= RXRPC_REQUEST_ACK;  in rxrpc_fill_out_ack()
[all …]
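rxrpc_fill_out_ack() fills the on-the-wire ACK trailer field by field in network byte order; the window it acknowledges runs from hard_ack + 1 up to top, so nAcks is simply the difference. Below is a hedged sketch of just that header fill, with the rxrpc_ack_buffer layout assumed from the lines above and the per-packet soft-ACK bytes left out; the function name and parameter list are illustrative.

        /* Sketch: populate the ACK header as in lines 96-102 above. */
        static void fill_ack_sketch(struct rxrpc_ack_buffer *pkt, u32 serial,
                                    u32 hard_ack, u32 top, u32 highest_seq, u8 reason)
        {
                pkt->ack.bufferSpace    = htons(0);
                pkt->ack.maxSkew        = htons(0);
                pkt->ack.firstPacket    = htonl(hard_ack + 1);  /* first unacked seq */
                pkt->ack.previousPacket = htonl(highest_seq);
                pkt->ack.serial         = htonl(serial);
                pkt->ack.reason         = reason;
                pkt->ack.nAcks          = top - hard_ack;       /* size of the ack window */
        }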
|
D | conn_event.c |
36  } __attribute__((packed)) pkt;  in rxrpc_conn_retransmit_call() local
46  &pkt.ack, sizeof(pkt.ack)) < 0)  in rxrpc_conn_retransmit_call()
48  if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)  in rxrpc_conn_retransmit_call()
69  iov[0].iov_base = &pkt;  in rxrpc_conn_retransmit_call()
70  iov[0].iov_len = sizeof(pkt.whdr);  in rxrpc_conn_retransmit_call()
76  pkt.whdr.epoch = htonl(conn->proto.epoch);  in rxrpc_conn_retransmit_call()
77  pkt.whdr.cid = htonl(conn->proto.cid | channel);  in rxrpc_conn_retransmit_call()
78  pkt.whdr.callNumber = htonl(call_id);  in rxrpc_conn_retransmit_call()
79  pkt.whdr.seq = 0;  in rxrpc_conn_retransmit_call()
80  pkt.whdr.type = chan->last_type;  in rxrpc_conn_retransmit_call()
[all …]
|
/net/ipv6/netfilter/ |
D | nft_fib_ipv6.c |
23  const struct nft_pktinfo *pkt,  in nft_fib6_flowi_init() argument
39  fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);  in nft_fib6_flowi_init()
49  fl6->flowi6_mark = pkt->skb->mark;  in nft_fib6_flowi_init()
57  const struct nft_pktinfo *pkt,  in __nft_fib6_eval_type() argument
65  .flowi6_proto = pkt->tprot,  in __nft_fib6_eval_type()
70  dev = nft_in(pkt);  in __nft_fib6_eval_type()
72  dev = nft_out(pkt);  in __nft_fib6_eval_type()
74  nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);  in __nft_fib6_eval_type()
76  if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))  in __nft_fib6_eval_type()
79  route_err = nf_ip6_route(nft_net(pkt), (struct dst_entry **)&rt,  in __nft_fib6_eval_type()
[all …]
|
/net/ipv4/netfilter/ |
D | nft_fib_ipv4.c |
28  const struct nft_pktinfo *pkt)  in nft_fib4_eval_type() argument
31  int noff = skb_network_offset(pkt->skb);  in nft_fib4_eval_type()
38  dev = nft_in(pkt);  in nft_fib4_eval_type()
40  dev = nft_out(pkt);  in nft_fib4_eval_type()
42  iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);  in nft_fib4_eval_type()
53  *dst = inet_dev_addr_type(nft_net(pkt), dev, addr);  in nft_fib4_eval_type()
58  const struct nft_pktinfo *pkt)  in nft_fib4_eval() argument
61  int noff = skb_network_offset(pkt->skb);  in nft_fib4_eval()
80  oif = nft_out(pkt);  in nft_fib4_eval()
82  oif = nft_in(pkt);  in nft_fib4_eval()
[all …]
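nft_fib4_eval_type() (and its IPv6 counterpart above) shows the defensive way to classify an address from a packet inside an nft expression: pick the input or output device depending on the expression's flags, map the network header through skb_header_pointer() so a non-linear skb cannot be overread, then ask the FIB for the address type. A hedged sketch of that flow follows; the flag handling and the saddr/daddr choice are simplified into booleans, and the register-break behaviour on a short packet is omitted.

        /* Sketch of the address-type lookup in nft_fib4_eval_type(), simplified. */
        static void fib4_addrtype_sketch(u32 *dst, const struct nft_pktinfo *pkt,
                                         bool use_input_dev, bool use_daddr)
        {
                int noff = skb_network_offset(pkt->skb);
                const struct net_device *dev;
                struct iphdr *iph, _iph;
                __be32 addr;

                dev = use_input_dev ? nft_in(pkt) : nft_out(pkt);

                /* copy the header out if the skb is non-linear; NULL means too short */
                iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph);
                if (!iph)
                        return;

                addr = use_daddr ? iph->daddr : iph->saddr;
                *dst = inet_dev_addr_type(nft_net(pkt), dev, addr);
        }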
|