/net/bridge/
br_netfilter.c:

    #define skb_origaddr(skb)        (((struct bridge_skb_cb *) \
                                      (skb->nf_bridge->data))->daddr.ipv4)
    #define store_orig_dstaddr(skb)  (skb_origaddr(skb) = ip_hdr(skb)->daddr)
    #define dnat_took_place(skb)     (skb_origaddr(skb) != ip_hdr(skb)->daddr)

    static inline __be16 vlan_proto(const struct sk_buff *skb)
    {
        return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
    }

    #define IS_VLAN_IP(skb) \
        (skb->protocol == htons(ETH_P_8021Q) && \
         vlan_proto(skb) == htons(ETH_P_IP) && \
         ...)

    #define IS_VLAN_IPV6(skb) \
        ...
    [all …]
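The macro trio implements DNAT detection by before/after comparison: stash the original destination address in the per-skb control block, and any later mismatch with the header means a PREROUTING hook rewrote it. A minimal userspace sketch of the same idea (struct pkt and fake_dnat_hook are illustrative stand-ins, not kernel API):

    #include <stdio.h>

    struct pkt {
        unsigned int daddr;      /* destination address in the "header" */
        unsigned int orig_daddr; /* stashed copy, like bridge_skb_cb */
    };

    /* Stand-in for a NAT hook that may rewrite the destination. */
    static void fake_dnat_hook(struct pkt *p, unsigned int new_daddr)
    {
        p->daddr = new_daddr;
    }

    int main(void)
    {
        struct pkt p = { .daddr = 0x0a000001 };

        p.orig_daddr = p.daddr;          /* store_orig_dstaddr() */
        fake_dnat_hook(&p, 0x0a000002);  /* the hook may rewrite daddr */

        if (p.orig_daddr != p.daddr)     /* dnat_took_place() */
            printf("DNAT took place: %#x -> %#x\n",
                   p.orig_daddr, p.daddr);
        return 0;
    }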
br_forward.c:

    static inline int should_deliver(const struct net_bridge_port *p,
                                     const struct sk_buff *skb)
    {
        return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
    }

    static inline unsigned packet_length(const struct sk_buff *skb)
    {
        return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
    }

    int br_dev_queue_push_xmit(struct sk_buff *skb)
    {
        /* Drop MTU-oversized packets, except when GSO will segment them. */
        if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
            kfree_skb(skb);
        else {
            if (nf_bridge_maybe_copy_header(skb))
                kfree_skb(skb);
            else {
                skb_push(skb, ETH_HLEN);
                ...
    [all …]
br_input.c:

    static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
    {
        ...
        brdev->stats.rx_bytes += skb->len;

        indev = skb->dev;
        skb->dev = brdev;

        NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
                netif_receive_skb);
    }

    int br_handle_frame_finish(struct sk_buff *skb)
    {
        const unsigned char *dest = eth_hdr(skb)->h_dest;
        struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
        ...
        br_fdb_update(br, p, eth_hdr(skb)->h_source);
        ...
        skb2 = skb;
    [all …]
/net/core/
skbuff.c:

    void skb_over_panic(struct sk_buff *skb, int sz, void *here)
    {
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
               (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
    }

    void skb_under_panic(struct sk_buff *skb, int sz, void *here)
    {
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
               (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
    }

    /* in __alloc_skb() */
        struct sk_buff *skb;
        ...
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
    [all …]
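The two panic handlers fire when the pointer invariant head <= data <= tail <= end is violated: skb_put() walking tail past end, or skb_push() walking data below head. A toy userspace model of that layout (an assumed simplification; the toy_* names are not kernel API) makes the checks explicit:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy buffer with the sk_buff pointer layout: head <= data <= tail <= end. */
    struct toy_skb {
        unsigned char *head, *data, *tail, *end;
    };

    static struct toy_skb *toy_alloc(size_t size)
    {
        struct toy_skb *skb = malloc(sizeof(*skb));
        skb->head = skb->data = skb->tail = malloc(size);
        skb->end = skb->head + size;
        return skb;
    }

    /* Like skb_reserve(): move data/tail forward to leave headroom. */
    static void toy_reserve(struct toy_skb *skb, size_t len)
    {
        skb->data += len;
        skb->tail += len;
    }

    /* Like skb_put(): append len bytes. Overrunning end is the
     * condition that triggers skb_over_panic() in the kernel. */
    static unsigned char *toy_put(struct toy_skb *skb, size_t len)
    {
        unsigned char *tmp = skb->tail;
        skb->tail += len;
        assert(skb->tail <= skb->end);   /* skb_over_panic() otherwise */
        return tmp;
    }

    /* Like skb_push(): prepend len bytes. Going below head is what
     * skb_under_panic() reports. */
    static unsigned char *toy_push(struct toy_skb *skb, size_t len)
    {
        skb->data -= len;
        assert(skb->data >= skb->head);  /* skb_under_panic() otherwise */
        return skb->data;
    }

    int main(void)
    {
        struct toy_skb *skb = toy_alloc(128);
        toy_reserve(skb, 16);            /* headroom for headers */
        memcpy(toy_put(skb, 4), "data", 4);
        toy_push(skb, 14);               /* room for an Ethernet header */
        return 0;
    }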
/net/ipv6/
exthdrs.c:

    int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
    {
        const unsigned char *nh = skb_network_header(skb);
        int packet_len = skb->tail - skb->network_header;
        ...
    }

    struct tlvtype_proc {
        int type;
        int (*func)(struct sk_buff *skb, int offset);
    };

    /* Handle an unknown option: the two high bits of the option type
     * encode what to do with a packet carrying it (RFC 2460, 4.2). */
    static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
    {
        switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
        ...
            if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
        ...
            icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
        ...
        }

        kfree_skb(skb);
        ...
    }

    static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
    [all …]
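Both ipv6_find_tlv() and ip6_parse_tlv() walk (type, length, value) triples inside the extension header, where Pad1 (type 0) is a lone byte with no length octet and every other option is followed by a one-byte length of its value. A userspace sketch of that walk over a flat buffer (layout per RFC 2460; find_tlv is an illustrative name, not the kernel function):

    #include <stdio.h>

    #define IPV6_TLV_PAD1 0 /* one-byte padding, no length octet */

    /* Walk TLV-encoded options in buf[off..len) and return the offset of
     * the first option of the requested type, or -1 if not found. */
    static int find_tlv(const unsigned char *buf, int off, int len, int type)
    {
        while (off + 1 <= len) {
            int opttype = buf[off];
            int optlen;

            if (opttype == IPV6_TLV_PAD1) {
                optlen = 1;                /* no length byte */
            } else {
                if (off + 2 > len)
                    return -1;
                optlen = buf[off + 1] + 2; /* type + len + value */
                if (off + optlen > len)
                    return -1;             /* truncated option */
            }
            if (opttype == type)
                return off;
            off += optlen;
        }
        return -1;
    }

    int main(void)
    {
        /* Pad1, then PadN (len 1), then option type 5 with a 2-byte value. */
        unsigned char opts[] = { 0, 1, 1, 0, 5, 2, 0xaa, 0xbb };
        printf("type 5 at offset %d\n", find_tlv(opts, 0, sizeof(opts), 5));
        return 0;
    }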
ip6_input.c:

    inline int ip6_rcv_finish(struct sk_buff *skb)
    {
        if (skb->dst == NULL)
            ip6_route_input(skb);

        return dst_input(skb);
    }

    int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
                 struct packet_type *pt, struct net_device *orig_dev)
    {
        ...
        struct net *net = dev_net(skb->dev);

        if (skb->pkt_type == PACKET_OTHERHOST) {
            kfree_skb(skb);
            return 0;
        }
        ...
        idev = __in6_dev_get(skb->dev);
        ...
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
            ...
    [all …]
ip6_output.c:

    static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

    static __inline__ void ipv6_select_ident(struct sk_buff *skb,
                                             struct frag_hdr *fhdr)
    {
        ...
    }

    int __ip6_local_out(struct sk_buff *skb)
    {
        int len;

        len = skb->len - sizeof(struct ipv6hdr);
        ...
        ipv6_hdr(skb)->payload_len = htons(len);

        return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev,
                       dst_output);
    }

    int ip6_local_out(struct sk_buff *skb)
    {
        int err;

        err = __ip6_local_out(skb);
        if (err == 0)
            err = dst_output(skb);

        return err;
    }

    static int ip6_output_finish(struct sk_buff *skb)
    [all …]
netfilter.c:

    int ip6_route_me_harder(struct sk_buff *skb)
    {
        struct net *net = dev_net(skb->dst->dev);
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct dst_entry *dst;
        struct flowi fl = {
            .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
            .mark = skb->mark,
            ...
        };

        dst = ip6_route_output(net, skb->sk, &fl);
        ...
        if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
            xfrm_decode_session(skb, &fl, AF_INET6) == 0)
            if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0))
                ...
        ...
        dst_release(skb->dst);
    [all …]
/net/netfilter/ipvs/
ip_vs_xmit.c:

    #define IP_VS_XMIT(pf, skb, rt)                        \
    do {                                                   \
        (skb)->ipvs_property = 1;                          \
        skb_forward_csum(skb);                             \
        NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,        \
                (rt)->u.dst.dev, dst_output);              \
    } while (0)

    /* NULL transmitter (do nothing except return NF_ACCEPT). */
    int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                        struct ip_vs_protocol *pp)
    {
        ...
    }

    /* Bypass transmitter: let the packet pass unmodified. */
    int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                          struct ip_vs_protocol *pp)
    {
        ...
        struct iphdr *iph = ip_hdr(skb);
        ...
        if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
            ...
            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
            ...
        }
        ...
        if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
    [all …]
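The MTU check in the bypass path is the standard path-MTU rule: a packet longer than the route MTU whose header has DF set is bounced with ICMP "fragmentation needed" instead of being forwarded. The decision itself is pure bit-testing, as this standalone sketch shows (IP_DF value as in the kernel's ip.h, but tested in host byte order here for simplicity):

    #include <stdio.h>

    #define IP_DF 0x4000 /* don't-fragment flag in frag_off (host order here) */

    /* Returns 1 when the packet must be rejected with ICMP_FRAG_NEEDED. */
    static int needs_frag_needed(unsigned pkt_len, unsigned mtu,
                                 unsigned frag_off)
    {
        return pkt_len > mtu && (frag_off & IP_DF);
    }

    int main(void)
    {
        printf("%d\n", needs_frag_needed(1600, 1500, IP_DF)); /* 1: bounce */
        printf("%d\n", needs_frag_needed(1600, 1500, 0));     /* 0: fragment */
        printf("%d\n", needs_frag_needed(1200, 1500, IP_DF)); /* 0: fits */
        return 0;
    }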
/net/lapb/
lapb_subr.c:

    /* in lapb_frames_acked() */
        struct sk_buff *skb;
        ...
        skb = skb_dequeue(&lapb->ack_queue);
        kfree_skb(skb);

    void lapb_requeue_frames(struct lapb_cb *lapb)
    {
        struct sk_buff *skb, *skb_prev = NULL;

        /* Requeue all the un-ack-ed frames on the output queue, to be
         * picked up again by the transmit path. */
        while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) {
            if (skb_prev == NULL)
                skb_queue_head(&lapb->write_queue, skb);
            else
                skb_append(skb_prev, skb, &lapb->write_queue);
            skb_prev = skb;
        }
    }

    int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
                    struct lapb_frame *frame)
    {
        ...
        printk(..., skb->data[0], skb->data[1], skb->data[2]);
    [all …]
/net/ax25/
ax25_in.c:

    static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
    {
        ...
        if (!(*skb->data & AX25_SEG_FIRST)) {
            if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
                /* Continuing fragment in the expected sequence. */
                ax25->fragno = *skb->data & AX25_SEG_REM;
                skb_pull(skb, 1);           /* skip fragno */
                ax25->fraglen += skb->len;
                skb_queue_tail(&ax25->frag_queue, skb);
                ...
        ...
        if (*skb->data & AX25_SEG_FIRST) {
            ...
            ax25->fragno = *skb->data & AX25_SEG_REM;
            skb_pull(skb, 1);               /* skip fragno */
    [all …]
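The one-byte segmentation header drives the reassembly bookkeeping: AX25_SEG_FIRST marks the first fragment and AX25_SEG_REM holds the count of fragments still to come, which must decrement by exactly one on each continuation. A small standalone model of that counter check (flag and mask values as in net/ax25.h; the rest is illustrative):

    #include <stdio.h>

    #define AX25_SEG_FIRST 0x80 /* first-segment flag (net/ax25.h) */
    #define AX25_SEG_REM   0x7F /* mask: segments remaining */

    struct reasm {
        int fragno; /* segments still expected */
    };

    /* Returns 1 when the fragment is accepted, 0 on a sequence error. */
    static int rx_fragment(struct reasm *r, unsigned char seg_hdr)
    {
        if (seg_hdr & AX25_SEG_FIRST) {
            r->fragno = seg_hdr & AX25_SEG_REM; /* start reassembly */
            return 1;
        }
        /* A continuation must carry exactly one less than the last count. */
        if (r->fragno - 1 == (seg_hdr & AX25_SEG_REM)) {
            r->fragno = seg_hdr & AX25_SEG_REM;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct reasm r = { 0 };
        unsigned char frags[] = { AX25_SEG_FIRST | 2, 1, 0 };
        for (unsigned i = 0; i < sizeof(frags); i++)
            printf("frag %u: %s\n", i,
                   rx_fragment(&r, frags[i]) ? "ok" : "bad sequence");
        return 0;
    }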
/net/ipv4/
ip_output.c:

    int __ip_local_out(struct sk_buff *skb)
    {
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev,
                       dst_output);
    }

    int ip_local_out(struct sk_buff *skb)
    {
        int err;

        err = __ip_local_out(skb);
        if (err == 0)
            err = dst_output(skb);

        return err;
    }

    /*
     * Add an ip header to a skbuff and send it out.
     */
    int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                              __be32 saddr, __be32 daddr, struct ip_options *opt)
    {
        ...
        struct rtable *rt = skb->rtable;
        ...
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
    [all …]
ip_input.c:

    /*
     * Process Router Attention IP option
     */
    int ip_call_ra_chain(struct sk_buff *skb)
    {
        struct ip_ra_chain *ra;
        u8 protocol = ip_hdr(skb)->protocol;
        struct sock *last = NULL;
        struct net_device *dev = skb->dev;
        ...
            if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
                if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
                    return 1;
            }
        ...
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
        ...
            raw_rcv(last, skb);
        ...
    }

    static int ip_local_deliver_finish(struct sk_buff *skb)
    {
        struct net *net = dev_net(skb->dev);

        __skb_pull(skb, ip_hdrlen(skb));
    [all …]
netfilter.c:

    /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
    int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
    {
        struct net *net = dev_net(skb->dst->dev);
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
        struct flowi fl = {};
        struct dst_entry *odst;
        ...
        if (skb->sk && inet_sk(skb->sk)->transparent)
            ...
        fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
        fl.mark = skb->mark;
        fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
        ...
        /* Drop old route. */
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;
        ...
        odst = skb->dst;
    [all …]
tcp_output.c:

    static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
    {
        struct tcp_sock *tp = tcp_sk(sk);
        ...
        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
        ...
        tp->packets_out += tcp_skb_pcount(skb);
        ...
    }

    static void tcp_event_data_sent(struct tcp_sock *tp,
                                    struct sk_buff *skb, struct sock *sk)
    {
        ...
    }

    static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
    {
        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
        if (!(tp->ecn_flags & TCP_ECN_OK))
            TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
    }

    static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
    {
        ...
        TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
        ...
    }
    [all …]
xfrm4_input.c:

    int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
    {
        return xfrm4_extract_header(skb);
    }

    static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
    {
        if (skb->dst == NULL) {
            const struct iphdr *iph = ip_hdr(skb);

            if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
                               skb->dev))
                goto drop;
        }
        return dst_input(skb);
    drop:
        kfree_skb(skb);
        return NET_RX_DROP;
    }

    int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
                        int encap_type)
    [all …]
/net/llc/
llc_c_ev.c:

    /* in llc_util_nr_inside_tx_window() */
        struct sk_buff *skb;
        ...
        skb = skb_peek(&llc->pdu_unack_q);
        pdu = llc_pdu_sn_hdr(skb);
        ...
        skb = skb_peek_tail(&llc->pdu_unack_q);
        pdu = llc_pdu_sn_hdr(skb);
        ...

    int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
    {
        const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
        ...
    }

    int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
    {
        const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
        ...
    }

    int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
    [all …]
llc_station.c:

    static __inline__ struct llc_station_state_ev *
    llc_station_ev(struct sk_buff *skb)
    {
        return (struct llc_station_state_ev *)skb->cb;
    }

    typedef int (*llc_station_ev_t)(struct sk_buff *skb);
    ...
    typedef int (*llc_station_action_t)(struct sk_buff *skb);

    static int llc_stat_ev_enable_with_dup_addr_check(struct sk_buff *skb)
    {
        struct llc_station_state_ev *ev = llc_station_ev(skb);
        ...
    }

    static int llc_stat_ev_enable_without_dup_addr_check(struct sk_buff *skb)
    {
        struct llc_station_state_ev *ev = llc_station_ev(skb);
        ...
    }

    static int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct sk_buff *skb)
    {
        struct llc_station_state_ev *ev = llc_station_ev(skb);
    [all …]
llc_sap.c:

    struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
                                    u8 type, u32 data_size)
    {
        ...
        struct sk_buff *skb;

        skb = skb_alloc_frame_hdr_space();
        skb = alloc_skb(hlen + data_size, GFP_ATOMIC);

        if (skb) {
            skb_reset_mac_header(skb);
            skb_reserve(skb, hlen);
            skb_reset_network_header(skb);
            skb_reset_transport_header(skb);
            skb->protocol = htons(ETH_P_802_2);
            skb->dev = dev;
            if (sk != NULL)
                skb_set_owner_w(skb, sk);
        }
    [all …]
/net/decnet/
dn_nsp_in.c:

    static void dn_log_martian(struct sk_buff *skb, const char *msg)
    {
        if (decnet_log_martians && net_ratelimit()) {
            char *devname = skb->dev ? skb->dev->name : "???";
            struct dn_skb_cb *cb = DN_SKB_CB(skb);
            ...
        }
    }

    static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
    {
        ...
        wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->data_xmit_queue, ack);
        ...
        wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->other_xmit_queue, ack);
        ...
    }

    static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
    {
        __le16 *ptr = (__le16 *)skb->data;
        ...
        if (skb->len < 2)
            ...
        skb_pull(skb, 2);
    [all …]
/net/dsa/
tag_edsa.c:

    int edsa_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        ...
        dev->stats.tx_bytes += skb->len;
        ...
        /* Tagged frame: rewrite the outermost 802.1q tag into an EDSA tag. */
        if (skb->protocol == htons(ETH_P_8021Q)) {
            if (skb_cow_head(skb, DSA_HLEN) < 0)
                goto out_free;
            skb_push(skb, DSA_HLEN);

            memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
            ...
            edsa_header = skb->data + 2 * ETH_ALEN;
            ...
        } else {
            /* Untagged frame: insert a full EDSA header. */
            if (skb_cow_head(skb, EDSA_HLEN) < 0)
                goto out_free;
            skb_push(skb, EDSA_HLEN);

            memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
            ...
        }
    [all …]
tag_dsa.c:

    int dsa_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        ...
        dev->stats.tx_bytes += skb->len;
        ...
        /* A tagged frame already has room: rewrite the 802.1q tag
         * into a DSA tag in place. */
        if (skb->protocol == htons(ETH_P_8021Q)) {
            if (skb_cow_head(skb, 0) < 0)
                goto out_free;
            ...
            dsa_header = skb->data + 2 * ETH_ALEN;
            ...
        } else {
            /* An untagged frame needs DSA_HLEN extra bytes between
             * the addresses and the type field. */
            if (skb_cow_head(skb, DSA_HLEN) < 0)
                goto out_free;
            skb_push(skb, DSA_HLEN);

            memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
            ...
            dsa_header = skb->data + 2 * ETH_ALEN;
            ...
        }

        skb->protocol = htons(ETH_P_DSA);
    [all …]
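Both taggers above make room with skb_push(), then slide the two MAC addresses back to the new start of the frame with memmove(), which opens a DSA_HLEN-byte hole right after the addresses for the tag. The same pointer choreography on a flat buffer (sizes and the sample tag are illustrative; only the memmove pattern mirrors the code above):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6
    #define DSA_HLEN 4

    /* Insert a 4-byte tag after the dst/src MACs of the frame at *frame.
     * The buffer must have DSA_HLEN bytes of headroom before *frame.
     * Returns the new start of the frame. */
    static unsigned char *insert_tag(unsigned char *frame,
                                     const unsigned char tag[DSA_HLEN])
    {
        unsigned char *new_start = frame - DSA_HLEN;   /* skb_push() */

        /* Slide both MAC addresses into the headroom... */
        memmove(new_start, frame, 2 * ETH_ALEN);
        /* ...which leaves a DSA_HLEN hole right after them. */
        memcpy(new_start + 2 * ETH_ALEN, tag, DSA_HLEN);
        return new_start;
    }

    int main(void)
    {
        unsigned char buf[64];
        unsigned char *frame = buf + DSA_HLEN;  /* reserved headroom */
        const unsigned char tag[DSA_HLEN] = { 0x40, 0x00, 0x00, 0x00 };

        memset(frame, 0xaa, 2 * ETH_ALEN);      /* fake dst+src MACs */
        frame[2 * ETH_ALEN] = 0x08;             /* fake ethertype 0x0800 */
        frame[2 * ETH_ALEN + 1] = 0x00;

        frame = insert_tag(frame, tag);
        printf("tag byte 0 after MACs: %#x\n", frame[2 * ETH_ALEN]);
        return 0;
    }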
/net/x25/
x25_dev.c:

    static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
    {
        ...
        frametype = skb->data[2];
        lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

        /* An LCI of zero is always a link-control frame. */
        if (lci == 0) {
            x25_link_control(skb, nb, frametype);
            ...
        }

        /* Deliver data for an existing logical channel to its socket,
         * or queue it on the socket backlog if the socket is in use. */
        ...
            skb_reset_transport_header(skb);
            ...
            queued = x25_process_rx_frame(sk, skb);
            ...
            sk_add_backlog(sk, skb);
        ...

        /* Is it a Call Request? If so, process it. */
        ...
            return x25_rx_call_request(skb, nb, lci);

        /* Try to forward the frame; otherwise drop it. */
        ...
        if (x25_forward_data(lci, nb, skb)) {
            ...
        }
        ...
        kfree_skb(skb);
    [all …]
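The two-byte mask arithmetic above extracts the 12-bit logical channel identifier: the low nibble of the first header byte supplies bits 8-11 and the second byte bits 0-7. The same computation in isolation (a sketch, not the kernel function):

    #include <stdio.h>

    /* 12-bit LCI: low nibble of byte 0 is bits 8..11, byte 1 is bits 0..7. */
    static unsigned int x25_lci(const unsigned char *data)
    {
        return ((data[0] << 8) & 0xF00) + (data[1] & 0x0FF);
    }

    int main(void)
    {
        /* GFI/group byte 0x12 -> group nibble 2, channel byte 0x34. */
        unsigned char hdr[3] = { 0x12, 0x34, 0x0B };
        printf("lci = %#x, frametype = %#x\n", x25_lci(hdr), hdr[2]);
        return 0;
    }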
/net/8021q/
vlan_core.c:

    int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
                          u16 vlan_tci, int polling)
    {
        if (netpoll_rx(skb))
            return NET_RX_DROP;

        if (skb_bond_should_drop(skb))
            goto drop;

        skb->vlan_tci = vlan_tci;
        skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

        if (!skb->dev)
            goto drop;

        return (polling ? netif_receive_skb(skb) : netif_rx(skb));

    drop:
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
    }

    int vlan_hwaccel_do_receive(struct sk_buff *skb)
    {
        struct net_device *dev = skb->dev;
    [all …]
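The device lookup is keyed on `vlan_tci & VLAN_VID_MASK` because the TCI word also packs the 3-bit priority and the CFI bit alongside the 12-bit VLAN ID. A standalone illustration of the field split (mask values as in 802.1Q and the kernel's if_vlan.h; the program itself is just a demo):

    #include <stdio.h>

    #define VLAN_PRIO_MASK  0xe000 /* priority code point, bits 13..15 */
    #define VLAN_PRIO_SHIFT 13
    #define VLAN_CFI_MASK   0x1000 /* canonical format indicator */
    #define VLAN_VID_MASK   0x0fff /* VLAN identifier, bits 0..11 */

    int main(void)
    {
        unsigned int tci = (5 << VLAN_PRIO_SHIFT) | 100; /* prio 5, VID 100 */

        printf("prio=%u cfi=%u vid=%u\n",
               (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT,
               (tci & VLAN_CFI_MASK) ? 1u : 0u,
               tci & VLAN_VID_MASK);
        return 0;
    }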
/net/ipv4/netfilter/
nf_nat_helper.c:

    static void mangle_contents(struct sk_buff *skb,
                                unsigned int dataoff,
                                unsigned int match_offset,
                                unsigned int match_len,
                                const char *rep_buffer,
                                unsigned int rep_len)
    {
        unsigned char *data;

        BUG_ON(skb_is_nonlinear(skb));
        data = skb_network_header(skb) + dataoff;

        /* move post-replacement */
        memmove(data + match_offset + rep_len,
                data + match_offset + match_len,
                skb->tail - (skb->network_header + dataoff +
                             match_offset + match_len));

        /* insert data from buffer */
        memcpy(data + match_offset, rep_buffer, rep_len);

        /* update skb info */
        if (rep_len > match_len) {
            pr_debug("nf_nat_mangle_packet: Extending packet by "
                     "%u from %u bytes\n", rep_len - match_len, skb->len);
            skb_put(skb, rep_len - match_len);
        } else {
            pr_debug("nf_nat_mangle_packet: Shrinking packet from "
                     "%u from %u bytes\n", match_len - rep_len, skb->len);
            __skb_trim(skb, skb->len + rep_len - match_len);
        }

        /* fix IP hdr checksum information */
        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));
    }
    [all …]
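mangle_contents() is a single memmove() plus a length adjustment: slide everything after the matched region so the replacement fits exactly, then grow or shrink the buffer by the difference. A userspace sketch of just that buffer arithmetic (checksum and header fixups omitted; splice_replace is an illustrative name):

    #include <stdio.h>
    #include <string.h>

    /* Replace buf[off..off+match_len) with rep[0..rep_len) in a buffer
     * holding *len bytes (capacity cap). Returns 0 on success, -1 if the
     * result would not fit. */
    static int splice_replace(unsigned char *buf, size_t *len, size_t cap,
                              size_t off, size_t match_len,
                              const unsigned char *rep, size_t rep_len)
    {
        size_t tail = *len - (off + match_len); /* bytes after the match */

        if (*len - match_len + rep_len > cap)
            return -1;

        /* Move the post-match bytes so the replacement fits exactly. */
        memmove(buf + off + rep_len, buf + off + match_len, tail);
        memcpy(buf + off, rep, rep_len);
        *len = *len - match_len + rep_len;      /* skb_put()/__skb_trim() */
        return 0;
    }

    int main(void)
    {
        unsigned char pkt[64] = "PORT 10,0,0,1,78,40";
        size_t len = strlen((char *)pkt);

        /* Rewrite the address part, as an FTP NAT helper would. */
        splice_replace(pkt, &len, sizeof(pkt), 5, 8,
                       (const unsigned char *)"192,168,0,9", 11);
        printf("%.*s\n", (int)len, pkt);
        return 0;
    }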