/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INET_ECN_H_
#define _INET_ECN_H_

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#include <net/inet_sock.h>
#include <net/dsfield.h>

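/*
 * ECN codepoints live in the two low-order bits of the IPv4 TOS byte /
 * IPv6 Traffic Class field (RFC 3168, Section 5): Not-ECT = 00,
 * ECT(1) = 01, ECT(0) = 10, CE = 11.
 */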
enum {
	INET_ECN_NOT_ECT = 0,
	INET_ECN_ECT_1 = 1,
	INET_ECN_ECT_0 = 2,
	INET_ECN_CE = 3,
	INET_ECN_MASK = 3,
};

extern int sysctl_tunnel_ecn_log;

static inline int INET_ECN_is_ce(__u8 dsfield)
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
}

static inline int INET_ECN_is_not_ect(__u8 dsfield)
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT;
}

static inline int INET_ECN_is_capable(__u8 dsfield)
{
	return dsfield & INET_ECN_ECT_0;
}

/*
 * RFC 3168 9.1.1
 *  The full-functionality option for ECN encapsulation is to copy the
 *  ECN codepoint of the inside header to the outside header on
 *  encapsulation if the inside header is not-ECT or ECT, and to set the
 *  ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
 *  the inside header is CE.
 */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
	outer &= ~INET_ECN_MASK;
	outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) :
					  INET_ECN_ECT_0;
	return outer;
}
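
/*
 * Illustrative sketch, not part of the original header: one plausible way
 * a tunnel ingress could use INET_ECN_encapsulate() when deriving the
 * outer IPv4 TOS from the inner header.  The helper name is hypothetical.
 */
static inline __u8 example_outer_tos(__u8 tunnel_tos,
				     const struct iphdr *inner_iph)
{
	/* Copy the inner ECN codepoint, degrading inner CE to ECT(0). */
	return INET_ECN_encapsulate(tunnel_tos, inner_iph->tos);
}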

static inline void INET_ECN_xmit(struct sock *sk)
{
	inet_sk(sk)->tos |= INET_ECN_ECT_0;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
}

static inline void INET_ECN_dontxmit(struct sock *sk)
{
	inet_sk(sk)->tos &= ~INET_ECN_MASK;
	if (inet6_sk(sk) != NULL)
		inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
}

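/*
 * In the leading __be32 of the IPv6 header (version, traffic class and
 * the top of the flow label), the two ECN bits of the traffic class
 * occupy bits 20-21, which is why the macros below and the IP6_ECN_*()
 * helpers shift ECN codepoints left by 20.
 */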
#define IP6_ECN_flow_init(label) do {			\
	(label) &= ~htonl(INET_ECN_MASK << 20);		\
} while (0)

#define IP6_ECN_flow_xmit(sk, label) do {			\
	if (INET_ECN_is_capable(inet6_sk(sk)->tclass))		\
		(label) |= htonl(INET_ECN_ECT_0 << 20);		\
} while (0)

static inline int IP_ECN_set_ce(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	u32 ecn = (iph->tos + 1) & INET_ECN_MASK;

	/*
	 * After the last operation we have (in binary):
	 * INET_ECN_NOT_ECT => 01
	 * INET_ECN_ECT_1   => 10
	 * INET_ECN_ECT_0   => 11
	 * INET_ECN_CE      => 00
	 */
	if (!(ecn & 2))
		return !ecn;

	/*
	 * The following gives us:
	 * INET_ECN_ECT_1 => check += htons(0xFFFD)
	 * INET_ECN_ECT_0 => check += htons(0xFFFE)
	 */
	check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);

	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	iph->tos |= INET_ECN_CE;
	return 1;
}
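
/*
 * Worked example for the checksum update in IP_ECN_set_ce(): setting CE
 * on an ECT(0) packet raises the tos byte (and hence the 16-bit header
 * word that holds it) by 1, so the one's-complement checksum must drop
 * by 1, i.e. check += 0xFFFE with an end-around carry; ecn is 3 here,
 * and htons(0xFFFB) + htons(3) == htons(0xFFFE).  For ECT(1) the tos
 * rises by 2 and the adjustment is htons(0xFFFD).
 */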

static inline int IP_ECN_set_ect1(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
		return 0;

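	/*
	 * tos is known to be ECT(0) (binary 10); XORing it with
	 * INET_ECN_MASK below flips it to ECT(1) (binary 01), lowering the
	 * tos byte by 1, which the htons(0x1) added to the checksum
	 * compensates for.
	 */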
	check += (__force u16)htons(0x1);

	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	iph->tos ^= INET_ECN_MASK;
	return 1;
}

static inline void IP_ECN_clear(struct iphdr *iph)
{
	iph->tos &= ~INET_ECN_MASK;
}

static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
{
	dscp &= ~INET_ECN_MASK;
	ipv4_change_dsfield(inner, INET_ECN_MASK, dscp);
}

struct ipv6hdr;

/* Note:
 * IP_ECN_set_ce() has to tweak the IPv4 checksum when setting CE, so the
 * two changes cancel out and skb->csum is unaffected if/when
 * CHECKSUM_COMPLETE.  In the IPv6 case there is no header checksum to
 * compensate for the change, so we have to update skb->csum ourselves.
 */
static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
	__be32 from, to;

	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
		return 0;

	from = *(__be32 *)iph;
	to = from | htonl(INET_ECN_CE << 20);
	*(__be32 *)iph = to;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
				     (__force __wsum)to);
	return 1;
}

static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
{
	__be32 from, to;

	if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
		return 0;

	from = *(__be32 *)iph;
	to = from ^ htonl(INET_ECN_MASK << 20);
	*(__be32 *)iph = to;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
				     (__force __wsum)to);
	return 1;
}

static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
	dscp &= ~INET_ECN_MASK;
	ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
}

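/*
 * The skb-based setters below only touch the header when it is fully
 * contained in the skb's linear data area (i.e. up to skb_tail_pointer()).
 */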
static inline int INET_ECN_set_ce(struct sk_buff *skb)
{
	switch (skb_protocol(skb, true)) {
	case cpu_to_be16(ETH_P_IP):
		if (skb_network_header(skb) + sizeof(struct iphdr) <=
		    skb_tail_pointer(skb))
			return IP_ECN_set_ce(ip_hdr(skb));
		break;

	case cpu_to_be16(ETH_P_IPV6):
		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
		    skb_tail_pointer(skb))
			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
		break;
	}

	return 0;
}

static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
	switch (skb_protocol(skb, true)) {
	case cpu_to_be16(ETH_P_IP):
		if (skb_network_header(skb) + sizeof(struct iphdr) <=
		    skb_tail_pointer(skb))
			return IP_ECN_set_ect1(ip_hdr(skb));
		break;

	case cpu_to_be16(ETH_P_IPV6):
		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
		    skb_tail_pointer(skb))
			return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
		break;
	}

	return 0;
}

/*
 * RFC 6040 4.2
 *  To decapsulate the inner header at the tunnel egress, a compliant
 *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
 *  intersection of the appropriate arriving inner header (row) and outer
 *  header (column) in Figure 4.
 *
 *      +---------+------------------------------------------------+
 *      |Arriving |            Arriving Outer Header               |
 *      |   Inner +---------+------------+------------+------------+
 *      |  Header | Not-ECT |   ECT(0)   |   ECT(1)   |     CE     |
 *      +---------+---------+------------+------------+------------+
 *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
 *      |  ECT(0) |  ECT(0) |   ECT(0)   |   ECT(1)   |     CE     |
 *      |  ECT(1) |  ECT(1) | ECT(1) (!) |   ECT(1)   |     CE     |
 *      |    CE   |    CE   |     CE     |     CE(!!!)|     CE     |
 *      +---------+---------+------------+------------+------------+
 *
 *             Figure 4: New IP in IP Decapsulation Behaviour
 *
 *  returns 0 on success
 *          1 if something is broken and should be logged (!!! above)
 *          2 if packet should be dropped
 */
static inline int __INET_ECN_decapsulate(__u8 outer, __u8 inner, bool *set_ce)
{
	if (INET_ECN_is_not_ect(inner)) {
		switch (outer & INET_ECN_MASK) {
		case INET_ECN_NOT_ECT:
			return 0;
		case INET_ECN_ECT_0:
		case INET_ECN_ECT_1:
			return 1;
		case INET_ECN_CE:
			return 2;
		}
	}

	*set_ce = INET_ECN_is_ce(outer);
	return 0;
}

static inline int INET_ECN_decapsulate(struct sk_buff *skb,
				       __u8 outer, __u8 inner)
{
	bool set_ce = false;
	int rc;

	rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
	if (!rc) {
		if (set_ce)
			INET_ECN_set_ce(skb);
		else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
			INET_ECN_set_ect1(skb);
	}

	return rc;
}

static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
				     struct sk_buff *skb)
{
	__u8 inner;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		inner = ip_hdr(skb)->tos;
		break;
	case htons(ETH_P_IPV6):
		inner = ipv6_get_dsfield(ipv6_hdr(skb));
		break;
	default:
		return 0;
	}

	return INET_ECN_decapsulate(skb, oiph->tos, inner);
}

static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
				      struct sk_buff *skb)
{
	__u8 inner;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		inner = ip_hdr(skb)->tos;
		break;
	case htons(ETH_P_IPV6):
		inner = ipv6_get_dsfield(ipv6_hdr(skb));
		break;
	default:
		return 0;
	}

	return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
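
/*
 * Illustrative sketch, not part of the original header: one way a tunnel
 * egress might act on the RFC 6040 return codes of IP_ECN_decapsulate().
 * The helper name is hypothetical; real callers typically also log the
 * "!!!" cases, often gated on sysctl_tunnel_ecn_log.
 */
static inline bool example_egress_ecn_ok(const struct iphdr *outer_iph,
					 struct sk_buff *skb)
{
	int err = IP_ECN_decapsulate(outer_iph, skb);

	/* err == 1: valid but anomalous combination, worth logging;
	 * err == 2: broken combination, the packet should be dropped.
	 */
	return err <= 1;
}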
#endif