/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef NET_FIREWALL_H
#define NET_FIREWALL_H

#include <linux/bpf.h>

#include "netfirewall_ct.h"
#include "netfirewall_def.h"
#include "netfirewall_domain.h"
#include "netfirewall_event.h"
#include "netfirewall_match.h"

#define FIREWALL_DNS_QUERY_PORT 53
#define FIREWALL_DNS_OVER_QUERY_PORT 853

29 /**
30 * @brief if tcp socket was intercepted, need send reset packet to peer
31 *
32 * @param tuple match tuple of skb meta data
33 * @param skb struct __sk_buff
34 * @param dir enum stream_dir
35 * @return 0 if no error, -1 if an error occurred
36 */
send_sock_tcp_reset(struct match_tuple * tuple,struct __sk_buff * skb,enum stream_dir dir)37 static __always_inline int send_sock_tcp_reset(struct match_tuple *tuple, struct __sk_buff *skb, enum stream_dir dir)
38 {
39 if (!skb || !tuple) {
40 return -1;
41 }
42 if (tuple->protocol == IPPROTO_TCP) {
43 if (dir == INGRESS) {
44 bpf_sock_tcp_send_reset(skb);
45 } else if (dir == EGRESS) {
46 bpf_sock_destroy(skb);
47 }
48 return 0;
49 }
50 return -1;
51 }
52
53 /**
54 * @brief Get the packet rst on tuple
55 *
56 * @param tuple struct match_tuple
57 * @return true if success or false if an error occurred
58 */
get_packet_rst_flag(struct match_tuple * tuple)59 static __always_inline bool get_packet_rst_flag(struct match_tuple *tuple)
60 {
61 if (!tuple) {
62 return false;
63 }
64
65 if (tuple->rst == 1) {
66 return true;
67 }
68
69 return false;
70 }
71
72 /**
73 * @brief Get the ct tuple from match tuple
74 *
75 * @param match_tpl struct match_tuple
76 * @param ct_tpl struct ct_tuple
77 * @return true if success or false if an error occurred
78 */
get_ct_tuple(struct match_tuple * match_tpl,struct ct_tuple * ct_tpl)79 static __always_inline bool get_ct_tuple(struct match_tuple *match_tpl, struct ct_tuple *ct_tpl)
80 {
81 if (!match_tpl || !ct_tpl) {
82 return false;
83 }
84
85 ct_tpl->uid = match_tpl->uid;
86 ct_tpl->family = match_tpl->family;
87 ct_tpl->protocol = match_tpl->protocol;
88 ct_tpl->sport = match_tpl->sport;
89 ct_tpl->dport = match_tpl->dport;
90
91 if (match_tpl->family == AF_INET) {
92 ct_tpl->ipv4.saddr = match_tpl->ipv4.saddr;
93 ct_tpl->ipv4.daddr = match_tpl->ipv4.daddr;
94 } else {
95 ct_tpl->ipv6.saddr = match_tpl->ipv6.saddr;
96 ct_tpl->ipv6.daddr = match_tpl->ipv6.daddr;
97 }
98
99 return true;
100 }
101
102 /**
103 * @brief Determine ingress packet drop or not
104 *
105 * @param skb struct __sk_buff
106 * @return SK_DROP if intercepted or SK_PASS if not
107 */
netfirewall_policy_ingress(struct __sk_buff * skb)108 static __always_inline enum sk_action netfirewall_policy_ingress(struct __sk_buff *skb)
109 {
110 if (match_dns_query(skb) == SK_DROP) {
111 return SK_DROP;
112 }
113
114 struct match_tuple tuple = { 0 };
115 if (!get_match_tuple(skb, &tuple, INGRESS)) {
116 return SK_PASS;
117 }
118
119 log_tuple(&tuple);
120
121 struct ct_tuple ct_tpl = {};
122 if (!get_ct_tuple(&tuple, &ct_tpl)) {
123 return SK_PASS;
124 }
125
126 enum ct_status status = ct_map_lookup_entry(skb, &ct_tpl, CT_INGRESS, match_loopback(tuple));
127 log_dbg(DBG_CT_LOOKUP, INGRESS, status);
128 if (status != CT_NEW) {
129 return SK_PASS;
130 }
131
132 if (get_packet_rst_flag(&tuple)) {
133 return SK_PASS;
134 }
135
136 struct bitmap key = { 0 };
137 if (!match_action_key(&tuple, &key)) {
138 return SK_PASS;
139 }
140
141 if (match_action(&tuple, &key) != SK_PASS) {
142 log_intercept(&tuple);
143 send_sock_tcp_reset(&tuple, skb, INGRESS);
144 return SK_DROP;
145 }
146
147 if (status == CT_NEW) {
148 ct_create_entry(&ct_tpl, skb, CT_INGRESS);
149 }
150
151 return SK_PASS;
152 }
153
MatchDnsQuery(const struct match_tuple * tuple)154 static __always_inline bool MatchDnsQuery(const struct match_tuple *tuple)
155 {
156 __be16 port = bpf_htons(tuple->sport);
157 if (port == FIREWALL_DNS_QUERY_PORT || port == FIREWALL_DNS_OVER_QUERY_PORT) {
158 struct defalut_action_value *default_value = bpf_map_lookup_elem(&DEFAULT_ACTION_MAP, &tuple->uid);
159 return default_value && default_value->outaction != SK_PASS;
160 }
161 return false;
162 }
163
164 /**
165 * @brief Determine egress packet drop or not
166 *
167 * @param skb struct __sk_buff
168 * @return SK_DROP if intercepted or SK_PASS if not
169 */
netfirewall_policy_egress(struct __sk_buff * skb)170 static __always_inline enum sk_action netfirewall_policy_egress(struct __sk_buff *skb)
171 {
172 if (match_dns_query(skb) == SK_DROP) {
173 return SK_DROP;
174 }
175
176 struct match_tuple tuple = { 0 };
177 if (!get_match_tuple(skb, &tuple, EGRESS)) {
178 return SK_PASS;
179 }
180
181 log_tuple(&tuple);
182
183 if (get_packet_rst_flag(&tuple)) {
184 return SK_PASS;
185 }
186
187 struct ct_tuple ct_tpl = {};
188 if (!get_ct_tuple(&tuple, &ct_tpl)) {
189 return SK_PASS;
190 }
191
192 enum ct_status status = ct_map_lookup_entry(skb, &ct_tpl, CT_EGRESS, match_loopback(tuple));
193 log_dbg(DBG_CT_LOOKUP, EGRESS, status);
194 if (status != CT_NEW) {
195 return SK_PASS;
196 }
197
198 if (get_packet_rst_flag(&tuple)) {
199 return SK_PASS;
200 }
201
202 struct bitmap key = { 0 };
203 if (!match_action_key(&tuple, &key)) {
204 return SK_PASS;
205 }
206 // Outbound DNS queries need to be released
207 if (!MatchDnsQuery(&tuple) && match_action(&tuple, &key) != SK_PASS) {
208 log_intercept(&tuple);
209 send_sock_tcp_reset(&tuple, skb, EGRESS);
210 return SK_DROP;
211 }
212
213 if (status == CT_NEW) {
214 ct_create_entry(&ct_tpl, skb, CT_EGRESS);
215 }
216
217 return SK_PASS;
218 }

#endif // NET_FIREWALL_H