1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package com.android.networkstack.tethering;
18 
19 import static android.net.NetworkStats.DEFAULT_NETWORK_NO;
20 import static android.net.NetworkStats.METERED_NO;
21 import static android.net.NetworkStats.ROAMING_NO;
22 import static android.net.NetworkStats.SET_DEFAULT;
23 import static android.net.NetworkStats.TAG_NONE;
24 import static android.net.NetworkStats.UID_ALL;
25 import static android.net.NetworkStats.UID_TETHERING;
26 import static android.net.ip.ConntrackMonitor.ConntrackEvent;
27 import static android.net.netstats.provider.NetworkStatsProvider.QUOTA_UNLIMITED;
28 import static android.system.OsConstants.ETH_P_IP;
29 import static android.system.OsConstants.ETH_P_IPV6;
30 
31 import static com.android.networkstack.tethering.BpfUtils.DOWNSTREAM;
32 import static com.android.networkstack.tethering.BpfUtils.UPSTREAM;
33 import static com.android.networkstack.tethering.TetheringConfiguration.DEFAULT_TETHER_OFFLOAD_POLL_INTERVAL_MS;
34 import static com.android.networkstack.tethering.UpstreamNetworkState.isVcnInterface;
35 
36 import android.app.usage.NetworkStatsManager;
37 import android.net.INetd;
38 import android.net.MacAddress;
39 import android.net.NetworkStats;
40 import android.net.NetworkStats.Entry;
41 import android.net.TetherOffloadRuleParcel;
42 import android.net.ip.ConntrackMonitor;
43 import android.net.ip.ConntrackMonitor.ConntrackEventConsumer;
44 import android.net.ip.IpServer;
45 import android.net.netlink.ConntrackMessage;
46 import android.net.netlink.NetlinkConstants;
47 import android.net.netlink.NetlinkSocket;
48 import android.net.netstats.provider.NetworkStatsProvider;
49 import android.net.util.InterfaceParams;
50 import android.net.util.SharedLog;
51 import android.net.util.TetheringUtils.ForwardedStats;
52 import android.os.Handler;
53 import android.os.SystemClock;
54 import android.system.ErrnoException;
55 import android.system.OsConstants;
56 import android.text.TextUtils;
57 import android.util.ArraySet;
58 import android.util.Log;
59 import android.util.SparseArray;
60 
61 import androidx.annotation.NonNull;
62 import androidx.annotation.Nullable;
63 
64 import com.android.internal.annotations.VisibleForTesting;
65 import com.android.internal.util.IndentingPrintWriter;
66 import com.android.modules.utils.build.SdkLevel;
67 import com.android.net.module.util.NetworkStackConstants;
68 import com.android.net.module.util.Struct;
69 import com.android.networkstack.tethering.apishim.common.BpfCoordinatorShim;
70 
71 import java.net.Inet4Address;
72 import java.net.Inet6Address;
73 import java.net.InetAddress;
74 import java.net.UnknownHostException;
75 import java.util.ArrayList;
76 import java.util.Arrays;
77 import java.util.Collection;
78 import java.util.HashMap;
79 import java.util.HashSet;
80 import java.util.LinkedHashMap;
81 import java.util.LinkedHashSet;
82 import java.util.Map;
83 import java.util.Objects;
84 import java.util.Set;
85 
86 /**
87  *  This coordinator is responsible for providing BPF offload-related functionality.
88  *  - Get tethering stats.
89  *  - Set data limit.
90  *  - Set global alert.
91  *  - Add/remove forwarding rules.
92  *
93  * @hide
94  */
95 public class BpfCoordinator {
96     // Ensure the JNI code is loaded. In production this will already have been loaded by
97     // TetherService, but for tests it needs to be either loaded here or loaded by every test.
98     // TODO: is there a better way?
99     static {
100         System.loadLibrary("tetherutilsjni");
101     }
102 
103     private static final String TAG = BpfCoordinator.class.getSimpleName();
104     private static final int DUMP_TIMEOUT_MS = 10_000;
105     private static final MacAddress NULL_MAC_ADDRESS = MacAddress.fromString(
106             "00:00:00:00:00:00");
107     private static final String TETHER_DOWNSTREAM4_MAP_PATH = makeMapPath(DOWNSTREAM, 4);
108     private static final String TETHER_UPSTREAM4_MAP_PATH = makeMapPath(UPSTREAM, 4);
109     private static final String TETHER_DOWNSTREAM6_FS_PATH = makeMapPath(DOWNSTREAM, 6);
110     private static final String TETHER_UPSTREAM6_FS_PATH = makeMapPath(UPSTREAM, 6);
111     private static final String TETHER_STATS_MAP_PATH = makeMapPath("stats");
112     private static final String TETHER_LIMIT_MAP_PATH = makeMapPath("limit");
113     private static final String TETHER_ERROR_MAP_PATH = makeMapPath("error");
114     private static final String TETHER_DEV_MAP_PATH = makeMapPath("dev");
115 
116     /** The names of all the BPF counters defined in bpf_tethering.h. */
117     public static final String[] sBpfCounterNames = getBpfCounterNames();
118 
119     private static String makeMapPath(String which) {
120         return "/sys/fs/bpf/tethering/map_offload_tether_" + which + "_map";
121     }
122 
123     private static String makeMapPath(boolean downstream, int ipVersion) {
124         return makeMapPath((downstream ? "downstream" : "upstream") + ipVersion);
125     }
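    // For example, makeMapPath(true /* downstream */, 4) resolves to
    // "/sys/fs/bpf/tethering/map_offload_tether_downstream4_map", and makeMapPath("stats")
    // resolves to "/sys/fs/bpf/tethering/map_offload_tether_stats_map".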
126 
127     @VisibleForTesting
128     static final int POLLING_CONNTRACK_TIMEOUT_MS = 60_000;
129     @VisibleForTesting
130     static final int NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED = 432000;
131     @VisibleForTesting
132     static final int NF_CONNTRACK_UDP_TIMEOUT_STREAM = 180;
133 
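    // Selects how offloaded traffic statistics are attributed when they are reported:
    // per upstream interface (STATS_PER_IFACE) or per UID (STATS_PER_UID).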
134     @VisibleForTesting
135     enum StatsType {
136         STATS_PER_IFACE,
137         STATS_PER_UID,
138     }
139 
140     @NonNull
141     private final Handler mHandler;
142     @NonNull
143     private final INetd mNetd;
144     @NonNull
145     private final SharedLog mLog;
146     @NonNull
147     private final Dependencies mDeps;
148     @NonNull
149     private final ConntrackMonitor mConntrackMonitor;
150     @Nullable
151     private final BpfTetherStatsProvider mStatsProvider;
152     @NonNull
153     private final BpfCoordinatorShim mBpfCoordinatorShim;
154     @NonNull
155     private final BpfConntrackEventConsumer mBpfConntrackEventConsumer;
156 
157     // True if BPF offload is supported, false otherwise. BPF offload can be disabled by a
158     // runtime resource overlay package or by device configuration. This flag is only
159     // initialized in the constructor because it is hard to unwind all existing state
160     // (especially the forwarding rules) once the device configuration changes, so the same
161     // setting is kept for the lifetime of this object. See also TetheringConfiguration.
162     private final boolean mIsBpfEnabled;
163 
164     // Tracks whether BPF tethering is started or not. This is set by tethering before it
165     // starts the first IpServer and is cleared by tethering shortly before the last IpServer
166     // is stopped. Note that rule updates (especially deletions, but sometimes additions as
167     // well) may arrive when this is false. If they do, they must be communicated to netd.
168     // Changes in data limits may also arrive when this is false, and if they do, they must
169     // also be communicated to netd.
170     private boolean mPollingStarted = false;
171 
172     // Tracks the remaining alert quota. Unlike the limit quota, which is per-interface, the
173     // alert quota is interface-independent and global for tethering offload.
174     private long mRemainingAlertQuota = QUOTA_UNLIMITED;
175 
176     // Maps upstream interface index to offloaded traffic statistics.
177     // Always contains the latest total bytes/packets received from the BPF maps for each
178     // interface since that upstream was started.
179     private final SparseArray<ForwardedStats> mStats = new SparseArray<>();
180 
181     // Maps upstream interface names to interface quotas.
182     // Always contains the latest value received from the framework for each interface, regardless
183     // of whether offload is currently running (or is even supported) on that interface. Only
184     // includes interfaces that have a quota set. Note that this map stores the quota set by the
185     // service. Because the service identifies an interface by its name, this map is keyed by
186     // interface name.
187     private final HashMap<String, Long> mInterfaceQuotas = new HashMap<>();
188 
189     // Maps upstream interface index to interface names.
190     // Stores every interface name seen since boot. Used to look up the interface name for the
191     // tether stats received from netd, because netd identifies an interface by its index.
192     // TODO: Remove unused interface names.
193     private final SparseArray<String> mInterfaceNames = new SparseArray<>();
194 
195     // Map of downstream rule maps. Each of these maps represents the IPv6 forwarding rules for a
196     // given downstream. Each map:
197     // - Is owned by the IpServer that is responsible for that downstream.
198     // - Must only be modified by that IpServer.
199     // - Is created when the IpServer adds its first rule, and deleted when the IpServer deletes
200     //   its last rule (or clears its rules).
201     // TODO: Perhaps seal the map and rule operations which communicate with netd into a class.
202     // TODO: Does this need to be a LinkedHashMap or can it just be a HashMap? Also, could it be
203     // a ConcurrentHashMap, in order to avoid the copies in tetherOffloadRuleClear
204     // and tetherOffloadRuleUpdate?
205     // TODO: Perhaps use a one-dimensional map and access specific downstream rules via the
206     // downstream index. To do that, IpServer must guarantee that it always has a valid IPv6
207     // downstream interface index when calling the function that clears all rules. Today,
208     // IpServer may call that function without a valid IPv6 downstream interface index, even if
209     // it had one before. IpServer would need to call getInterfaceParams() in the constructor
210     // instead of when startIpv6() is called, and make mInterfaceParams final.
211     private final HashMap<IpServer, LinkedHashMap<Inet6Address, Ipv6ForwardingRule>>
212             mIpv6ForwardingRules = new LinkedHashMap<>();
213 
214     // Map of downstream client maps. Each of these maps represents the IPv4 clients for a given
215     // downstream. Needed to build IPv4 forwarding rules when conntrack events are received.
216     // Each map:
217     // - Is owned by the IpServer that is responsible for that downstream.
218     // - Must only be modified by that IpServer.
219     // - Is created when the IpServer adds its first client, and deleted when the IpServer deletes
220     //   its last client.
221     // Note that relying on the client address for finding downstream is okay for now because the
222     // client address is unique. See PrivateAddressCoordinator#requestDownstreamAddress.
223     // TODO: Refactor if it is ever possible for the client address to not be unique.
224     private final HashMap<IpServer, HashMap<Inet4Address, ClientInfo>>
225             mTetherClients = new HashMap<>();
226 
227     // Set of downstreams that are currently monitoring conntrack netlink messages.
228     private final Set<IpServer> mMonitoringIpServers = new HashSet<>();
229 
230     // Map of upstream interface IPv4 address to interface index.
231     // TODO: consider making the key unique, because the upstream address is not guaranteed to
232     // be. This is okay for now because there is generally only one upstream.
233     private final HashMap<Inet4Address, Integer> mIpv4UpstreamIndices = new HashMap<>();
234 
235     // Map of forwarding pairs: each upstream interface name maps to the set of downstream
        // interface names currently paired with it.
236     private final HashMap<String, HashSet<String>> mForwardingPairs = new HashMap<>();
237 
238     // Set of interface indices that are in the upstream and downstream BPF dev map. Used to
239     // cache the dev map contents and avoid duplicate add or remove operations. Uses a
240     // LinkedHashSet because the test BpfCoordinatorTest needs a predictable iteration order.
241     private final Set<Integer> mDeviceMapSet = new LinkedHashSet<>();
242 
243     // Tracks the last IPv4 upstream interface index. Supports a single upstream only.
244     // TODO: Support multi-upstream interfaces.
245     private int mLastIPv4UpstreamIfindex = 0;
246 
247     // Runnable used to schedule the next stats poll.
248     private final Runnable mScheduledPollingStats = () -> {
249         updateForwardedStats();
250         maybeSchedulePollingStats();
251     };
252 
253     // Runnable used to schedule the next conntrack timeout poll.
254     private final Runnable mScheduledPollingConntrackTimeout = () -> {
255         maybeRefreshConntrackTimeout();
256         maybeSchedulePollingConntrackTimeout();
257     };
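    // Both runnables reschedule themselves (see maybeSchedulePollingStats and
    // maybeSchedulePollingConntrackTimeout), so polling repeats until stopPolling() removes the
    // pending callbacks from the handler.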
258 
259     // TODO: add BpfMap<TetherDownstream64Key, TetherDownstream64Value> retrieving function.
260     @VisibleForTesting
261     public abstract static class Dependencies {
262         /** Get handler. */
263         @NonNull public abstract Handler getHandler();
264 
265         /** Get netd. */
266         @NonNull public abstract INetd getNetd();
267 
268         /** Get network stats manager. */
269         @NonNull public abstract NetworkStatsManager getNetworkStatsManager();
270 
271         /** Get shared log. */
272         @NonNull public abstract SharedLog getSharedLog();
273 
274         /** Get tethering configuration. */
275         @Nullable public abstract TetheringConfiguration getTetherConfig();
276 
277         /** Get conntrack monitor. */
278         @NonNull public ConntrackMonitor getConntrackMonitor(ConntrackEventConsumer consumer) {
279             return new ConntrackMonitor(getHandler(), getSharedLog(), consumer);
280         }
281 
282         /** Get interface information for a given interface. */
283         @NonNull public InterfaceParams getInterfaceParams(String ifName) {
284             return InterfaceParams.getByName(ifName);
285         }
286 
287         /**
288          * Represents an estimate of elapsed time since boot in nanoseconds.
289          */
290         public long elapsedRealtimeNanos() {
291             return SystemClock.elapsedRealtimeNanos();
292         }
293 
294         /**
295          * Check whether the OS build is at least S.
296          *
297          * TODO: move to BpfCoordinatorShim once the test doesn't need the mocked OS build for
298          * testing different code flows concurrently.
299          */
300         public boolean isAtLeastS() {
301             return SdkLevel.isAtLeastS();
302         }
303 
304         /** Get downstream4 BPF map. */
305         @Nullable public BpfMap<Tether4Key, Tether4Value> getBpfDownstream4Map() {
306             if (!isAtLeastS()) return null;
307             try {
308                 return new BpfMap<>(TETHER_DOWNSTREAM4_MAP_PATH,
309                     BpfMap.BPF_F_RDWR, Tether4Key.class, Tether4Value.class);
310             } catch (ErrnoException e) {
311                 Log.e(TAG, "Cannot create downstream4 map: " + e);
312                 return null;
313             }
314         }
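        // Note: this and the other getBpf*Map() accessors below return null on pre-S builds or
        // when the pinned map cannot be opened, so callers must handle a null return (as the
        // dump* methods in this class do).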
315 
316         /** Get upstream4 BPF map. */
317         @Nullable public BpfMap<Tether4Key, Tether4Value> getBpfUpstream4Map() {
318             if (!isAtLeastS()) return null;
319             try {
320                 return new BpfMap<>(TETHER_UPSTREAM4_MAP_PATH,
321                     BpfMap.BPF_F_RDWR, Tether4Key.class, Tether4Value.class);
322             } catch (ErrnoException e) {
323                 Log.e(TAG, "Cannot create upstream4 map: " + e);
324                 return null;
325             }
326         }
327 
328         /** Get downstream6 BPF map. */
329         @Nullable public BpfMap<TetherDownstream6Key, Tether6Value> getBpfDownstream6Map() {
330             if (!isAtLeastS()) return null;
331             try {
332                 return new BpfMap<>(TETHER_DOWNSTREAM6_FS_PATH,
333                     BpfMap.BPF_F_RDWR, TetherDownstream6Key.class, Tether6Value.class);
334             } catch (ErrnoException e) {
335                 Log.e(TAG, "Cannot create downstream6 map: " + e);
336                 return null;
337             }
338         }
339 
340         /** Get upstream6 BPF map. */
341         @Nullable public BpfMap<TetherUpstream6Key, Tether6Value> getBpfUpstream6Map() {
342             if (!isAtLeastS()) return null;
343             try {
344                 return new BpfMap<>(TETHER_UPSTREAM6_FS_PATH, BpfMap.BPF_F_RDWR,
345                         TetherUpstream6Key.class, Tether6Value.class);
346             } catch (ErrnoException e) {
347                 Log.e(TAG, "Cannot create upstream6 map: " + e);
348                 return null;
349             }
350         }
351 
352         /** Get stats BPF map. */
353         @Nullable public BpfMap<TetherStatsKey, TetherStatsValue> getBpfStatsMap() {
354             if (!isAtLeastS()) return null;
355             try {
356                 return new BpfMap<>(TETHER_STATS_MAP_PATH,
357                     BpfMap.BPF_F_RDWR, TetherStatsKey.class, TetherStatsValue.class);
358             } catch (ErrnoException e) {
359                 Log.e(TAG, "Cannot create stats map: " + e);
360                 return null;
361             }
362         }
363 
364         /** Get limit BPF map. */
365         @Nullable public BpfMap<TetherLimitKey, TetherLimitValue> getBpfLimitMap() {
366             if (!isAtLeastS()) return null;
367             try {
368                 return new BpfMap<>(TETHER_LIMIT_MAP_PATH,
369                     BpfMap.BPF_F_RDWR, TetherLimitKey.class, TetherLimitValue.class);
370             } catch (ErrnoException e) {
371                 Log.e(TAG, "Cannot create limit map: " + e);
372                 return null;
373             }
374         }
375 
376         /** Get dev BPF map. */
377         @Nullable public BpfMap<TetherDevKey, TetherDevValue> getBpfDevMap() {
378             if (!isAtLeastS()) return null;
379             try {
380                 return new BpfMap<>(TETHER_DEV_MAP_PATH,
381                     BpfMap.BPF_F_RDWR, TetherDevKey.class, TetherDevValue.class);
382             } catch (ErrnoException e) {
383                 Log.e(TAG, "Cannot create dev map: " + e);
384                 return null;
385             }
386         }
387     }
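    // Illustrative sketch (not production code): a caller holding a Dependencies instance "deps"
    // could read the offload stats map as follows, mirroring the dump* methods further down.
    //
    //     try (BpfMap<TetherStatsKey, TetherStatsValue> statsMap = deps.getBpfStatsMap()) {
    //         if (statsMap != null) {
    //             statsMap.forEach((k, v) -> Log.d(TAG, k + " -> " + v));
    //         }
    //     } catch (ErrnoException e) {
    //         Log.e(TAG, "Cannot read stats map: " + e);
    //     }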
388 
389     @VisibleForTesting
390     public BpfCoordinator(@NonNull Dependencies deps) {
391         mDeps = deps;
392         mHandler = mDeps.getHandler();
393         mNetd = mDeps.getNetd();
394         mLog = mDeps.getSharedLog().forSubComponent(TAG);
395         mIsBpfEnabled = isBpfEnabled();
396 
397         // The conntrack consumer needs to be initialized in the BpfCoordinator constructor
398         // because it has to access the data members of BpfCoordinator, so it cannot be a static
399         // class. The consumer object is also needed to initialize the conntrack monitor, which
400         // may be mocked for testing.
401         mBpfConntrackEventConsumer = new BpfConntrackEventConsumer();
402         mConntrackMonitor = mDeps.getConntrackMonitor(mBpfConntrackEventConsumer);
403 
404         BpfTetherStatsProvider provider = new BpfTetherStatsProvider();
405         try {
406             mDeps.getNetworkStatsManager().registerNetworkStatsProvider(
407                     getClass().getSimpleName(), provider);
408         } catch (RuntimeException e) {
409             // TODO: Perhaps disallow BPF offload, because the registration failure implies
410             // that no data limit can be applied on a metered upstream, if any.
411             Log.wtf(TAG, "Cannot register offload stats provider: " + e);
412             provider = null;
413         }
414         mStatsProvider = provider;
415 
416         mBpfCoordinatorShim = BpfCoordinatorShim.getBpfCoordinatorShim(deps);
417         if (!mBpfCoordinatorShim.isInitialized()) {
418             mLog.e("Bpf shim not initialized");
419         }
420     }
421 
422     /**
423      * Start BPF tethering offload stats polling when the first upstream is started.
424      * Note that this can only be called on the handler thread.
425      * TODO: Perhaps check BPF support before starting.
426      * TODO: Start the stats polling only if there is any client on the downstream.
427      */
428     public void startPolling() {
429         if (mPollingStarted) return;
430 
431         if (!isUsingBpf()) {
432             mLog.i("BPF is not in use");
433             return;
434         }
435 
436         mPollingStarted = true;
437         maybeSchedulePollingStats();
438         maybeSchedulePollingConntrackTimeout();
439 
440         mLog.i("Polling started");
441     }
442 
443     /**
444      * Stop BPF tethering offload stats polling.
445      * The data limit cleanup and the tether stats maps cleanup are not implemented here.
446      * These cleanups rely on all IpServers calling #tetherOffloadRuleRemove. After the
447      * last rule is removed from the upstream, #tetherOffloadRuleRemove performs the
448      * cleanup.
449      * Note that this can only be called on the handler thread.
450      */
451     public void stopPolling() {
452         if (!mPollingStarted) return;
453 
454         // Stop scheduled polling conntrack timeout.
455         if (mHandler.hasCallbacks(mScheduledPollingConntrackTimeout)) {
456             mHandler.removeCallbacks(mScheduledPollingConntrackTimeout);
457         }
458         // Stop scheduled polling stats and poll the latest stats from BPF maps.
459         if (mHandler.hasCallbacks(mScheduledPollingStats)) {
460             mHandler.removeCallbacks(mScheduledPollingStats);
461         }
462         updateForwardedStats();
463         mPollingStarted = false;
464 
465         mLog.i("Polling stopped");
466     }
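    // Usage note (inferred from the mPollingStarted comment above): tethering is expected to call
    // startPolling() before the first IpServer starts and stopPolling() shortly before the last
    // IpServer stops, both on the handler thread.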
467 
468     private boolean isUsingBpf() {
469         return mIsBpfEnabled && mBpfCoordinatorShim.isInitialized();
470     }
471 
472     /**
473      * Start conntrack message monitoring.
474      * Note that this can only be called on the handler thread.
475      *
476      * TODO: figure out better logging for non-interesting conntrack messages.
477      * For example, the following logging is an IPCTNL_MSG_CT_GET message but looks scary.
478      * +---------------------------------------------------------------------------+
479      * | ERROR unparsable netlink msg: 1400000001010103000000000000000002000000    |
480      * +------------------+--------------------------------------------------------+
481      * |                  | struct nlmsghdr                                        |
482      * | 14000000         | length = 20                                            |
483      * | 0101             | type = NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET  |
484      * | 0103             | flags                                                  |
485      * | 00000000         | seqno = 0                                              |
486      * | 00000000         | pid = 0                                                |
487      * |                  | struct nfgenmsg                                        |
488      * | 02               | nfgen_family  = AF_INET                                |
489      * | 00               | version = NFNETLINK_V0                                 |
490      * | 0000             | res_id                                                 |
491      * +------------------+--------------------------------------------------------+
492      * See NetlinkMonitor#handlePacket, NetlinkMessage#parseNfMessage.
493      */
494     public void startMonitoring(@NonNull final IpServer ipServer) {
495         // TODO: Wrap conntrackMonitor starting function into mBpfCoordinatorShim.
496         if (!isUsingBpf() || !mDeps.isAtLeastS()) return;
497 
498         if (mMonitoringIpServers.contains(ipServer)) {
499             Log.wtf(TAG, "The same downstream " + ipServer.interfaceName()
500                     + " should not start monitoring twice.");
501             return;
502         }
503 
504         if (mMonitoringIpServers.isEmpty()) {
505             mConntrackMonitor.start();
506             mLog.i("Monitoring started");
507         }
508 
509         mMonitoringIpServers.add(ipServer);
510     }
511 
512     /**
513      * Stop conntrack event monitoring.
514      * Note that this can only be called on the handler thread.
515      */
516     public void stopMonitoring(@NonNull final IpServer ipServer) {
517         // TODO: Wrap conntrackMonitor stopping function into mBpfCoordinatorShim.
518         if (!isUsingBpf() || !mDeps.isAtLeastS()) return;
519 
520         mMonitoringIpServers.remove(ipServer);
521 
522         if (!mMonitoringIpServers.isEmpty()) return;
523 
524         mConntrackMonitor.stop();
525         mLog.i("Monitoring stopped");
526     }
527 
528     /**
529      * Add a forwarding rule. After adding the first rule on a given upstream, the data limit
530      * must be set on that upstream.
531      * Note that this can only be called on the handler thread.
532      */
533     public void tetherOffloadRuleAdd(
534             @NonNull final IpServer ipServer, @NonNull final Ipv6ForwardingRule rule) {
535         if (!isUsingBpf()) return;
536 
537         // TODO: Perhaps avoid adding a duplicate rule.
538         if (!mBpfCoordinatorShim.tetherOffloadRuleAdd(rule)) return;
539 
540         if (!mIpv6ForwardingRules.containsKey(ipServer)) {
541             mIpv6ForwardingRules.put(ipServer, new LinkedHashMap<Inet6Address,
542                     Ipv6ForwardingRule>());
543         }
544         LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(ipServer);
545 
546         // Add upstream and downstream interface index to dev map.
547         maybeAddDevMap(rule.upstreamIfindex, rule.downstreamIfindex);
548 
549         // When the first rule is added to an upstream, setup upstream forwarding and data limit.
550         maybeSetLimit(rule.upstreamIfindex);
551 
552         if (!isAnyRuleFromDownstreamToUpstream(rule.downstreamIfindex, rule.upstreamIfindex)) {
553             final int downstream = rule.downstreamIfindex;
554             final int upstream = rule.upstreamIfindex;
555             // TODO: support upstream forwarding on non-point-to-point interfaces.
556             // TODO: get the MTU from LinkProperties and update the rules when it changes.
557             if (!mBpfCoordinatorShim.startUpstreamIpv6Forwarding(downstream, upstream, rule.srcMac,
558                     NULL_MAC_ADDRESS, NULL_MAC_ADDRESS, NetworkStackConstants.ETHER_MTU)) {
559                 mLog.e("Failed to enable upstream IPv6 forwarding from "
560                         + mInterfaceNames.get(downstream) + " to " + mInterfaceNames.get(upstream));
561             }
562         }
563 
564         // Must add the new rule after calling #isAnyRuleOnUpstream because that check needs to
565         // determine whether this is the first rule for a given upstream.
566         rules.put(rule.address, rule);
567     }
568 
569     /**
570      * Remove a forwarding rule. After removing the last rule on a given upstream, clear the
571      * data limit, update the final tether stats, and remove the tether stats from the BPF maps.
572      * Note that this can only be called on the handler thread.
573      */
574     public void tetherOffloadRuleRemove(
575             @NonNull final IpServer ipServer, @NonNull final Ipv6ForwardingRule rule) {
576         if (!isUsingBpf()) return;
577 
578         if (!mBpfCoordinatorShim.tetherOffloadRuleRemove(rule)) return;
579 
580         LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(ipServer);
581         if (rules == null) return;
582 
583         // Must remove the rule before calling #isAnyRuleOnUpstream because that check needs to
584         // determine whether the last rule for a given upstream has been removed. If no rule was
585         // removed, return early to avoid unnecessary work on a non-existent rule, which may
586         // never have been added or may already have been removed.
587         if (rules.remove(rule.address) == null) return;
588 
589         // Remove the downstream entry if it has no more rules.
590         if (rules.isEmpty()) {
591             mIpv6ForwardingRules.remove(ipServer);
592         }
593 
594         // If no more rules between this upstream and downstream, stop upstream forwarding.
595         if (!isAnyRuleFromDownstreamToUpstream(rule.downstreamIfindex, rule.upstreamIfindex)) {
596             final int downstream = rule.downstreamIfindex;
597             final int upstream = rule.upstreamIfindex;
598             if (!mBpfCoordinatorShim.stopUpstreamIpv6Forwarding(downstream, upstream,
599                     rule.srcMac)) {
600                 mLog.e("Failed to disable upstream IPv6 forwarding from "
601                         + mInterfaceNames.get(downstream) + " to " + mInterfaceNames.get(upstream));
602             }
603         }
604 
605         // Perform cleanup if there are no more rules on the given upstream.
606         maybeClearLimit(rule.upstreamIfindex);
607     }
608 
609     /**
610      * Clear all forwarding rules for a given downstream.
611      * Note that this can only be called on the handler thread.
612      * TODO: rename to tetherOffloadRuleClear6 because this is IPv6 only.
613      */
614     public void tetherOffloadRuleClear(@NonNull final IpServer ipServer) {
615         if (!isUsingBpf()) return;
616 
617         final LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(
618                 ipServer);
619         if (rules == null) return;
620 
621         // Need to build a rule list because the rule map may be changed in the iteration.
622         for (final Ipv6ForwardingRule rule : new ArrayList<Ipv6ForwardingRule>(rules.values())) {
623             tetherOffloadRuleRemove(ipServer, rule);
624         }
625     }
626 
627     /**
628      * Update existing forwarding rules to new upstream for a given downstream.
629      * Note that this can only be called on the handler thread.
630      */
631     public void tetherOffloadRuleUpdate(@NonNull final IpServer ipServer, int newUpstreamIfindex) {
632         if (!isUsingBpf()) return;
633 
634         final LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(
635                 ipServer);
636         if (rules == null) return;
637 
638         // Need to build a rule list because the rule map may be changed in the iteration.
639         // First remove all the old rules, then add all the new rules. This is because the upstream
640         // forwarding code in tetherOffloadRuleAdd cannot support rules on two upstreams at the
641         // same time. Deleting the rules first ensures that upstream forwarding is disabled on the
642         // old upstream when the last rule is removed from it, and re-enabled on the new upstream
643         // when the first rule is added to it.
644         // TODO: Once the IPv6 client processing code has moved from IpServer to BpfCoordinator, do
645         // something smarter.
646         final ArrayList<Ipv6ForwardingRule> rulesCopy = new ArrayList<>(rules.values());
647         for (final Ipv6ForwardingRule rule : rulesCopy) {
648             // Remove the old rule before adding the new one because the map uses the same key for
649             // both rules. Reversing the processing order would cause the new rule to be removed
650             // unexpectedly.
651             // TODO: Add the new rule first to reduce the window during which there is no rule.
652             tetherOffloadRuleRemove(ipServer, rule);
653         }
654         for (final Ipv6ForwardingRule rule : rulesCopy) {
655             tetherOffloadRuleAdd(ipServer, rule.onNewUpstream(newUpstreamIfindex));
656         }
657     }
658 
659     /**
660      * Add an upstream name to the lookup table. The lookup table is used to resolve interface
661      * names for tether stats, because netd only reports the interface index in BPF tether stats
662      * while the service expects an interface name in the NetworkStats object.
663      * Note that this can only be called on the handler thread.
664      */
665     public void addUpstreamNameToLookupTable(int upstreamIfindex, @NonNull String upstreamIface) {
666         if (!isUsingBpf()) return;
667 
668         if (upstreamIfindex == 0 || TextUtils.isEmpty(upstreamIface)) return;
669 
670         if (isVcnInterface(upstreamIface)) return;
671 
672         // The same interface index to name mapping may be added by different IpServer objects or
673         // re-added by reconnection on the same upstream interface. Ignore the duplicate one.
674         final String iface = mInterfaceNames.get(upstreamIfindex);
675         if (iface == null) {
676             mInterfaceNames.put(upstreamIfindex, upstreamIface);
677         } else if (!TextUtils.equals(iface, upstreamIface)) {
678             Log.wtf(TAG, "The upstream interface name " + upstreamIface
679                     + " is different from the existing interface name "
680                     + iface + " for index " + upstreamIfindex);
681         }
682     }
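    // For example (hypothetical values), after addUpstreamNameToLookupTable(100, "rmnet_data0"),
    // tether stats that netd reports for interface index 100 can be attributed to "rmnet_data0"
    // when building NetworkStats entries.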
683 
684     /**
685      * Add downstream client.
686      * Note that this can only be called on the handler thread.
687      */
688     public void tetherOffloadClientAdd(@NonNull final IpServer ipServer,
689             @NonNull final ClientInfo client) {
690         if (!isUsingBpf()) return;
691 
692         if (!mTetherClients.containsKey(ipServer)) {
693             mTetherClients.put(ipServer, new HashMap<Inet4Address, ClientInfo>());
694         }
695 
696         HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
697         clients.put(client.clientAddress, client);
698     }
699 
700     /**
701      * Remove a downstream client and its rules if any.
702      * Note that this can only be called on the handler thread.
703      */
704     public void tetherOffloadClientRemove(@NonNull final IpServer ipServer,
705             @NonNull final ClientInfo client) {
706         if (!isUsingBpf()) return;
707 
708         // No clients on the downstream, return early.
709         HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
710         if (clients == null) return;
711 
712         // No client is removed, return early.
713         if (clients.remove(client.clientAddress) == null) return;
714 
715         // Remove the client's rules. Removing the client implies that its rules are not used
716         // anymore.
717         tetherOffloadRuleClear(client);
718 
719         // Remove the downstream entry if it has no more clients.
720         if (clients.isEmpty()) {
721             mTetherClients.remove(ipServer);
722         }
723     }
724 
725     /**
726      * Clear all downstream clients and their rules if any.
727      * Note that this can only be called on the handler thread.
728      */
729     public void tetherOffloadClientClear(@NonNull final IpServer ipServer) {
730         if (!isUsingBpf()) return;
731 
732         final HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
733         if (clients == null) return;
734 
735         // Need to build a client list because the client map may be changed in the iteration.
736         for (final ClientInfo c : new ArrayList<ClientInfo>(clients.values())) {
737             tetherOffloadClientRemove(ipServer, c);
738         }
739     }
740 
741     /**
742      * Clear all forwarding IPv4 rules for a given client.
743      * Note that this can only be called on the handler thread.
744      */
745     private void tetherOffloadRuleClear(@NonNull final ClientInfo clientInfo) {
746         // TODO: consider removing the rules in #tetherOffloadRuleForEach once BpfMap#forEach
747         // can guarantee that deleting entries during BPF map iteration still visits every
748         // remaining entry.
749         final Inet4Address clientAddr = clientInfo.clientAddress;
750         final Set<Integer> upstreamIndiceSet = new ArraySet<Integer>();
751         final Set<Tether4Key> deleteUpstreamRuleKeys = new ArraySet<Tether4Key>();
752         final Set<Tether4Key> deleteDownstreamRuleKeys = new ArraySet<Tether4Key>();
753 
754         // Find the rules which are related to the given client.
755         mBpfCoordinatorShim.tetherOffloadRuleForEach(UPSTREAM, (k, v) -> {
756             if (Arrays.equals(k.src4, clientAddr.getAddress())) {
757                 deleteUpstreamRuleKeys.add(k);
758             }
759         });
760         mBpfCoordinatorShim.tetherOffloadRuleForEach(DOWNSTREAM, (k, v) -> {
761             if (Arrays.equals(v.dst46, toIpv4MappedAddressBytes(clientAddr))) {
762                 deleteDownstreamRuleKeys.add(k);
763                 upstreamIndiceSet.add((int) k.iif);
764             }
765         });
766 
767         // The rules should be paired in the upstream and downstream maps because they are added
768         // from conntrack events, which carry bidirectional information.
769         // TODO: Consider figuring out a way to fix this. Probably delete all rules as a fallback.
770         if (deleteUpstreamRuleKeys.size() != deleteDownstreamRuleKeys.size()) {
771             Log.wtf(TAG, "The number of rules to delete differs on upstream4 and downstream4 ("
772                     + "upstream: " + deleteUpstreamRuleKeys.size() + ", "
773                     + "downstream: " + deleteDownstreamRuleKeys.size() + ").");
774             return;
775         }
776 
777         // Delete the rules which are related to the given client.
778         for (final Tether4Key k : deleteUpstreamRuleKeys) {
779             mBpfCoordinatorShim.tetherOffloadRuleRemove(UPSTREAM, k);
780         }
781         for (final Tether4Key k : deleteDownstreamRuleKeys) {
782             mBpfCoordinatorShim.tetherOffloadRuleRemove(DOWNSTREAM, k);
783         }
784 
785         // Clean up each upstream interface exactly once, using a set to avoid duplicate work on
786         // the same upstream interface. Cleaning up the same interface twice (or more) here may
787         // throw an exception because all related information was removed by the first cleanup.
788         for (final int upstreamIndex : upstreamIndiceSet) {
789             maybeClearLimit(upstreamIndex);
790         }
791     }
792 
793     /**
794      * Clear all IPv4 forwarding rules for a given downstream. Needed because clients may still
795      * be connected on the downstream while the existing rules are no longer required, for
796      * example after the upstream has changed.
797      */
798     private void tetherOffloadRule4Clear(@NonNull final IpServer ipServer) {
799         if (!isUsingBpf()) return;
800 
801         final HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
802         if (clients == null) return;
803 
804         // Each value is as unique as its key, because the key is currently the client address
805         // taken from ClientInfo. See #tetherOffloadClientAdd.
806         for (final ClientInfo client : clients.values()) {
807             tetherOffloadRuleClear(client);
808         }
809     }
810 
811     private boolean isValidUpstreamIpv4Address(@NonNull final InetAddress addr) {
812         if (!(addr instanceof Inet4Address)) return false;
813         Inet4Address v4 = (Inet4Address) addr;
814         if (v4.isAnyLocalAddress() || v4.isLinkLocalAddress()
815                 || v4.isLoopbackAddress() || v4.isMulticastAddress()) {
816             return false;
817         }
818         return true;
819     }
820 
821     /**
822      * Call when the UpstreamNetworkState may have changed.
823      * If the upstream has an IPv4 address for tethering, use the new UpstreamNetworkState
824      * to build the upstream interface index mapping. Otherwise, clear all existing rules,
825      * if any.
826      *
827      * Note that this can only be called on the handler thread.
828      */
829     public void updateUpstreamNetworkState(UpstreamNetworkState ns) {
830         if (!isUsingBpf()) return;
831 
832         int upstreamIndex = 0;
833 
834         // This will not work on a network that is using 464xlat because hasIpv4Address will not be
835         // true.
836         // TODO: need to consider 464xlat.
837         if (ns != null && ns.linkProperties != null && ns.linkProperties.hasIpv4Address()) {
838             // TODO: support ether ip upstream interface.
839             final String ifaceName = ns.linkProperties.getInterfaceName();
840             final InterfaceParams params = mDeps.getInterfaceParams(ifaceName);
841             final boolean isVcn = isVcnInterface(ifaceName);
842             if (!isVcn && params != null && !params.hasMacAddress /* raw ip upstream only */) {
843                 upstreamIndex = params.index;
844             }
845         }
846         if (mLastIPv4UpstreamIfindex == upstreamIndex) return;
847 
848         // Clear existing rules if the upstream interface has changed. The rules must be cleared
849         // before the upstream index mapping is cleared; otherwise an IpServer or a conntrack
850         // event could use a no-longer-existing upstream interface index to build a removal key
851         // while the rules are being removed. The IpServers cannot be notified to clear the rules
852         // themselves (as IPv6TetheringCoordinator#updateUpstreamNetworkState does) because an
853         // IpServer may not handle the upstream change before the index mapping is updated.
854         if (mLastIPv4UpstreamIfindex != 0) {
855             // Clear all forwarding IPv4 rules for all downstreams.
856             for (final IpServer ipserver : mTetherClients.keySet()) {
857                 tetherOffloadRule4Clear(ipserver);
858             }
859         }
860 
861         // Don't update mLastIPv4UpstreamIfindex before clearing any existing rules; the old value
862         // is needed to tell whether the out-of-date rules must be cleaned up.
863         mLastIPv4UpstreamIfindex = upstreamIndex;
864 
865         // If link properties are valid, build the upstream information mapping. Otherwise, clear
866         // the upstream interface index mapping, to ensure that any conntrack events that arrive
867         // after the upstream is lost do not incorrectly add rules pointing at the upstream.
868         if (upstreamIndex == 0) {
869             mIpv4UpstreamIndices.clear();
870             return;
871         }
872         Collection<InetAddress> addresses = ns.linkProperties.getAddresses();
873         for (final InetAddress addr: addresses) {
874             if (isValidUpstreamIpv4Address(addr)) {
875                 mIpv4UpstreamIndices.put((Inet4Address) addr, upstreamIndex);
876             }
877         }
878     }
879 
880     /**
881      * Attach the BPF program.
882      *
883      * TODO: consider error handling if attaching the program fails.
884      */
885     public void maybeAttachProgram(@NonNull String intIface, @NonNull String extIface) {
886         if (isVcnInterface(extIface)) return;
887 
888         if (forwardingPairExists(intIface, extIface)) return;
889 
890         boolean firstDownstreamForThisUpstream = !isAnyForwardingPairOnUpstream(extIface);
891         forwardingPairAdd(intIface, extIface);
892 
893         mBpfCoordinatorShim.attachProgram(intIface, UPSTREAM);
894         // Attach if this is the first time the upstream is used in a forwarding pair.
895         if (firstDownstreamForThisUpstream) {
896             mBpfCoordinatorShim.attachProgram(extIface, DOWNSTREAM);
897         }
898     }
899 
900     /**
901      * Detach BPF program
902      */
903     public void maybeDetachProgram(@NonNull String intIface, @NonNull String extIface) {
904         forwardingPairRemove(intIface, extIface);
905 
906         // Detaching program may fail because the interface has been removed already.
907         mBpfCoordinatorShim.detachProgram(intIface);
908         // Detach if no more forwarding pair is using the upstream.
909         if (!isAnyForwardingPairOnUpstream(extIface)) {
910             mBpfCoordinatorShim.detachProgram(extIface);
911         }
912     }
913 
914     // TODO: make mInterfaceNames accessible to the shim and move this code to there.
915     private String getIfName(long ifindex) {
916         return mInterfaceNames.get((int) ifindex, Long.toString(ifindex));
917     }
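    // For example (hypothetical values): getIfName(100) returns "rmnet_data0" if that mapping was
    // added via addUpstreamNameToLookupTable, and the string "100" otherwise.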
918 
919     /**
920      * Dump information.
921      * Blocks until all the data has been dumped on the handler thread or a timeout occurs. The
922      * reason is that dumpsys invokes this function on the caller's thread while the data may
923      * only be accessed on the handler thread.
924      */
925     public void dump(@NonNull IndentingPrintWriter pw) {
926         pw.println("mIsBpfEnabled: " + mIsBpfEnabled);
927         pw.println("Polling " + (mPollingStarted ? "started" : "not started"));
928         pw.println("Stats provider " + (mStatsProvider != null
929                 ? "registered" : "not registered"));
930         pw.println("Upstream quota: " + mInterfaceQuotas.toString());
931         pw.println("Polling interval: " + getPollingInterval() + " ms");
932         pw.println("Bpf shim: " + mBpfCoordinatorShim.toString());
933 
934         pw.println("Forwarding stats:");
935         pw.increaseIndent();
936         if (mStats.size() == 0) {
937             pw.println("<empty>");
938         } else {
939             dumpStats(pw);
940         }
941         pw.decreaseIndent();
942 
943         pw.println("BPF stats:");
944         pw.increaseIndent();
945         dumpBpfStats(pw);
946         pw.decreaseIndent();
947         pw.println();
948 
949         pw.println("Forwarding rules:");
950         pw.increaseIndent();
951         dumpIpv6UpstreamRules(pw);
952         dumpIpv6ForwardingRules(pw);
953         dumpIpv4ForwardingRules(pw);
954         pw.decreaseIndent();
955         pw.println();
956 
957         pw.println("Device map:");
958         pw.increaseIndent();
959         dumpDevmap(pw);
960         pw.decreaseIndent();
961 
962         pw.println("Client Information:");
963         pw.increaseIndent();
964         if (mTetherClients.isEmpty()) {
965             pw.println("<empty>");
966         } else {
967             pw.println(mTetherClients.toString());
968         }
969         pw.decreaseIndent();
970 
971         pw.println("IPv4 Upstream Indices:");
972         pw.increaseIndent();
973         if (mIpv4UpstreamIndices.isEmpty()) {
974             pw.println("<empty>");
975         } else {
976             pw.println(mIpv4UpstreamIndices.toString());
977         }
978         pw.decreaseIndent();
979 
980         pw.println();
981         pw.println("Forwarding counters:");
982         pw.increaseIndent();
983         dumpCounters(pw);
984         pw.decreaseIndent();
985     }
986 
987     private void dumpStats(@NonNull IndentingPrintWriter pw) {
988         for (int i = 0; i < mStats.size(); i++) {
989             final int upstreamIfindex = mStats.keyAt(i);
990             final ForwardedStats stats = mStats.get(upstreamIfindex);
991             pw.println(String.format("%d(%s) - %s", upstreamIfindex, mInterfaceNames.get(
992                     upstreamIfindex), stats.toString()));
993         }
994     }
995     private void dumpBpfStats(@NonNull IndentingPrintWriter pw) {
996         try (BpfMap<TetherStatsKey, TetherStatsValue> map = mDeps.getBpfStatsMap()) {
997             if (map == null) {
998                 pw.println("No BPF stats map");
999                 return;
1000             }
1001             if (map.isEmpty()) {
1002                 pw.println("<empty>");
1003             }
1004             map.forEach((k, v) -> {
1005                 pw.println(String.format("%s: %s", k, v));
1006             });
1007         } catch (ErrnoException e) {
1008             pw.println("Error dumping BPF stats map: " + e);
1009         }
1010     }
1011 
1012     private void dumpIpv6ForwardingRules(@NonNull IndentingPrintWriter pw) {
1013         if (mIpv6ForwardingRules.size() == 0) {
1014             pw.println("No IPv6 rules");
1015             return;
1016         }
1017 
1018         for (Map.Entry<IpServer, LinkedHashMap<Inet6Address, Ipv6ForwardingRule>> entry :
1019                 mIpv6ForwardingRules.entrySet()) {
1020             IpServer ipServer = entry.getKey();
1021             // The rule downstream interface index is paired with the interface name from
1022             // IpServer#interfaceName. See #startIPv6, #updateIpv6ForwardingRules in IpServer.
1023             final String downstreamIface = ipServer.interfaceName();
1024             pw.println("[" + downstreamIface + "]: iif(iface) oif(iface) v6addr srcmac dstmac");
1025 
1026             pw.increaseIndent();
1027             LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = entry.getValue();
1028             for (Ipv6ForwardingRule rule : rules.values()) {
1029                 final int upstreamIfindex = rule.upstreamIfindex;
1030                 pw.println(String.format("%d(%s) %d(%s) %s %s %s", upstreamIfindex,
1031                         mInterfaceNames.get(upstreamIfindex), rule.downstreamIfindex,
1032                         downstreamIface, rule.address.getHostAddress(), rule.srcMac, rule.dstMac));
1033             }
1034             pw.decreaseIndent();
1035         }
1036     }
1037 
1038     private String ipv6UpstreamRuletoString(TetherUpstream6Key key, Tether6Value value) {
1039         return String.format("%d(%s) %s -> %d(%s) %04x %s %s",
1040                 key.iif, getIfName(key.iif), key.dstMac, value.oif, getIfName(value.oif),
1041                 value.ethProto, value.ethSrcMac, value.ethDstMac);
1042     }
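    // Example output with hypothetical values, following the format string above:
    //   "42(wlan1) 12:34:56:78:9a:bc -> 100(rmnet_data0) 86dd aa:bb:cc:dd:ee:ff 00:00:00:00:00:00"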
1043 
1044     private void dumpIpv6UpstreamRules(IndentingPrintWriter pw) {
1045         try (BpfMap<TetherUpstream6Key, Tether6Value> map = mDeps.getBpfUpstream6Map()) {
1046             if (map == null) {
1047                 pw.println("No IPv6 upstream");
1048                 return;
1049             }
1050             if (map.isEmpty()) {
1051                 pw.println("No IPv6 upstream rules");
1052                 return;
1053             }
1054             map.forEach((k, v) -> pw.println(ipv6UpstreamRuletoString(k, v)));
1055         } catch (ErrnoException e) {
1056             pw.println("Error dumping IPv6 upstream map: " + e);
1057         }
1058     }
1059 
1060     private String ipv4RuleToString(long now, boolean downstream,
1061             Tether4Key key, Tether4Value value) {
1062         final String src4, public4, dst4;
1063         final int publicPort;
1064         try {
1065             src4 = InetAddress.getByAddress(key.src4).getHostAddress();
1066             if (downstream) {
1067                 public4 = InetAddress.getByAddress(key.dst4).getHostAddress();
1068                 publicPort = key.dstPort;
1069             } else {
1070                 public4 = InetAddress.getByAddress(value.src46).getHostAddress();
1071                 publicPort = value.srcPort;
1072             }
1073             dst4 = InetAddress.getByAddress(value.dst46).getHostAddress();
1074         } catch (UnknownHostException impossible) {
1075             throw new AssertionError("IP address array not valid IPv4 address!");
1076         }
1077 
1078         final String protoStr = (key.l4proto == OsConstants.IPPROTO_TCP) ? "tcp" : "udp";
1079         final String ageStr = (value.lastUsed == 0) ? "-"
1080                 : String.format("%dms", (now - value.lastUsed) / 1_000_000);
1081         return String.format("%s [%s] %d(%s) %s:%d -> %d(%s) %s:%d -> %s:%d [%s] %s",
1082                 protoStr, key.dstMac, key.iif, getIfName(key.iif), src4, key.srcPort,
1083                 value.oif, getIfName(value.oif),
1084                 public4, publicPort, dst4, value.dstPort, value.ethDstMac, ageStr);
1085     }
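    // Example output with hypothetical values, following the format string above:
    //   "tcp [de:ad:be:ef:00:01] 48(wlan1) 192.168.43.15:45678 -> 100(rmnet_data0)
    //    100.64.0.2:7890 -> 8.8.8.8:443 [00:11:22:33:44:55] 150ms"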
1086 
1087     private void dumpIpv4ForwardingRuleMap(long now, boolean downstream,
1088             BpfMap<Tether4Key, Tether4Value> map, IndentingPrintWriter pw) throws ErrnoException {
1089         if (map == null) {
1090             pw.println("No IPv4 support");
1091             return;
1092         }
1093         if (map.isEmpty()) {
1094             pw.println("No rules");
1095             return;
1096         }
1097         map.forEach((k, v) -> pw.println(ipv4RuleToString(now, downstream, k, v)));
1098     }
1099 
1100     private void dumpIpv4ForwardingRules(IndentingPrintWriter pw) {
1101         final long now = SystemClock.elapsedRealtimeNanos();
1102 
1103         try (BpfMap<Tether4Key, Tether4Value> upstreamMap = mDeps.getBpfUpstream4Map();
1104                 BpfMap<Tether4Key, Tether4Value> downstreamMap = mDeps.getBpfDownstream4Map()) {
1105             pw.println("IPv4 Upstream: proto [inDstMac] iif(iface) src -> nat -> "
1106                     + "dst [outDstMac] age");
1107             pw.increaseIndent();
1108             dumpIpv4ForwardingRuleMap(now, UPSTREAM, upstreamMap, pw);
1109             pw.decreaseIndent();
1110 
1111             pw.println("IPv4 Downstream: proto [inDstMac] iif(iface) src -> nat -> "
1112                     + "dst [outDstMac] age");
1113             pw.increaseIndent();
1114             dumpIpv4ForwardingRuleMap(now, DOWNSTREAM, downstreamMap, pw);
1115             pw.decreaseIndent();
1116         } catch (ErrnoException e) {
1117             pw.println("Error dumping IPv4 map: " + e);
1118         }
1119     }
1120 
1121     /**
1122      * Simple struct that only contains a u32. Must be public because Struct needs access to it.
1123      * TODO: make this a public inner class of Struct so anyone can use it as, e.g., Struct.U32?
1124      */
1125     public static class U32Struct extends Struct {
1126         @Struct.Field(order = 0, type = Struct.Type.U32)
1127         public long val;
1128     }
1129 
1130     private void dumpCounters(@NonNull IndentingPrintWriter pw) {
1131         if (!mDeps.isAtLeastS()) {
1132             pw.println("No counter support");
1133             return;
1134         }
1135         try (BpfMap<U32Struct, U32Struct> map = new BpfMap<>(TETHER_ERROR_MAP_PATH,
1136                 BpfMap.BPF_F_RDONLY, U32Struct.class, U32Struct.class)) {
1137 
1138             map.forEach((k, v) -> {
1139                 String counterName;
1140                 try {
1141                     counterName = sBpfCounterNames[(int) k.val];
1142                 } catch (IndexOutOfBoundsException e) {
1143                     // Should never happen because this code gets the counter name from the same
1144                     // include file as the BPF program that increments the counter.
1145                     Log.wtf(TAG, "Unknown tethering counter type " + k.val);
1146                     counterName = Long.toString(k.val);
1147                 }
1148                 if (v.val > 0) pw.println(String.format("%s: %d", counterName, v.val));
1149             });
1150         } catch (ErrnoException e) {
1151             pw.println("Error dumping counter map: " + e);
1152         }
1153     }
1154 
1155     private void dumpDevmap(@NonNull IndentingPrintWriter pw) {
1156         try (BpfMap<TetherDevKey, TetherDevValue> map = mDeps.getBpfDevMap()) {
1157             if (map == null) {
1158                 pw.println("No devmap support");
1159                 return;
1160             }
1161             if (map.isEmpty()) {
1162                 pw.println("<empty>");
1163                 return;
1164             }
1165             pw.println("ifindex (iface) -> ifindex (iface)");
1166             pw.increaseIndent();
1167             map.forEach((k, v) -> {
1168                 // Only the upstream interface name is resolved; just do our best to make the
1169                 // index readable. TODO: also resolve the downstream interface name, because an
1170                 // index in the dev map may be either an upstream or a downstream interface.
1171                 pw.println(String.format("%d (%s) -> %d (%s)", k.ifIndex, getIfName(k.ifIndex),
1172                         v.ifIndex, getIfName(v.ifIndex)));
1173             });
1174         } catch (ErrnoException e) {
1175             pw.println("Error dumping dev map: " + e);
1176         }
1177         pw.decreaseIndent();
1178     }
1179 
1180     /** IPv6 forwarding rule class. */
1181     public static class Ipv6ForwardingRule {
1182         // The upstream6 and downstream6 rules are built as the following tables. Only a raw ip
1183         // upstream interface is supported.
1184         // TODO: support ether ip upstream interface.
1185         //
1186         // NAT network topology:
1187         //
1188         //         public network (rawip)                 private network
1189         //                   |                 UE                |
1190         // +------------+    V    +------------+------------+    V    +------------+
1191         // |   Server   +---------+  Upstream  | Downstream +---------+   Client   |
1192         // +------------+         +------------+------------+         +------------+
1193         //
1194         // upstream6 key and value:
1195         //
1196         // +------+-------------+
1197         // | TetherUpstream6Key |
1198         // +------+------+------+
1199         // |field |iif   |dstMac|
1200         // |      |      |      |
1201         // +------+------+------+
1202         // |value |downst|downst|
1203         // |      |ream  |ream  |
1204         // +------+------+------+
1205         //
1206         // +------+----------------------------------+
1207         // |      |Tether6Value                      |
1208         // +------+------+------+------+------+------+
1209         // |field |oif   |ethDst|ethSrc|ethPro|pmtu  |
1210         // |      |      |mac   |mac   |to    |      |
1211         // +------+------+------+------+------+------+
1212         // |value |upstre|--    |--    |ETH_P_|1500  |
1213         // |      |am    |      |      |IPV6  |      |
1214         // +------+------+------+------+------+------+
1215         //
1216         // downstream6 key and value:
1217         //
1218         // +------+--------------------+
1219         // |      |TetherDownstream6Key|
1220         // +------+------+------+------+
1221         // |field |iif   |dstMac|neigh6|
1222         // |      |      |      |      |
1223         // +------+------+------+------+
1224         // |value |upstre|--    |client|
1225         // |      |am    |      |      |
1226         // +------+------+------+------+
1227         //
1228         // +------+----------------------------------+
1229         // |      |Tether6Value                      |
1230         // +------+------+------+------+------+------+
1231         // |field |oif   |ethDst|ethSrc|ethPro|pmtu  |
1232         // |      |      |mac   |mac   |to    |      |
1233         // +------+------+------+------+------+------+
1234         // |value |downst|client|downst|ETH_P_|1500  |
1235         // |      |ream  |      |ream  |IPV6  |      |
1236         // +------+------+------+------+------+------+
1237         //
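        //
        // Purely illustrative usage sketch (made-up indices and addresses; the real rule
        // installation is performed elsewhere in BpfCoordinator through BpfCoordinatorShim):
        //
        //   Ipv6ForwardingRule rule = new Ipv6ForwardingRule(
        //           10 /* upstream ifindex */, 48 /* downstream ifindex */,
        //           (Inet6Address) InetAddress.getByName("2001:db8::2"),
        //           MacAddress.fromString("11:22:33:44:55:66") /* downstream MAC */,
        //           MacAddress.fromString("aa:bb:cc:dd:ee:ff") /* client MAC */);
        //   // rule.makeTetherDownstream6Key() and rule.makeTether6Value() then produce the
        //   // downstream6 map entry laid out in the table above.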
1238         public final int upstreamIfindex;
1239         public final int downstreamIfindex;
1240 
1241         // TODO: store a ClientInfo object instead of storing address, srcMac, and dstMac directly.
1242         @NonNull
1243         public final Inet6Address address;
1244         @NonNull
1245         public final MacAddress srcMac;
1246         @NonNull
1247         public final MacAddress dstMac;
1248 
1249         public Ipv6ForwardingRule(int upstreamIfindex, int downstreamIfIndex,
1250                 @NonNull Inet6Address address, @NonNull MacAddress srcMac,
1251                 @NonNull MacAddress dstMac) {
1252             this.upstreamIfindex = upstreamIfindex;
1253             this.downstreamIfindex = downstreamIfIndex;
1254             this.address = address;
1255             this.srcMac = srcMac;
1256             this.dstMac = dstMac;
1257         }
1258 
1259         /** Return a new rule object which updates with new upstream index. */
1260         @NonNull
1261         public Ipv6ForwardingRule onNewUpstream(int newUpstreamIfindex) {
1262             return new Ipv6ForwardingRule(newUpstreamIfindex, downstreamIfindex, address, srcMac,
1263                     dstMac);
1264         }
1265 
1266         /**
1267          * Don't manipulate TetherOffloadRuleParcel directly because implementing onNewUpstream()
1268          * would be error-prone due to generated stable AIDL classes not having a copy constructor.
1269          */
1270         @NonNull
1271         public TetherOffloadRuleParcel toTetherOffloadRuleParcel() {
1272             final TetherOffloadRuleParcel parcel = new TetherOffloadRuleParcel();
1273             parcel.inputInterfaceIndex = upstreamIfindex;
1274             parcel.outputInterfaceIndex = downstreamIfindex;
1275             parcel.destination = address.getAddress();
1276             parcel.prefixLength = 128;
1277             parcel.srcL2Address = srcMac.toByteArray();
1278             parcel.dstL2Address = dstMac.toByteArray();
1279             return parcel;
1280         }
1281 
1282         /**
1283          * Return a TetherDownstream6Key object built from the rule.
1284          */
1285         @NonNull
1286         public TetherDownstream6Key makeTetherDownstream6Key() {
1287             return new TetherDownstream6Key(upstreamIfindex, NULL_MAC_ADDRESS,
1288                     address.getAddress());
1289         }
1290 
1291         /**
1292          * Return a Tether6Value object built from the rule.
1293          */
1294         @NonNull
1295         public Tether6Value makeTether6Value() {
1296             return new Tether6Value(downstreamIfindex, dstMac, srcMac, ETH_P_IPV6,
1297                     NetworkStackConstants.ETHER_MTU);
1298         }
1299 
1300         @Override
1301         public boolean equals(Object o) {
1302             if (!(o instanceof Ipv6ForwardingRule)) return false;
1303             Ipv6ForwardingRule that = (Ipv6ForwardingRule) o;
1304             return this.upstreamIfindex == that.upstreamIfindex
1305                     && this.downstreamIfindex == that.downstreamIfindex
1306                     && Objects.equals(this.address, that.address)
1307                     && Objects.equals(this.srcMac, that.srcMac)
1308                     && Objects.equals(this.dstMac, that.dstMac);
1309         }
1310 
1311         @Override
1312         public int hashCode() {
1313             // TODO: if this is ever used in production code, don't pass ifindices
1314             // to Objects.hash() to avoid autoboxing overhead.
1315             return Objects.hash(upstreamIfindex, downstreamIfindex, address, srcMac, dstMac);
1316         }
1317     }
1318 
1319     /** Tethering client information class. */
1320     public static class ClientInfo {
1321         public final int downstreamIfindex;
1322 
1323         @NonNull
1324         public final MacAddress downstreamMac;
1325         @NonNull
1326         public final Inet4Address clientAddress;
1327         @NonNull
1328         public final MacAddress clientMac;
1329 
1330         public ClientInfo(int downstreamIfindex,
1331                 @NonNull MacAddress downstreamMac, @NonNull Inet4Address clientAddress,
1332                 @NonNull MacAddress clientMac) {
1333             this.downstreamIfindex = downstreamIfindex;
1334             this.downstreamMac = downstreamMac;
1335             this.clientAddress = clientAddress;
1336             this.clientMac = clientMac;
1337         }
1338 
1339         @Override
1340         public boolean equals(Object o) {
1341             if (!(o instanceof ClientInfo)) return false;
1342             ClientInfo that = (ClientInfo) o;
1343             return this.downstreamIfindex == that.downstreamIfindex
1344                     && Objects.equals(this.downstreamMac, that.downstreamMac)
1345                     && Objects.equals(this.clientAddress, that.clientAddress)
1346                     && Objects.equals(this.clientMac, that.clientMac);
1347         }
1348 
1349         @Override
1350         public int hashCode() {
1351             return Objects.hash(downstreamIfindex, downstreamMac, clientAddress, clientMac);
1352         }
1353 
1354         @Override
1355         public String toString() {
1356             return String.format("downstream: %d (%s), client: %s (%s)",
1357                     downstreamIfindex, downstreamMac, clientAddress, clientMac);
1358         }
1359     }
1360 
1361     /**
1362      * A BPF tethering stats provider to provide network statistics to the system.
1363      * Note that this class' data may only be accessed on the handler thread.
1364      */
1365     @VisibleForTesting
1366     class BpfTetherStatsProvider extends NetworkStatsProvider {
1367         // The offloaded traffic statistics per interface that have not been reported since the
1368         // last call to pushTetherStats. Only interfaces that were ever tethering upstreams and
1369         // have a pending tether stats delta are included in this NetworkStats object.
1370         private NetworkStats mIfaceStats = new NetworkStats(0L, 0);
1371 
1372         // The same stats as above, but counts network stats per uid.
1373         private NetworkStats mUidStats = new NetworkStats(0L, 0);
1374 
1375         @Override
1376         public void onRequestStatsUpdate(int token) {
1377             mHandler.post(() -> pushTetherStats());
1378         }
1379 
1380         @Override
1381         public void onSetAlert(long quotaBytes) {
1382             mHandler.post(() -> updateAlertQuota(quotaBytes));
1383         }
1384 
1385         @Override
1386         public void onSetLimit(@NonNull String iface, long quotaBytes) {
1387             if (quotaBytes < QUOTA_UNLIMITED) {
1388                 throw new IllegalArgumentException("invalid quota value " + quotaBytes);
1389             }
1390 
1391             mHandler.post(() -> {
1392                 final Long curIfaceQuota = mInterfaceQuotas.get(iface);
1393 
1394                 if (null == curIfaceQuota && QUOTA_UNLIMITED == quotaBytes) return;
1395 
1396                 if (quotaBytes == QUOTA_UNLIMITED) {
1397                     mInterfaceQuotas.remove(iface);
1398                 } else {
1399                     mInterfaceQuotas.put(iface, quotaBytes);
1400                 }
1401                 maybeUpdateDataLimit(iface);
1402             });
1403         }
1404 
1405         @VisibleForTesting
1406         void pushTetherStats() {
1407             try {
1408                 // The token is not used for now. See b/153606961.
1409                 notifyStatsUpdated(0 /* token */, mIfaceStats, mUidStats);
1410 
1411                 // Clear the accumulated tether stats delta after reporting. Note that we create
1412                 // new empty objects because NetworkStats#clear is @hide.
1413                 mIfaceStats = new NetworkStats(0L, 0);
1414                 mUidStats = new NetworkStats(0L, 0);
1415             } catch (RuntimeException e) {
1416                 mLog.e("Cannot report network stats: ", e);
1417             }
1418         }
1419 
1420         private void accumulateDiff(@NonNull NetworkStats ifaceDiff,
1421                 @NonNull NetworkStats uidDiff) {
1422             mIfaceStats = mIfaceStats.add(ifaceDiff);
1423             mUidStats = mUidStats.add(uidDiff);
1424         }
1425     }
1426 
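    // Returns the ClientInfo for the given IPv4 client address by scanning all tracked downstream
    // clients, or null if the address does not belong to any known tethered client.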
1427     @Nullable
1428     private ClientInfo getClientInfo(@NonNull Inet4Address clientAddress) {
1429         for (HashMap<Inet4Address, ClientInfo> clients : mTetherClients.values()) {
1430             for (ClientInfo client : clients.values()) {
1431                 if (clientAddress.equals(client.clientAddress)) {
1432                     return client;
1433                 }
1434             }
1435         }
1436         return null;
1437     }
1438 
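    // Converts an IPv4 address into its IPv4-mapped IPv6 form (::ffff:a.b.c.d) as a 16-byte
    // array, which is the form stored in the src46/dst46 fields of Tether4Value.
    // For example, 192.0.2.1 becomes 00 .. 00 ff ff c0 00 02 01.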
1439     @NonNull
1440     private byte[] toIpv4MappedAddressBytes(Inet4Address ia4) {
1441         final byte[] addr4 = ia4.getAddress();
1442         final byte[] addr6 = new byte[16];
1443         addr6[10] = (byte) 0xff;
1444         addr6[11] = (byte) 0xff;
1445         addr6[12] = addr4[0];
1446         addr6[13] = addr4[1];
1447         addr6[14] = addr4[2];
1448         addr6[15] = addr4[3];
1449         return addr6;
1450     }
1451 
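    // Inverse of toIpv4MappedAddressBytes: extracts the IPv4 address from a 16-byte IPv4-mapped
    // IPv6 address (::ffff:a.b.c.d), or returns null if the bytes are not in IPv4-mapped form.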
1452     @Nullable
1453     private Inet4Address ipv4MappedAddressBytesToIpv4Address(final byte[] addr46) {
1454         if (addr46.length != 16) return null;
1455         if (addr46[0] != 0 || addr46[1] != 0 || addr46[2] != 0 || addr46[3] != 0
1456                 || addr46[4] != 0 || addr46[5] != 0 || addr46[6] != 0 || addr46[7] != 0
1457                 || addr46[8] != 0 || addr46[9] != 0 || (addr46[10] & 0xff) != 0xff
1458                 || (addr46[11] & 0xff) != 0xff) {
1459             return null;
1460         }
1461 
1462         final byte[] addr4 = new byte[4];
1463         addr4[0] = addr46[12];
1464         addr4[1] = addr46[13];
1465         addr4[2] = addr46[14];
1466         addr4[3] = addr46[15];
1467 
1468         return parseIPv4Address(addr4);
1469     }
1470 
1471     // TODO: parse CTA_PROTOINFO of conntrack event in ConntrackMonitor. For TCP, only add rules
1472     // while TCP status is established.
1473     @VisibleForTesting
1474     class BpfConntrackEventConsumer implements ConntrackEventConsumer {
1475         // The upstream4 and downstream4 rules are built as the following tables. Only a raw ip
1476         // upstream interface is supported. Note that the field "lastUsed" is only updated by the
1477         // BPF program, which records the last used time for a given rule.
1478         // TODO: support ether ip upstream interface.
1479         //
1480         // NAT network topology:
1481         //
1482         //         public network (rawip)                 private network
1483         //                   |                 UE                |
1484         // +------------+    V    +------------+------------+    V    +------------+
1485         // |   Server   +---------+  Upstream  | Downstream +---------+   Client   |
1486         // +------------+         +------------+------------+         +------------+
1487         //
1488         // upstream4 key and value:
1489         //
1490         // +------+------------------------------------------------+
1491         // |      |      TetherUpstream4Key                        |
1492         // +------+------+------+------+------+------+------+------+
1493         // |field |iif   |dstMac|l4prot|src4  |dst4  |srcPor|dstPor|
1494         // |      |      |      |o     |      |      |t     |t     |
1495         // +------+------+------+------+------+------+------+------+
1496         // |value |downst|downst|tcp/  |client|server|client|server|
1497         // |      |ream  |ream  |udp   |      |      |      |      |
1498         // +------+------+------+------+------+------+------+------+
1499         //
1500         // +------+---------------------------------------------------------------------+
1501         // |      |      TetherUpstream4Value                                           |
1502         // +------+------+------+------+------+------+------+------+------+------+------+
1503         // |field |oif   |ethDst|ethSrc|ethPro|pmtu  |src46 |dst46 |srcPor|dstPor|lastUs|
1504         // |      |      |mac   |mac   |to    |      |      |      |t     |t     |ed    |
1505         // +------+------+------+------+------+------+------+------+------+------+------+
1506         // |value |upstre|--    |--    |ETH_P_|1500  |upstre|server|upstre|server|--    |
1507         // |      |am    |      |      |IP    |      |am    |      |am    |      |      |
1508         // +------+------+------+------+------+------+------+------+------+------+------+
1509         //
1510         // downstream4 key and value:
1511         //
1512         // +------+------------------------------------------------+
1513         // |      |      TetherDownstream4Key                      |
1514         // +------+------+------+------+------+------+------+------+
1515         // |field |iif   |dstMac|l4prot|src4  |dst4  |srcPor|dstPor|
1516         // |      |      |      |o     |      |      |t     |t     |
1517         // +------+------+------+------+------+------+------+------+
1518         // |value |upstre|--    |tcp/  |server|upstre|server|upstre|
1519         // |      |am    |      |udp   |      |am    |      |am    |
1520         // +------+------+------+------+------+------+------+------+
1521         //
1522         // +------+---------------------------------------------------------------------+
1523         // |      |      TetherDownstream4Value                                         |
1524         // +------+------+------+------+------+------+------+------+------+------+------+
1525         // |field |oif   |ethDst|ethSrc|ethPro|pmtu  |src46 |dst46 |srcPor|dstPor|lastUs|
1526         // |      |      |mac   |mac   |to    |      |      |      |t     |t     |ed    |
1527         // +------+------+------+------+------+------+------+------+------+------+------+
1528         // |value |downst|client|downst|ETH_P_|1500  |server|client|server|client|--    |
1529         // |      |ream  |      |ream  |IP    |      |      |      |      |      |      |
1530         // +------+------+------+------+------+------+------+------+------+------+------+
1531         //
1532         @NonNull
1533         private Tether4Key makeTetherUpstream4Key(
1534                 @NonNull ConntrackEvent e, @NonNull ClientInfo c) {
1535             return new Tether4Key(c.downstreamIfindex, c.downstreamMac,
1536                     e.tupleOrig.protoNum, e.tupleOrig.srcIp.getAddress(),
1537                     e.tupleOrig.dstIp.getAddress(), e.tupleOrig.srcPort, e.tupleOrig.dstPort);
1538         }
1539 
1540         @NonNull
1541         private Tether4Key makeTetherDownstream4Key(
1542                 @NonNull ConntrackEvent e, @NonNull ClientInfo c, int upstreamIndex) {
1543             return new Tether4Key(upstreamIndex, NULL_MAC_ADDRESS /* dstMac (rawip) */,
1544                     e.tupleReply.protoNum, e.tupleReply.srcIp.getAddress(),
1545                     e.tupleReply.dstIp.getAddress(), e.tupleReply.srcPort, e.tupleReply.dstPort);
1546         }
1547 
1548         @NonNull
1549         private Tether4Value makeTetherUpstream4Value(@NonNull ConntrackEvent e,
1550                 int upstreamIndex) {
1551             return new Tether4Value(upstreamIndex,
1552                     NULL_MAC_ADDRESS /* ethDstMac (rawip) */,
1553                     NULL_MAC_ADDRESS /* ethSrcMac (rawip) */, ETH_P_IP,
1554                     NetworkStackConstants.ETHER_MTU, toIpv4MappedAddressBytes(e.tupleReply.dstIp),
1555                     toIpv4MappedAddressBytes(e.tupleReply.srcIp), e.tupleReply.dstPort,
1556                     e.tupleReply.srcPort, 0 /* lastUsed, filled by bpf prog only */);
1557         }
1558 
1559         @NonNull
1560         private Tether4Value makeTetherDownstream4Value(@NonNull ConntrackEvent e,
1561                 @NonNull ClientInfo c, int upstreamIndex) {
1562             return new Tether4Value(c.downstreamIfindex,
1563                     c.clientMac, c.downstreamMac, ETH_P_IP, NetworkStackConstants.ETHER_MTU,
1564                     toIpv4MappedAddressBytes(e.tupleOrig.dstIp),
1565                     toIpv4MappedAddressBytes(e.tupleOrig.srcIp),
1566                     e.tupleOrig.dstPort, e.tupleOrig.srcPort,
1567                     0 /* lastUsed, filled by bpf prog only */);
1568         }
1569 
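        // Handles a conntrack event on the handler thread. For a connection from a known tethered
        // client over a tracked upstream, this builds the upstream4/downstream4 key/value pairs
        // described above and installs both offload rules; for a conntrack DELETE event it removes
        // both rules and clears the data limit if this was the last rule on that upstream.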
1570         public void accept(ConntrackEvent e) {
1571             final ClientInfo tetherClient = getClientInfo(e.tupleOrig.srcIp);
1572             if (tetherClient == null) return;
1573 
1574             final Integer upstreamIndex = mIpv4UpstreamIndices.get(e.tupleReply.dstIp);
1575             if (upstreamIndex == null) return;
1576 
1577             final Tether4Key upstream4Key = makeTetherUpstream4Key(e, tetherClient);
1578             final Tether4Key downstream4Key = makeTetherDownstream4Key(e, tetherClient,
1579                     upstreamIndex);
1580 
1581             if (e.msgType == (NetlinkConstants.NFNL_SUBSYS_CTNETLINK << 8
1582                     | NetlinkConstants.IPCTNL_MSG_CT_DELETE)) {
1583                 final boolean deletedUpstream = mBpfCoordinatorShim.tetherOffloadRuleRemove(
1584                         UPSTREAM, upstream4Key);
1585                 final boolean deletedDownstream = mBpfCoordinatorShim.tetherOffloadRuleRemove(
1586                         DOWNSTREAM, downstream4Key);
1587 
1588                 if (!deletedUpstream && !deletedDownstream) {
1589                     // The rules may already have been removed when the client or the upstream was lost.
1590                     return;
1591                 }
1592 
1593                 if (deletedUpstream != deletedDownstream) {
1594                     Log.wtf(TAG, "The bidirectional rules should be removed concurrently ("
1595                             + "upstream: " + deletedUpstream
1596                             + ", downstream: " + deletedDownstream + ")");
1597                     return;
1598                 }
1599 
1600                 maybeClearLimit(upstreamIndex);
1601                 return;
1602             }
1603 
1604             final Tether4Value upstream4Value = makeTetherUpstream4Value(e, upstreamIndex);
1605             final Tether4Value downstream4Value = makeTetherDownstream4Value(e, tetherClient,
1606                     upstreamIndex);
1607 
1608             maybeAddDevMap(upstreamIndex, tetherClient.downstreamIfindex);
1609             maybeSetLimit(upstreamIndex);
1610             mBpfCoordinatorShim.tetherOffloadRuleAdd(UPSTREAM, upstream4Key, upstream4Value);
1611             mBpfCoordinatorShim.tetherOffloadRuleAdd(DOWNSTREAM, downstream4Key, downstream4Value);
1612         }
1613     }
1614 
1615     private boolean isBpfEnabled() {
1616         final TetheringConfiguration config = mDeps.getTetherConfig();
1617         return (config != null) ? config.isBpfOffloadEnabled() : true /* default value */;
1618     }
1619 
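    // Returns the interface index of the upstream named ifName if any IPv6 forwarding rule
    // currently uses that upstream, or 0 if no rule references it.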
1620     private int getInterfaceIndexFromRules(@NonNull String ifName) {
1621         for (LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules : mIpv6ForwardingRules
1622                 .values()) {
1623             for (Ipv6ForwardingRule rule : rules.values()) {
1624                 final int upstreamIfindex = rule.upstreamIfindex;
1625                 if (TextUtils.equals(ifName, mInterfaceNames.get(upstreamIfindex))) {
1626                     return upstreamIfindex;
1627                 }
1628             }
1629         }
1630         return 0;
1631     }
1632 
1633     private long getQuotaBytes(@NonNull String iface) {
1634         final Long limit = mInterfaceQuotas.get(iface);
1635         final long quotaBytes = (limit != null) ? limit : QUOTA_UNLIMITED;
1636 
1637         return quotaBytes;
1638     }
1639 
1640     private boolean sendDataLimitToBpfMap(int ifIndex, long quotaBytes) {
1641         if (ifIndex == 0) {
1642             Log.wtf(TAG, "Invalid interface index.");
1643             return false;
1644         }
1645 
1646         return mBpfCoordinatorShim.tetherOffloadSetInterfaceQuota(ifIndex, quotaBytes);
1647     }
1648 
1649     // Handle a data limit update from the service that the stats provider is registered with.
1650     private void maybeUpdateDataLimit(@NonNull String iface) {
1651         // Set the data limit only on an upstream which has at least one rule. If we can't get
1652         // an interface index for the given interface name, it means that either there is no rule
1653         // for that upstream or the interface name is not an upstream monitored by the
1654         // coordinator.
1655         final int ifIndex = getInterfaceIndexFromRules(iface);
1656         if (ifIndex == 0) return;
1657 
1658         final long quotaBytes = getQuotaBytes(iface);
1659         sendDataLimitToBpfMap(ifIndex, quotaBytes);
1660     }
1661 
1662     // Handle the data limit update while adding forwarding rules.
1663     private boolean updateDataLimit(int ifIndex) {
1664         final String iface = mInterfaceNames.get(ifIndex);
1665         if (iface == null) {
1666             mLog.e("Fail to get the interface name for index " + ifIndex);
1667             return false;
1668         }
1669         final long quotaBytes = getQuotaBytes(iface);
1670         return sendDataLimitToBpfMap(ifIndex, quotaBytes);
1671     }
1672 
1673     private void maybeSetLimit(int upstreamIfindex) {
1674         if (isAnyRuleOnUpstream(upstreamIfindex)
1675                 || mBpfCoordinatorShim.isAnyIpv4RuleOnUpstream(upstreamIfindex)) {
1676             return;
1677         }
1678 
1679         // If setting a data limit fails, this upstream should probably not be used, because we
1680         // must not blow through the data limit that we were told to apply.
1681         // TODO: Perhaps stop the coordinator.
1682         boolean success = updateDataLimit(upstreamIfindex);
1683         if (!success) {
1684             final String iface = mInterfaceNames.get(upstreamIfindex);
1685             mLog.e("Setting data limit for " + iface + " failed.");
1686         }
1687     }
1688 
1689     // TODO: This should also be called when IpServer wants to clear all IPv4 rules. Relying on
1690     // conntrack events alone can't cover this case.
1691     private void maybeClearLimit(int upstreamIfindex) {
1692         if (isAnyRuleOnUpstream(upstreamIfindex)
1693                 || mBpfCoordinatorShim.isAnyIpv4RuleOnUpstream(upstreamIfindex)) {
1694             return;
1695         }
1696 
1697         final TetherStatsValue statsValue =
1698                 mBpfCoordinatorShim.tetherOffloadGetAndClearStats(upstreamIfindex);
1699         if (statsValue == null) {
1700             Log.wtf(TAG, "Fail to cleanup tether stats for upstream index " + upstreamIfindex);
1701             return;
1702         }
1703 
1704         SparseArray<TetherStatsValue> tetherStatsList = new SparseArray<TetherStatsValue>();
1705         tetherStatsList.put(upstreamIfindex, statsValue);
1706 
1707         // Update the last stats delta and delete the local cache for a given upstream.
1708         updateQuotaAndStatsFromSnapshot(tetherStatsList);
1709         mStats.remove(upstreamIfindex);
1710     }
1711 
1712     // TODO: Rename to isAnyIpv6RuleOnUpstream and define an isAnyRuleOnUpstream method that calls
1713     // both isAnyIpv6RuleOnUpstream and mBpfCoordinatorShim.isAnyIpv4RuleOnUpstream.
1714     private boolean isAnyRuleOnUpstream(int upstreamIfindex) {
1715         for (LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules : mIpv6ForwardingRules
1716                 .values()) {
1717             for (Ipv6ForwardingRule rule : rules.values()) {
1718                 if (upstreamIfindex == rule.upstreamIfindex) return true;
1719             }
1720         }
1721         return false;
1722     }
1723 
1724     private boolean isAnyRuleFromDownstreamToUpstream(int downstreamIfindex, int upstreamIfindex) {
1725         for (LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules : mIpv6ForwardingRules
1726                 .values()) {
1727             for (Ipv6ForwardingRule rule : rules.values()) {
1728                 if (downstreamIfindex == rule.downstreamIfindex
1729                         && upstreamIfindex == rule.upstreamIfindex) {
1730                     return true;
1731                 }
1732             }
1733         }
1734         return false;
1735     }
1736 
1737     // TODO: remove the index from the map when the interface has been removed, because the map
1738     // is limited to 64 entries. See packages/modules/Connectivity/Tethering/bpf_progs/offload.c.
1739     private void maybeAddDevMap(int upstreamIfindex, int downstreamIfindex) {
1740         for (Integer index : new Integer[] {upstreamIfindex, downstreamIfindex}) {
1741             if (mDeviceMapSet.contains(index)) continue;
1742             if (mBpfCoordinatorShim.addDevMap(index)) mDeviceMapSet.add(index);
1743         }
1744     }
1745 
1746     private void forwardingPairAdd(@NonNull String intIface, @NonNull String extIface) {
1747         if (!mForwardingPairs.containsKey(extIface)) {
1748             mForwardingPairs.put(extIface, new HashSet<String>());
1749         }
1750         mForwardingPairs.get(extIface).add(intIface);
1751     }
1752 
1753     private void forwardingPairRemove(@NonNull String intIface, @NonNull String extIface) {
1754         HashSet<String> downstreams = mForwardingPairs.get(extIface);
1755         if (downstreams == null) return;
1756         if (!downstreams.remove(intIface)) return;
1757 
1758         if (downstreams.isEmpty()) {
1759             mForwardingPairs.remove(extIface);
1760         }
1761     }
1762 
1763     private boolean forwardingPairExists(@NonNull String intIface, @NonNull String extIface) {
1764         if (!mForwardingPairs.containsKey(extIface)) return false;
1765 
1766         return mForwardingPairs.get(extIface).contains(intIface);
1767     }
1768 
1769     private boolean isAnyForwardingPairOnUpstream(@NonNull String extIface) {
1770         return mForwardingPairs.containsKey(extIface);
1771     }
1772 
1773     @NonNull
1774     private NetworkStats buildNetworkStats(@NonNull StatsType type, int ifIndex,
1775             @NonNull final ForwardedStats diff) {
1776         NetworkStats stats = new NetworkStats(0L, 0);
1777         final String iface = mInterfaceNames.get(ifIndex);
1778         if (iface == null) {
1779             // TODO: Use Log.wtf once the coordinator owns full control of tether stats from netd.
1780             // For now, netd may add empty stats for an upstream which is not monitored by
1781             // the coordinator. Silently ignore it.
1782             return stats;
1783         }
1784         final int uid = (type == StatsType.STATS_PER_UID) ? UID_TETHERING : UID_ALL;
1785         // Note that the arguments 'metered', 'roaming' and 'defaultNetwork' are not recorded in
1786         // the network stats snapshot. See NetworkStatsRecorder#recordSnapshotLocked.
1787         return stats.addEntry(new Entry(iface, uid, SET_DEFAULT, TAG_NONE, METERED_NO,
1788                 ROAMING_NO, DEFAULT_NETWORK_NO, diff.rxBytes, diff.rxPackets,
1789                 diff.txBytes, diff.txPackets, 0L /* operations */));
1790     }
1791 
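    // Updates the remaining alert quota and notifies the stats provider once it reaches zero.
    // Illustrative arithmetic (made-up numbers): with mRemainingAlertQuota == 1000 bytes and a
    // polling round that observes 600 rx + 600 tx offloaded bytes,
    // updateQuotaAndStatsFromSnapshot computes max(1000 - 1200, 0) == 0, and this method then
    // calls notifyAlertReached().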
1792     private void updateAlertQuota(long newQuota) {
1793         if (newQuota < QUOTA_UNLIMITED) {
1794             throw new IllegalArgumentException("invalid quota value " + newQuota);
1795         }
1796         if (mRemainingAlertQuota == newQuota) return;
1797 
1798         mRemainingAlertQuota = newQuota;
1799         if (mRemainingAlertQuota == 0) {
1800             mLog.i("onAlertReached");
1801             if (mStatsProvider != null) mStatsProvider.notifyAlertReached();
1802         }
1803     }
1804 
1805     private void updateQuotaAndStatsFromSnapshot(
1806             @NonNull final SparseArray<TetherStatsValue> tetherStatsList) {
1807         long usedAlertQuota = 0;
1808         for (int i = 0; i < tetherStatsList.size(); i++) {
1809             final Integer ifIndex = tetherStatsList.keyAt(i);
1810             final TetherStatsValue tetherStats = tetherStatsList.valueAt(i);
1811             final ForwardedStats curr = new ForwardedStats(tetherStats);
1812             final ForwardedStats base = mStats.get(ifIndex);
1813             final ForwardedStats diff = (base != null) ? curr.subtract(base) : curr;
1814             usedAlertQuota += diff.rxBytes + diff.txBytes;
1815 
1816             // Update the local cache for counting tether stats delta.
1817             mStats.put(ifIndex, curr);
1818 
1819             // Update the accumulated tether stats delta to the stats provider for the service
1820             // querying.
1821             if (mStatsProvider != null) {
1822                 try {
1823                     mStatsProvider.accumulateDiff(
1824                             buildNetworkStats(StatsType.STATS_PER_IFACE, ifIndex, diff),
1825                             buildNetworkStats(StatsType.STATS_PER_UID, ifIndex, diff));
1826                 } catch (ArrayIndexOutOfBoundsException e) {
1827                     Log.wtf(TAG, "Fail to update the accumulated stats delta for interface index "
1828                             + ifIndex + " : ", e);
1829                 }
1830             }
1831         }
1832 
1833         if (mRemainingAlertQuota > 0 && usedAlertQuota > 0) {
1834             // Trim to zero if overshoot.
1835             final long newQuota = Math.max(mRemainingAlertQuota - usedAlertQuota, 0);
1836             updateAlertQuota(newQuota);
1837         }
1838 
1839         // TODO: Count the used limit quota for notifying data limit reached.
1840     }
1841 
1842     private void updateForwardedStats() {
1843         final SparseArray<TetherStatsValue> tetherStatsList =
1844                 mBpfCoordinatorShim.tetherOffloadGetStats();
1845 
1846         if (tetherStatsList == null) {
1847             mLog.e("Problem fetching tethering stats");
1848             return;
1849         }
1850 
1851         updateQuotaAndStatsFromSnapshot(tetherStatsList);
1852     }
1853 
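    // Example (made-up values): if the configured offload poll interval is 3000ms and
    // DEFAULT_TETHER_OFFLOAD_POLL_INTERVAL_MS is 5000ms, this returns 5000ms.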
1854     @VisibleForTesting
1855     int getPollingInterval() {
1856         // The valid range of the interval is DEFAULT_TETHER_OFFLOAD_POLL_INTERVAL_MS..max_long.
1857         // Ignore the config value if it is less than the minimum polling interval. Note that the
1858         // minimum interval is applied in the same way as OffloadController#isPollingStatsNeeded does.
1859         // TODO: Perhaps define a minimum polling interval constant.
1860         final TetheringConfiguration config = mDeps.getTetherConfig();
1861         final int configInterval = (config != null) ? config.getOffloadPollInterval() : 0;
1862         return Math.max(DEFAULT_TETHER_OFFLOAD_POLL_INTERVAL_MS, configInterval);
1863     }
1864 
1865     @Nullable
1866     private Inet4Address parseIPv4Address(byte[] addrBytes) {
1867         try {
1868             final InetAddress ia = Inet4Address.getByAddress(addrBytes);
1869             if (ia instanceof Inet4Address) return (Inet4Address) ia;
1870         } catch (UnknownHostException | IllegalArgumentException e) {
1871             mLog.e("Failed to parse IPv4 address: " + e);
1872         }
1873         return null;
1874     }
1875 
1876     // Update the CTA_TUPLE_ORIG timeout for a given conntrack entry. Note that a conntrack event
1877     // will also follow to notify of the updated timeout.
1878     private void updateConntrackTimeout(byte proto, Inet4Address src4, short srcPort,
1879             Inet4Address dst4, short dstPort) {
1880         if (src4 == null || dst4 == null) return;
1881 
1882         // TODO: consider acquiring the timeout setting from nf_conntrack_* variables.
1883         // - proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established
1884         // - proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream
1885         // See kernel document nf_conntrack-sysctl.txt.
1886         final int timeoutSec = (proto == OsConstants.IPPROTO_TCP)
1887                 ? NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED
1888                 : NF_CONNTRACK_UDP_TIMEOUT_STREAM;
1889         final byte[] msg = ConntrackMessage.newIPv4TimeoutUpdateRequest(
1890                 proto, src4, (int) srcPort, dst4, (int) dstPort, timeoutSec);
1891         try {
1892             NetlinkSocket.sendOneShotKernelMessage(OsConstants.NETLINK_NETFILTER, msg);
1893         } catch (ErrnoException e) {
1894             mLog.e("Error updating conntrack entry ("
1895                     + "proto: " + proto + ", "
1896                     + "src4: " + src4 + ", "
1897                     + "srcPort: " + Short.toUnsignedInt(srcPort) + ", "
1898                     + "dst4: " + dst4 + ", "
1899                     + "dstPort: " + Short.toUnsignedInt(dstPort) + "), "
1900                     + "msg: " + NetlinkConstants.hexify(msg) + ", "
1901                     + "e: " + e);
1902         }
1903     }
1904 
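    // Walks both BPF offload maps and, for entries used within the last
    // POLLING_CONNTRACK_TIMEOUT_MS, asks the kernel to extend the matching conntrack entry
    // timeouts, so that connections whose packets bypass the kernel via BPF offload are not
    // expired by conntrack while they are still active.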
1905     private void maybeRefreshConntrackTimeout() {
1906         final long now = mDeps.elapsedRealtimeNanos();
1907 
1908         // Reverse the source and destination {address, port} from the downstream value because
1909         // #updateConntrackTimeout refreshes the timeout of the netlink attribute CTA_TUPLE_ORIG,
1910         // which is the opposite direction of the downstream map value.
1911         mBpfCoordinatorShim.tetherOffloadRuleForEach(DOWNSTREAM, (k, v) -> {
1912             if ((now - v.lastUsed) / 1_000_000 < POLLING_CONNTRACK_TIMEOUT_MS) {
1913                 updateConntrackTimeout((byte) k.l4proto,
1914                         ipv4MappedAddressBytesToIpv4Address(v.dst46), (short) v.dstPort,
1915                         ipv4MappedAddressBytesToIpv4Address(v.src46), (short) v.srcPort);
1916             }
1917         });
1918 
1919         // TODO: Consider ignoring TCP traffic on the upstream and monitoring the downstream only,
1920         // because TCP traffic is bidirectional. There is probably no need to extend the timeout
1921         // from both directions for TCP.
1922         mBpfCoordinatorShim.tetherOffloadRuleForEach(UPSTREAM, (k, v) -> {
1923             if ((now - v.lastUsed) / 1_000_000 < POLLING_CONNTRACK_TIMEOUT_MS) {
1924                 updateConntrackTimeout((byte) k.l4proto, parseIPv4Address(k.src4),
1925                         (short) k.srcPort, parseIPv4Address(k.dst4), (short) k.dstPort);
1926             }
1927         });
1928     }
1929 
1930     private void maybeSchedulePollingStats() {
1931         if (!mPollingStarted) return;
1932 
1933         if (mHandler.hasCallbacks(mScheduledPollingStats)) {
1934             mHandler.removeCallbacks(mScheduledPollingStats);
1935         }
1936 
1937         mHandler.postDelayed(mScheduledPollingStats, getPollingInterval());
1938     }
1939 
1940     private void maybeSchedulePollingConntrackTimeout() {
1941         if (!mPollingStarted) return;
1942 
1943         if (mHandler.hasCallbacks(mScheduledPollingConntrackTimeout)) {
1944             mHandler.removeCallbacks(mScheduledPollingConntrackTimeout);
1945         }
1946 
1947         mHandler.postDelayed(mScheduledPollingConntrackTimeout, POLLING_CONNTRACK_TIMEOUT_MS);
1948     }
1949 
1950     // Return forwarding rule map. This is used for testing only.
1951     // Note that this can only be called on the handler thread.
1952     @NonNull
1953     @VisibleForTesting
1954     final HashMap<IpServer, LinkedHashMap<Inet6Address, Ipv6ForwardingRule>>
1955             getForwardingRulesForTesting() {
1956         return mIpv6ForwardingRules;
1957     }
1958 
1959     // Return upstream interface name map. This is used for testing only.
1960     // Note that this can only be called on the handler thread.
1961     @NonNull
1962     @VisibleForTesting
1963     final SparseArray<String> getInterfaceNamesForTesting() {
1964         return mInterfaceNames;
1965     }
1966 
1967     // Return BPF conntrack event consumer. This is used for testing only.
1968     // Note that this can only be called on the handler thread.
1969     @NonNull
1970     @VisibleForTesting
1971     final BpfConntrackEventConsumer getBpfConntrackEventConsumerForTesting() {
1972         return mBpfConntrackEventConsumer;
1973     }
1974 
1975     private static native String[] getBpfCounterNames();
1976 }
1977