/*
 * Copyright (c) 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cctype>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>

#include "BPFTable.h"
#include "bcc_exception.h"
#include "bcc_syms.h"
#include "bpf_module.h"
#include "linux/bpf.h"
#include "libbpf.h"
#include "table_storage.h"

static const int DEFAULT_PERF_BUFFER_PAGE_CNT = 8;

namespace ebpf {

struct open_probe_t {
  int perf_event_fd;
  std::string func;
  std::vector<std::pair<int, int>>* per_cpu_fd;
};

class USDT;

class BPF {
 public:
  static const int BPF_MAX_STACK_DEPTH = 127;

  explicit BPF(unsigned int flag = 0, TableStorage* ts = nullptr,
               bool rw_engine_enabled = bpf_module_rw_engine_enabled(),
               const std::string& maps_ns = "",
               bool allow_rlimit = true)
      : flag_(flag),
        bsymcache_(NULL),
        bpf_module_(new BPFModule(flag, ts, rw_engine_enabled, maps_ns,
                                  allow_rlimit)) {}

  StatusTuple init(const std::string& bpf_program,
                   const std::vector<std::string>& cflags = {},
                   const std::vector<USDT>& usdt = {});

  StatusTuple init_usdt(const USDT& usdt);

  ~BPF();
  StatusTuple detach_all();

  StatusTuple attach_kprobe(const std::string& kernel_func,
                            const std::string& probe_func,
                            uint64_t kernel_func_offset = 0,
                            bpf_probe_attach_type attach_type = BPF_PROBE_ENTRY,
                            int maxactive = 0);
  StatusTuple detach_kprobe(
      const std::string& kernel_func,
      bpf_probe_attach_type attach_type = BPF_PROBE_ENTRY);

  StatusTuple attach_uprobe(const std::string& binary_path,
                            const std::string& symbol,
                            const std::string& probe_func,
                            uint64_t symbol_addr = 0,
                            bpf_probe_attach_type attach_type = BPF_PROBE_ENTRY,
                            pid_t pid = -1,
                            uint64_t symbol_offset = 0,
                            uint32_t ref_ctr_offset = 0);
  StatusTuple detach_uprobe(const std::string& binary_path,
                            const std::string& symbol, uint64_t symbol_addr = 0,
                            bpf_probe_attach_type attach_type = BPF_PROBE_ENTRY,
                            pid_t pid = -1,
                            uint64_t symbol_offset = 0);
  StatusTuple attach_usdt(const USDT& usdt, pid_t pid = -1);
  StatusTuple attach_usdt_all();
  StatusTuple detach_usdt(const USDT& usdt, pid_t pid = -1);
  StatusTuple detach_usdt_all();

  StatusTuple attach_tracepoint(const std::string& tracepoint,
                                const std::string& probe_func);
  StatusTuple detach_tracepoint(const std::string& tracepoint);

  StatusTuple attach_raw_tracepoint(const std::string& tracepoint,
                                    const std::string& probe_func);
  StatusTuple detach_raw_tracepoint(const std::string& tracepoint);

  StatusTuple attach_perf_event(uint32_t ev_type, uint32_t ev_config,
                                const std::string& probe_func,
                                uint64_t sample_period, uint64_t sample_freq,
                                pid_t pid = -1, int cpu = -1,
                                int group_fd = -1);
  StatusTuple attach_perf_event_raw(void* perf_event_attr,
                                    const std::string& probe_func,
                                    pid_t pid = -1, int cpu = -1,
                                    int group_fd = -1,
                                    unsigned long extra_flags = 0);
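  // Usage sketch (illustrative, not part of the API): a typical caller
  // constructs a BPF object, feeds it program text via init(), then attaches
  // probes by kernel function and BPF function name. BPF_PROGRAM and
  // "on_openat" below are placeholders.
  //
  //   ebpf::BPF bpf;
  //   auto init_res = bpf.init(BPF_PROGRAM);  // BPF_PROGRAM: C source string
  //   if (init_res.code() != 0)
  //     std::cerr << init_res.msg() << std::endl;
  //   auto attach_res =
  //       bpf.attach_kprobe(bpf.get_syscall_fnname("openat"), "on_openat");
  //   if (attach_res.code() != 0)
  //     std::cerr << attach_res.msg() << std::endl;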
  StatusTuple detach_perf_event(uint32_t ev_type, uint32_t ev_config);
  StatusTuple detach_perf_event_raw(void* perf_event_attr);
  std::string get_syscall_fnname(const std::string& name);

  BPFTable get_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFTable(it->second);
    return BPFTable({});
  }

  template <class ValueType>
  BPFArrayTable<ValueType> get_array_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFArrayTable<ValueType>(it->second);
    return BPFArrayTable<ValueType>({});
  }

  template <class ValueType>
  BPFPercpuArrayTable<ValueType> get_percpu_array_table(
      const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFPercpuArrayTable<ValueType>(it->second);
    return BPFPercpuArrayTable<ValueType>({});
  }

  template <class KeyType, class ValueType>
  BPFHashTable<KeyType, ValueType> get_hash_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFHashTable<KeyType, ValueType>(it->second);
    return BPFHashTable<KeyType, ValueType>({});
  }

  template <class KeyType, class ValueType>
  BPFPercpuHashTable<KeyType, ValueType> get_percpu_hash_table(
      const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFPercpuHashTable<KeyType, ValueType>(it->second);
    return BPFPercpuHashTable<KeyType, ValueType>({});
  }
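  // Usage sketch (illustrative): the accessors above look a table up by the
  // name it has in the BPF program text and wrap it in a typed handle from
  // BPFTable.h. The table name "counts" and the key/value types below are
  // placeholders.
  //
  //   auto counts = bpf.get_hash_table<uint32_t, uint64_t>("counts");
  //   uint32_t key = 42;
  //   uint64_t value = 0;
  //   if (counts.get_value(key, value).code() == 0)
  //     std::cout << value << std::endl;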
  template <class ValueType>
  BPFSkStorageTable<ValueType> get_sk_storage_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFSkStorageTable<ValueType>(it->second);
    return BPFSkStorageTable<ValueType>({});
  }

  template <class ValueType>
  BPFInodeStorageTable<ValueType> get_inode_storage_table(
      const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFInodeStorageTable<ValueType>(it->second);
    return BPFInodeStorageTable<ValueType>({});
  }

  template <class ValueType>
  BPFTaskStorageTable<ValueType> get_task_storage_table(
      const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFTaskStorageTable<ValueType>(it->second);
    return BPFTaskStorageTable<ValueType>({});
  }

  template <class ValueType>
  BPFCgStorageTable<ValueType> get_cg_storage_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFCgStorageTable<ValueType>(it->second);
    return BPFCgStorageTable<ValueType>({});
  }

  template <class ValueType>
  BPFPercpuCgStorageTable<ValueType> get_percpu_cg_storage_table(
      const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFPercpuCgStorageTable<ValueType>(it->second);
    return BPFPercpuCgStorageTable<ValueType>({});
  }

  template <class ValueType>
  BPFQueueStackTable<ValueType> get_queuestack_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFQueueStackTable<ValueType>(it->second);
    return BPFQueueStackTable<ValueType>({});
  }

  void* get_bsymcache(void) {
    if (bsymcache_ == NULL) {
      bsymcache_ = bcc_buildsymcache_new();
    }
    return bsymcache_;
  }

  BPFProgTable get_prog_table(const std::string& name);

  BPFCgroupArray get_cgroup_array(const std::string& name);

  BPFDevmapTable get_devmap_table(const std::string& name);

  BPFXskmapTable get_xskmap_table(const std::string& name);

  BPFSockmapTable get_sockmap_table(const std::string& name);

  BPFSockhashTable get_sockhash_table(const std::string& name);

  BPFStackTable get_stack_table(const std::string& name,
                                bool use_debug_file = true,
                                bool check_debug_file_crc = true);

  BPFStackBuildIdTable get_stackbuildid_table(
      const std::string& name, bool use_debug_file = true,
      bool check_debug_file_crc = true);

  template <class KeyType>
  BPFMapInMapTable<KeyType> get_map_in_map_table(const std::string& name) {
    TableStorage::iterator it;
    if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return BPFMapInMapTable<KeyType>(it->second);
    return BPFMapInMapTable<KeyType>({});
  }

  bool add_module(std::string module);

  StatusTuple open_perf_event(const std::string& name, uint32_t type,
                              uint64_t config);

  StatusTuple close_perf_event(const std::string& name);

  // Open a Perf Buffer of the given name, providing a callback and callback
  // cookie to use when polling. The BPF class owns the opened Perf Buffer and
  // will free it on-demand or on destruction.
  StatusTuple open_perf_buffer(const std::string& name, perf_reader_raw_cb cb,
                               perf_reader_lost_cb lost_cb = nullptr,
                               void* cb_cookie = nullptr,
                               int page_cnt = DEFAULT_PERF_BUFFER_PAGE_CNT);
  // Close and free the Perf Buffer of the given name.
  StatusTuple close_perf_buffer(const std::string& name);
  // Obtain a pointer to the opened BPFPerfBuffer instance of the given name.
  // Returns nullptr if no such open Perf Buffer exists.
  BPFPerfBuffer* get_perf_buffer(const std::string& name);
  // Poll an opened Perf Buffer of the given name with the given timeout, using
  // the callback provided when opening. Does nothing if no such open Perf
  // Buffer exists.
  // Returns:
  //   -1 on error or if no perf buffer with such name exists;
  //   0 if no data was available before the timeout;
  //   the number of CPUs that have new data, otherwise.
  int poll_perf_buffer(const std::string& name, int timeout_ms = -1);
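  // Usage sketch (illustrative): open a perf buffer declared as
  // BPF_PERF_OUTPUT(events) in the program text, poll it in a loop, then close
  // it. The callback follows the perf_reader_raw_cb signature from libbpf.h;
  // "events", handle_event, and the running flag are placeholders.
  //
  //   void handle_event(void* cb_cookie, void* data, int data_size) {
  //     // cast `data` to the event struct emitted by the BPF program
  //   }
  //   ...
  //   bpf.open_perf_buffer("events", &handle_event);
  //   while (running)
  //     bpf.poll_perf_buffer("events", 100 /* ms */);
  //   bpf.close_perf_buffer("events");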
  StatusTuple load_func(const std::string& func_name, enum bpf_prog_type type,
                        int& fd, unsigned flags = 0);
  StatusTuple unload_func(const std::string& func_name);

  StatusTuple attach_func(int prog_fd, int attachable_fd,
                          enum bpf_attach_type attach_type,
                          uint64_t flags);
  StatusTuple detach_func(int prog_fd, int attachable_fd,
                          enum bpf_attach_type attach_type);

  int free_bcc_memory();

 private:
  std::string get_kprobe_event(const std::string& kernel_func,
                               bpf_probe_attach_type type);
  std::string get_uprobe_event(const std::string& binary_path, uint64_t offset,
                               bpf_probe_attach_type type, pid_t pid);

  StatusTuple attach_usdt_without_validation(const USDT& usdt, pid_t pid);
  StatusTuple detach_usdt_without_validation(const USDT& usdt, pid_t pid);

  StatusTuple detach_kprobe_event(const std::string& event, open_probe_t& attr);
  StatusTuple detach_uprobe_event(const std::string& event, open_probe_t& attr);
  StatusTuple detach_tracepoint_event(const std::string& tracepoint,
                                      open_probe_t& attr);
  StatusTuple detach_raw_tracepoint_event(const std::string& tracepoint,
                                          open_probe_t& attr);
  StatusTuple detach_perf_event_all_cpu(open_probe_t& attr);

  std::string attach_type_debug(bpf_probe_attach_type type) {
    switch (type) {
    case BPF_PROBE_ENTRY:
      return "";
    case BPF_PROBE_RETURN:
      return "return ";
    }
    return "ERROR";
  }

  std::string attach_type_prefix(bpf_probe_attach_type type) {
    switch (type) {
    case BPF_PROBE_ENTRY:
      return "p";
    case BPF_PROBE_RETURN:
      return "r";
    }
    return "ERROR";
  }

  static bool kprobe_event_validator(char c) {
    return (c != '+') && (c != '.');
  }

  static bool uprobe_path_validator(char c) {
    return std::isalpha(c) || std::isdigit(c) || (c == '_');
  }

  StatusTuple check_binary_symbol(const std::string& binary_path,
                                  const std::string& symbol,
                                  uint64_t symbol_addr, std::string& module_res,
                                  uint64_t& offset_res,
                                  uint64_t symbol_offset = 0);

  void init_fail_reset();

  int flag_;

  void* bsymcache_;

  std::unique_ptr<std::string> syscall_prefix_;

  std::unique_ptr<BPFModule> bpf_module_;

  std::map<std::string, int> funcs_;

  std::vector<USDT> usdt_;
  std::string all_bpf_program_;

  std::map<std::string, open_probe_t> kprobes_;
  std::map<std::string, open_probe_t> uprobes_;
  std::map<std::string, open_probe_t> tracepoints_;
  std::map<std::string, open_probe_t> raw_tracepoints_;
  std::map<std::string, BPFPerfBuffer*> perf_buffers_;
  std::map<std::string, BPFPerfEventArray*> perf_event_arrays_;
  std::map<std::pair<uint32_t, uint32_t>, open_probe_t> perf_events_;
};

class USDT {
 public:
  USDT(const std::string& binary_path, const std::string& provider,
       const std::string& name, const std::string& probe_func);
  USDT(pid_t pid, const std::string& provider, const std::string& name,
       const std::string& probe_func);
  USDT(const std::string& binary_path, pid_t pid, const std::string& provider,
       const std::string& name, const std::string& probe_func);
  USDT(const USDT& usdt);
  USDT(USDT&& usdt) noexcept;

  const std::string& binary_path() const { return binary_path_; }
  pid_t pid() const { return pid_; }
  const std::string& provider() const { return provider_; }
  const std::string& name() const { return name_; }
  const std::string& probe_func() const { return probe_func_; }

  StatusTuple init();

  bool operator==(const USDT& other) const;

  std::string print_name() const {
    return provider_ + ":" + name_ + " from binary " + binary_path_ + " PID " +
           std::to_string(pid_) + " for probe " + probe_func_;
  }

  friend std::ostream& operator<<(std::ostream& out, const USDT& usdt) {
    return out << usdt.provider_ << ":" << usdt.name_ << " from binary "
               << usdt.binary_path_ << " PID " << usdt.pid_ << " for probe "
               << usdt.probe_func_;
  }

  // When the kludge flag is set to 1 (default), we will only match on inode
  // when searching for modules in /proc/PID/maps that might contain the
  // tracepoint we're looking for.
  // By setting this to 0, we will match on both inode and
  // (dev_major, dev_minor), which is a more accurate way to uniquely
  // identify a file, but may fail depending on the filesystem backing the
  // target file (see bcc#2715).
  //
  // This hack exists because btrfs and overlayfs report different device
  // numbers for files in /proc/PID/maps vs the stat syscall. Don't use it
  // unless you've had issues with inode collisions. Both btrfs and overlayfs
  // are known to require inode-only resolution to accurately match a file.
  //
  // set_probe_matching_kludge(0) must be called before USDTs are submitted to
  // BPF::init().
  int set_probe_matching_kludge(uint8_t kludge);
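  // Usage sketch (illustrative): construct the USDT before calling BPF::init()
  // and, if needed, flip the kludge first so the setting is in effect when the
  // probe is resolved. The binary path, provider/probe names, and handler name
  // below are placeholders.
  //
  //   ebpf::USDT u("/usr/bin/python3", "python", "function__entry",
  //                "on_function_entry");
  //   u.set_probe_matching_kludge(0);  // optional, see the comment above
  //   ebpf::BPF bpf;
  //   bpf.init(BPF_PROGRAM, {}, {u});
  //   bpf.attach_usdt(u);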
 private:
  bool initialized_;

  std::string binary_path_;
  pid_t pid_;

  std::string provider_;
  std::string name_;
  std::string probe_func_;

  std::unique_ptr<void, std::function<void(void*)>> probe_;
  std::string program_text_;

  uint8_t mod_match_inode_only_;

  friend class BPF;
};

}  // namespace ebpf