• Home
  • Raw
  • Download

Lines Matching +full:ftrace +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
8 * Originally ported from the -rt patch by:
13 * Copyright (C) 2004-2006 Ingo Molnar
29 #include <linux/ftrace.h>
87 /* ftrace_enabled is a method to turn ftrace on or off */
100 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) in ftrace_pids_enabled()
103 tr = ops->private; in ftrace_pids_enabled()
105 return tr->function_pids != NULL || tr->function_no_pids != NULL; in ftrace_pids_enabled()
134 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { in ftrace_ops_init()
135 mutex_init(&ops->local_hash.regex_lock); in ftrace_ops_init()
136 ops->func_hash = &ops->local_hash; in ftrace_ops_init()
137 ops->flags |= FTRACE_OPS_FL_INITIALIZED; in ftrace_ops_init()
145 struct trace_array *tr = op->private; in ftrace_pid_func()
149 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); in ftrace_pid_func()
153 pid != current->pid) in ftrace_pid_func()
157 op->saved_func(ip, parent_ip, op, regs); in ftrace_pid_func()
172 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || in ftrace_ops_get_list_func()
200 } else if (rcu_dereference_protected(ftrace_ops_list->next, in update_ftrace_function()
261 rcu_assign_pointer(ops->next, *list); in add_ftrace_ops()
266 * the ops->next pointer is valid before another CPU sees in add_ftrace_ops()
283 rcu_dereference_protected(ops->next, in remove_ftrace_ops()
289 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) in remove_ftrace_ops()
294 return -1; in remove_ftrace_ops()
296 *p = (*p)->next; in remove_ftrace_ops()
304 if (ops->flags & FTRACE_OPS_FL_DELETED) in __register_ftrace_function()
305 return -EINVAL; in __register_ftrace_function()
307 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) in __register_ftrace_function()
308 return -EBUSY; in __register_ftrace_function()
316 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && in __register_ftrace_function()
317 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) in __register_ftrace_function()
318 return -EINVAL; in __register_ftrace_function()
320 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) in __register_ftrace_function()
321 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; in __register_ftrace_function()
323 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) in __register_ftrace_function()
324 return -EBUSY; in __register_ftrace_function()
327 ops->flags |= FTRACE_OPS_FL_DYNAMIC; in __register_ftrace_function()
332 ops->saved_func = ops->func; in __register_ftrace_function()
335 ops->func = ftrace_pid_func; in __register_ftrace_function()
349 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) in __unregister_ftrace_function()
350 return -EBUSY; in __unregister_ftrace_function()
360 ops->func = ops->saved_func; in __unregister_ftrace_function()
374 if (op->flags & FTRACE_OPS_FL_PID) { in ftrace_update_pid_func()
375 op->func = ftrace_pids_enabled(op) ? in ftrace_update_pid_func()
376 ftrace_pid_func : op->saved_func; in ftrace_update_pid_func()
410 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
417 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
437 if ((void *)rec >= (void *)&pg->records[pg->index]) { in function_stat_next()
438 pg = pg->next; in function_stat_next()
441 rec = &pg->records[0]; in function_stat_next()
442 if (!rec->counter) in function_stat_next()
454 if (!stat || !stat->start) in function_stat_start()
457 return function_stat_next(&stat->start->records[0], 0); in function_stat_start()
467 if (a->time < b->time) in function_stat_cmp()
468 return -1; in function_stat_cmp()
469 if (a->time > b->time) in function_stat_cmp()
481 if (a->counter < b->counter) in function_stat_cmp()
482 return -1; in function_stat_cmp()
483 if (a->counter > b->counter) in function_stat_cmp()
495 " -------- " in function_stat_headers()
496 "--- ---- --- ---\n"); in function_stat_headers()
499 " -------- ---\n"); in function_stat_headers()
517 if (unlikely(rec->counter == 0)) { in function_stat_show()
518 ret = -EBUSY; in function_stat_show()
523 avg = div64_ul(rec->time, rec->counter); in function_stat_show()
528 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); in function_stat_show()
529 seq_printf(m, " %-30.30s %10lu", str, rec->counter); in function_stat_show()
535 if (rec->counter <= 1) in function_stat_show()
540 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) in function_stat_show()
542 stddev = rec->counter * rec->time_squared - in function_stat_show()
543 rec->time * rec->time; in function_stat_show()
546 * Divide only 1000 for ns^2 -> us^2 conversion. in function_stat_show()
550 rec->counter * (rec->counter - 1) * 1000); in function_stat_show()
554 trace_print_graph_duration(rec->time, &s); in function_stat_show()
572 pg = stat->pages = stat->start; in ftrace_profile_reset()
575 memset(pg->records, 0, PROFILE_RECORDS_SIZE); in ftrace_profile_reset()
576 pg->index = 0; in ftrace_profile_reset()
577 pg = pg->next; in ftrace_profile_reset()
580 memset(stat->hash, 0, in ftrace_profile_reset()
592 if (stat->pages) in ftrace_profile_pages_init()
595 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
596 if (!stat->pages) in ftrace_profile_pages_init()
597 return -ENOMEM; in ftrace_profile_pages_init()
612 pg = stat->start = stat->pages; in ftrace_profile_pages_init()
617 pg->next = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
618 if (!pg->next) in ftrace_profile_pages_init()
620 pg = pg->next; in ftrace_profile_pages_init()
626 pg = stat->start; in ftrace_profile_pages_init()
630 pg = pg->next; in ftrace_profile_pages_init()
634 stat->pages = NULL; in ftrace_profile_pages_init()
635 stat->start = NULL; in ftrace_profile_pages_init()
637 return -ENOMEM; in ftrace_profile_pages_init()
643 int size; in ftrace_profile_init_cpu() local
647 if (stat->hash) { in ftrace_profile_init_cpu()
657 size = FTRACE_PROFILE_HASH_SIZE; in ftrace_profile_init_cpu()
659 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); in ftrace_profile_init_cpu()
661 if (!stat->hash) in ftrace_profile_init_cpu()
662 return -ENOMEM; in ftrace_profile_init_cpu()
666 kfree(stat->hash); in ftrace_profile_init_cpu()
667 stat->hash = NULL; in ftrace_profile_init_cpu()
668 return -ENOMEM; in ftrace_profile_init_cpu()
697 hhd = &stat->hash[key]; in ftrace_find_profiled_func()
703 if (rec->ip == ip) in ftrace_find_profiled_func()
715 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); in ftrace_add_profile()
716 hlist_add_head_rcu(&rec->node, &stat->hash[key]); in ftrace_add_profile()
728 if (atomic_inc_return(&stat->disabled) != 1) in ftrace_profile_alloc()
739 if (stat->pages->index == PROFILES_PER_PAGE) { in ftrace_profile_alloc()
740 if (!stat->pages->next) in ftrace_profile_alloc()
742 stat->pages = stat->pages->next; in ftrace_profile_alloc()
745 rec = &stat->pages->records[stat->pages->index++]; in ftrace_profile_alloc()
746 rec->ip = ip; in ftrace_profile_alloc()
750 atomic_dec(&stat->disabled); in ftrace_profile_alloc()
769 if (!stat->hash || !ftrace_profile_enabled) in function_profile_call()
779 rec->counter++; in function_profile_call()
796 function_profile_call(trace->func, 0, NULL, NULL); in profile_graph_entry()
799 if (!current->ret_stack) in profile_graph_entry()
804 ret_stack->subtime = 0; in profile_graph_entry()
819 if (!stat->hash || !ftrace_profile_enabled) in profile_graph_return()
823 if (!trace->calltime) in profile_graph_return()
826 calltime = trace->rettime - trace->calltime; in profile_graph_return()
833 ret_stack->subtime += calltime; in profile_graph_return()
836 if (ret_stack && ret_stack->subtime < calltime) in profile_graph_return()
837 calltime -= ret_stack->subtime; in profile_graph_return()
842 rec = ftrace_find_profiled_func(stat, trace->func); in profile_graph_return()
844 rec->time += calltime; in profile_graph_return()
845 rec->time_squared += calltime * calltime; in profile_graph_return()
979 stat->stat = function_stats; in ftrace_profile_tracefs()
980 stat->stat.name = name; in ftrace_profile_tracefs()
981 ret = register_stat_tracer(&stat->stat); in ftrace_profile_tracefs()
1014 # error Dynamic ftrace depends on MCOUNT_RECORD
1049 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1067 if (op->trampoline && op->trampoline_size) in ftrace_ops_trampoline()
1068 if (addr >= op->trampoline && in ftrace_ops_trampoline()
1069 addr < op->trampoline + op->trampoline_size) { in ftrace_ops_trampoline()
1106 if (hash->size_bits > 0) in ftrace_hash_key()
1107 return hash_long(ip, hash->size_bits); in ftrace_hash_key()
1121 hhd = &hash->buckets[key]; in __ftrace_lookup_ip()
1124 if (entry->ip == ip) in __ftrace_lookup_ip()
1131 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1155 key = ftrace_hash_key(hash, entry->ip); in __add_hash_entry()
1156 hhd = &hash->buckets[key]; in __add_hash_entry()
1157 hlist_add_head(&entry->hlist, hhd); in __add_hash_entry()
1158 hash->count++; in __add_hash_entry()
1167 return -ENOMEM; in add_hash_entry()
1169 entry->ip = ip; in add_hash_entry()
1179 hlist_del(&entry->hlist); in free_hash_entry()
1181 hash->count--; in free_hash_entry()
1188 hlist_del_rcu(&entry->hlist); in remove_hash_entry()
1189 hash->count--; in remove_hash_entry()
1197 int size = 1 << hash->size_bits; in ftrace_hash_clear() local
1200 if (!hash->count) in ftrace_hash_clear()
1203 for (i = 0; i < size; i++) { in ftrace_hash_clear()
1204 hhd = &hash->buckets[i]; in ftrace_hash_clear()
1208 FTRACE_WARN_ON(hash->count); in ftrace_hash_clear()
1213 list_del(&ftrace_mod->list); in free_ftrace_mod()
1214 kfree(ftrace_mod->module); in free_ftrace_mod()
1215 kfree(ftrace_mod->func); in free_ftrace_mod()
1238 kfree(hash->buckets); in free_ftrace_hash()
1254 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); in free_ftrace_hash_rcu()
1260 free_ftrace_hash(ops->func_hash->filter_hash); in ftrace_free_filter()
1261 free_ftrace_hash(ops->func_hash->notrace_hash); in ftrace_free_filter()
1267 int size; in alloc_ftrace_hash() local
1273 size = 1 << size_bits; in alloc_ftrace_hash()
1274 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); in alloc_ftrace_hash()
1276 if (!hash->buckets) { in alloc_ftrace_hash()
1281 hash->size_bits = size_bits; in alloc_ftrace_hash()
1292 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; in ftrace_add_mod()
1296 return -ENOMEM; in ftrace_add_mod()
1298 INIT_LIST_HEAD(&ftrace_mod->list); in ftrace_add_mod()
1299 ftrace_mod->func = kstrdup(func, GFP_KERNEL); in ftrace_add_mod()
1300 ftrace_mod->module = kstrdup(module, GFP_KERNEL); in ftrace_add_mod()
1301 ftrace_mod->enable = enable; in ftrace_add_mod()
1303 if (!ftrace_mod->func || !ftrace_mod->module) in ftrace_add_mod()
1306 list_add(&ftrace_mod->list, mod_head); in ftrace_add_mod()
1313 return -ENOMEM; in ftrace_add_mod()
1321 int size; in alloc_and_copy_ftrace_hash() local
1330 new_hash->flags = hash->flags; in alloc_and_copy_ftrace_hash()
1336 size = 1 << hash->size_bits; in alloc_and_copy_ftrace_hash()
1337 for (i = 0; i < size; i++) { in alloc_and_copy_ftrace_hash()
1338 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in alloc_and_copy_ftrace_hash()
1339 ret = add_hash_entry(new_hash, entry->ip); in alloc_and_copy_ftrace_hash()
1345 FTRACE_WARN_ON(new_hash->count != hash->count); in alloc_and_copy_ftrace_hash()
1362 static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size) in dup_hash() argument
1372 * Use around half the size (max bit of it), but in dup_hash()
1373 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits). in dup_hash()
1375 bits = fls(size / 2); in dup_hash()
1385 new_hash->flags = src->flags; in dup_hash()
1387 size = 1 << src->size_bits; in dup_hash()
1388 for (i = 0; i < size; i++) { in dup_hash()
1389 hhd = &src->buckets[i]; in dup_hash()
1401 int size = src->count; in __ftrace_hash_move() local
1409 return dup_hash(src, size); in __ftrace_hash_move()
1420 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) in ftrace_hash_move()
1421 return -EINVAL; in ftrace_hash_move()
1425 return -ENOMEM; in ftrace_hash_move()
1459 return (ftrace_hash_empty(hash->filter_hash) || in hash_contains_ip()
1460 __ftrace_lookup_ip(hash->filter_hash, ip)) && in hash_contains_ip()
1461 (ftrace_hash_empty(hash->notrace_hash) || in hash_contains_ip()
1462 !__ftrace_lookup_ip(hash->notrace_hash, ip)); in hash_contains_ip()
1467 * the ops->func or not.
1469 * It's a match if the ip is in the ops->filter_hash or
1472 * the ip is not in the ops->notrace_hash.
1485 * There's a small race when adding ops that the ftrace handler in ftrace_ops_test()
1489 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) in ftrace_ops_test()
1493 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); in ftrace_ops_test()
1494 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); in ftrace_ops_test()
1509 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1511 for (_____i = 0; _____i < pg->index; _____i++) { \
1512 rec = &pg->records[_____i];
1524 if (key->flags < rec->ip) in ftrace_cmp_recs()
1525 return -1; in ftrace_cmp_recs()
1526 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) in ftrace_cmp_recs()
1540 for (pg = ftrace_pages_start; pg; pg = pg->next) { in lookup_rec()
1541 if (pg->index == 0 || in lookup_rec()
1542 end < pg->records[0].ip || in lookup_rec()
1543 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in lookup_rec()
1545 rec = bsearch(&key, pg->records, pg->index, in lookup_rec()
1555 * ftrace_location_range - return the first address of a traced location
1561 * Returns rec->ip if the related ftrace location is a least partly within
1563 * that is either a NOP or call to the function tracer. It checks the ftrace
1574 ip = rec->ip; in ftrace_location_range()
1581 * ftrace_location - return true if the ip giving is a traced location
1584 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1586 * the function tracer. It checks the ftrace internal tables to
1595 * ftrace_text_reserved - return true if range contains an ftrace location
1599 * Returns 1 if @start and @end contains a ftrace location.
1601 * the function tracer. It checks the ftrace internal tables to
1621 ops != &ftrace_list_end; ops = ops->next) { in test_rec_ops_needs_regs()
1622 /* pass rec in as regs to have non-NULL val */ in test_rec_ops_needs_regs()
1623 if (ftrace_ops_test(ops, rec->ip, rec)) { in test_rec_ops_needs_regs()
1624 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { in test_rec_ops_needs_regs()
1654 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in __ftrace_hash_rec_update()
1669 hash = ops->func_hash->filter_hash; in __ftrace_hash_rec_update()
1670 other_hash = ops->func_hash->notrace_hash; in __ftrace_hash_rec_update()
1675 hash = ops->func_hash->notrace_hash; in __ftrace_hash_rec_update()
1676 other_hash = ops->func_hash->filter_hash; in __ftrace_hash_rec_update()
1690 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_rec_update()
1698 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) in __ftrace_hash_rec_update()
1701 in_hash = !!ftrace_lookup_ip(hash, rec->ip); in __ftrace_hash_rec_update()
1702 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); in __ftrace_hash_rec_update()
1724 rec->flags++; in __ftrace_hash_rec_update()
1728 if (ops->flags & FTRACE_OPS_FL_DIRECT) in __ftrace_hash_rec_update()
1729 rec->flags |= FTRACE_FL_DIRECT; in __ftrace_hash_rec_update()
1736 if (ftrace_rec_count(rec) == 1 && ops->trampoline) in __ftrace_hash_rec_update()
1737 rec->flags |= FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1745 rec->flags &= ~FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1751 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in __ftrace_hash_rec_update()
1752 rec->flags |= FTRACE_FL_REGS; in __ftrace_hash_rec_update()
1756 rec->flags--; in __ftrace_hash_rec_update()
1764 if (ops->flags & FTRACE_OPS_FL_DIRECT) in __ftrace_hash_rec_update()
1765 rec->flags &= ~FTRACE_FL_DIRECT; in __ftrace_hash_rec_update()
1774 rec->flags & FTRACE_FL_REGS && in __ftrace_hash_rec_update()
1775 ops->flags & FTRACE_OPS_FL_SAVE_REGS) { in __ftrace_hash_rec_update()
1777 rec->flags &= ~FTRACE_FL_REGS; in __ftrace_hash_rec_update()
1789 rec->flags |= FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1791 rec->flags &= ~FTRACE_FL_TRAMP; in __ftrace_hash_rec_update()
1804 if (!all && count == hash->count) in __ftrace_hash_rec_update()
1830 if (ops->func_hash != &global_ops.local_hash) in ftrace_hash_rec_update_modify()
1841 if (op->func_hash == &global_ops.local_hash) in ftrace_hash_rec_update_modify()
1860 * or no update is needed, -EBUSY if it detects a conflict of the flag
1861 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1863 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1864 * - If the hash is EMPTY_HASH, it hits nothing
1865 * - Anything else hits the recs which match the hash entries.
1876 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in __ftrace_hash_update_ipmodify()
1879 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in __ftrace_hash_update_ipmodify()
1887 return -EINVAL; in __ftrace_hash_update_ipmodify()
1889 /* Update rec->flags */ in __ftrace_hash_update_ipmodify()
1892 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_update_ipmodify()
1896 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in __ftrace_hash_update_ipmodify()
1897 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); in __ftrace_hash_update_ipmodify()
1903 if (rec->flags & FTRACE_FL_IPMODIFY) in __ftrace_hash_update_ipmodify()
1905 rec->flags |= FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
1907 rec->flags &= ~FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
1918 if (rec->flags & FTRACE_FL_DISABLED) in __ftrace_hash_update_ipmodify()
1924 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in __ftrace_hash_update_ipmodify()
1925 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); in __ftrace_hash_update_ipmodify()
1930 rec->flags &= ~FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
1932 rec->flags |= FTRACE_FL_IPMODIFY; in __ftrace_hash_update_ipmodify()
1936 return -EBUSY; in __ftrace_hash_update_ipmodify()
1941 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_enable()
1952 struct ftrace_hash *hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_disable()
1963 struct ftrace_hash *old_hash = ops->func_hash->filter_hash; in ftrace_hash_ipmodify_update()
1999 pr_info("Initializing ftrace call sites\n"); in print_bug_type()
2002 pr_info("Setting ftrace call site to NOP\n"); in print_bug_type()
2005 pr_info("Setting ftrace call site to call ftrace function\n"); in print_bug_type()
2008 pr_info("Updating ftrace call site to call a different ftrace function\n"); in print_bug_type()
2014 * ftrace_bug - report and shutdown function tracer
2021 * EFAULT - if the problem happens on reading the @ip address
2022 * EINVAL - if what is read at @ip is not what was expected
2023 * EPERM - if the problem happens on writing to the @ip address
2027 unsigned long ip = rec ? rec->ip : 0; in ftrace_bug()
2029 pr_info("------------[ ftrace bug ]------------\n"); in ftrace_bug()
2032 case -EFAULT: in ftrace_bug()
2033 pr_info("ftrace faulted on modifying "); in ftrace_bug()
2036 case -EINVAL: in ftrace_bug()
2037 pr_info("ftrace failed to modify "); in ftrace_bug()
2046 case -EPERM: in ftrace_bug()
2047 pr_info("ftrace faulted on writing "); in ftrace_bug()
2051 pr_info("ftrace faulted on unknown error "); in ftrace_bug()
2058 pr_info("ftrace record flags: %lx\n", rec->flags); in ftrace_bug()
2060 rec->flags & FTRACE_FL_REGS ? " R" : " "); in ftrace_bug()
2061 if (rec->flags & FTRACE_FL_TRAMP_EN) { in ftrace_bug()
2066 (void *)ops->trampoline, in ftrace_bug()
2067 (void *)ops->func); in ftrace_bug()
2087 if (rec->flags & FTRACE_FL_DISABLED) in ftrace_check_record()
2111 if (!(rec->flags & FTRACE_FL_REGS) != in ftrace_check_record()
2112 !(rec->flags & FTRACE_FL_REGS_EN)) in ftrace_check_record()
2115 if (!(rec->flags & FTRACE_FL_TRAMP) != in ftrace_check_record()
2116 !(rec->flags & FTRACE_FL_TRAMP_EN)) in ftrace_check_record()
2130 if (!(rec->flags & FTRACE_FL_DIRECT) != in ftrace_check_record()
2131 !(rec->flags & FTRACE_FL_DIRECT_EN)) in ftrace_check_record()
2133 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { in ftrace_check_record()
2139 if ((rec->flags & FTRACE_FL_ENABLED) == flag) in ftrace_check_record()
2144 flag ^= rec->flags & FTRACE_FL_ENABLED; in ftrace_check_record()
2147 rec->flags |= FTRACE_FL_ENABLED; in ftrace_check_record()
2149 if (rec->flags & FTRACE_FL_REGS) in ftrace_check_record()
2150 rec->flags |= FTRACE_FL_REGS_EN; in ftrace_check_record()
2152 rec->flags &= ~FTRACE_FL_REGS_EN; in ftrace_check_record()
2155 if (rec->flags & FTRACE_FL_TRAMP) in ftrace_check_record()
2156 rec->flags |= FTRACE_FL_TRAMP_EN; in ftrace_check_record()
2158 rec->flags &= ~FTRACE_FL_TRAMP_EN; in ftrace_check_record()
2164 * directly (no ftrace trampoline). in ftrace_check_record()
2167 if (rec->flags & FTRACE_FL_DIRECT) in ftrace_check_record()
2168 rec->flags |= FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2170 rec->flags &= ~FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2176 rec->flags &= ~FTRACE_FL_DIRECT_EN; in ftrace_check_record()
2186 * from the save regs, to a non-save regs function or in ftrace_check_record()
2201 rec->flags = 0; in ftrace_check_record()
2207 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | in ftrace_check_record()
2246 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_any()
2250 if (!op->trampoline) in ftrace_find_tramp_ops_any()
2253 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_any()
2264 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_any_other()
2268 if (op == op_exclude || !op->trampoline) in ftrace_find_tramp_ops_any_other()
2271 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_any_other()
2282 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_next()
2286 if (!op->trampoline) in ftrace_find_tramp_ops_next()
2289 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_next()
2300 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_curr()
2309 if (hash_contains_ip(ip, &removed_ops->old_hash)) in ftrace_find_tramp_ops_curr()
2333 if (!op->trampoline) in ftrace_find_tramp_ops_curr()
2340 if (op->flags & FTRACE_OPS_FL_ADDING) in ftrace_find_tramp_ops_curr()
2349 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && in ftrace_find_tramp_ops_curr()
2350 hash_contains_ip(ip, &op->old_hash)) in ftrace_find_tramp_ops_curr()
2357 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && in ftrace_find_tramp_ops_curr()
2358 hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_curr()
2370 unsigned long ip = rec->ip; in ftrace_find_tramp_ops_new()
2373 /* pass rec in as regs to have non-NULL val */ in ftrace_find_tramp_ops_new()
2374 if (hash_contains_ip(ip, op->func_hash)) in ftrace_find_tramp_ops_new()
2399 return entry->direct; in ftrace_find_rec_direct()
2431 * ftrace_get_addr_new - Get the call address to set to
2432 * @rec: The ftrace record descriptor
2445 if ((rec->flags & FTRACE_FL_DIRECT) && in ftrace_get_addr_new()
2447 addr = ftrace_find_rec_direct(rec->ip); in ftrace_get_addr_new()
2454 if (rec->flags & FTRACE_FL_TRAMP) { in ftrace_get_addr_new()
2456 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { in ftrace_get_addr_new()
2458 (void *)rec->ip, (void *)rec->ip, rec->flags); in ftrace_get_addr_new()
2459 /* Ftrace is shutting down, return anything */ in ftrace_get_addr_new()
2462 return ops->trampoline; in ftrace_get_addr_new()
2465 if (rec->flags & FTRACE_FL_REGS) in ftrace_get_addr_new()
2472 * ftrace_get_addr_curr - Get the call address that is already there
2473 * @rec: The ftrace record descriptor
2487 if (rec->flags & FTRACE_FL_DIRECT_EN) { in ftrace_get_addr_curr()
2488 addr = ftrace_find_rec_direct(rec->ip); in ftrace_get_addr_curr()
2495 if (rec->flags & FTRACE_FL_TRAMP_EN) { in ftrace_get_addr_curr()
2499 (void *)rec->ip, (void *)rec->ip); in ftrace_get_addr_curr()
2500 /* Ftrace is shutting down, return anything */ in ftrace_get_addr_curr()
2503 return ops->trampoline; in ftrace_get_addr_curr()
2506 if (rec->flags & FTRACE_FL_REGS_EN) in ftrace_get_addr_curr()
2545 return -1; /* unknown ftrace bug */ in __ftrace_replace_code()
2561 if (rec->flags & FTRACE_FL_DISABLED) in ftrace_replace_code()
2598 iter->pg = ftrace_pages_start; in ftrace_rec_iter_start()
2599 iter->index = 0; in ftrace_rec_iter_start()
2602 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_start()
2603 iter->pg = iter->pg->next; in ftrace_rec_iter_start()
2605 if (!iter->pg) in ftrace_rec_iter_start()
2619 iter->index++; in ftrace_rec_iter_next()
2621 if (iter->index >= iter->pg->index) { in ftrace_rec_iter_next()
2622 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2623 iter->index = 0; in ftrace_rec_iter_next()
2626 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_next()
2627 iter->pg = iter->pg->next; in ftrace_rec_iter_next()
2630 if (!iter->pg) in ftrace_rec_iter_next()
2644 return &iter->pg->records[iter->index]; in ftrace_rec_iter_record()
2741 * @command: The command to tell ftrace what to do
2787 ops->flags |= FTRACE_OPS_FL_MODIFYING; in ftrace_run_modify_code()
2788 ops->old_hash.filter_hash = old_hash->filter_hash; in ftrace_run_modify_code()
2789 ops->old_hash.notrace_hash = old_hash->notrace_hash; in ftrace_run_modify_code()
2791 ops->old_hash.filter_hash = NULL; in ftrace_run_modify_code()
2792 ops->old_hash.notrace_hash = NULL; in ftrace_run_modify_code()
2793 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; in ftrace_run_modify_code()
2809 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); in ftrace_add_trampoline_to_kallsyms()
2815 list_del_rcu(&ops->list); in ftrace_remove_trampoline_from_kallsyms()
2821 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2829 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && in ftrace_trampoline_free()
2830 ops->trampoline) { in ftrace_trampoline_free()
2835 perf_event_text_poke((void *)ops->trampoline, in ftrace_trampoline_free()
2836 (void *)ops->trampoline, in ftrace_trampoline_free()
2837 ops->trampoline_size, NULL, 0); in ftrace_trampoline_free()
2839 ops->trampoline, ops->trampoline_size, in ftrace_trampoline_free()
2873 return -ENODEV; in ftrace_startup()
2882 * Note that ftrace probes uses this to start up in ftrace_startup()
2889 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; in ftrace_startup()
2895 ftrace_start_up--; in ftrace_startup()
2896 ops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_startup()
2897 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) in ftrace_startup()
2908 * If ftrace is in an undefined state, we just remove ops from list in ftrace_startup()
2914 return -ENODEV; in ftrace_startup()
2917 ops->flags &= ~FTRACE_OPS_FL_ADDING; in ftrace_startup()
2927 return -ENODEV; in ftrace_shutdown()
2933 ftrace_start_up--; in ftrace_shutdown()
2935 * Just warn in case of unbalance, no need to kill ftrace, it's not in ftrace_shutdown()
2937 * further ftrace uses. in ftrace_shutdown()
2947 ops->flags &= ~FTRACE_OPS_FL_ENABLED; in ftrace_shutdown()
2961 ops->flags |= FTRACE_OPS_FL_REMOVING; in ftrace_shutdown()
2965 ops->old_hash.filter_hash = ops->func_hash->filter_hash; in ftrace_shutdown()
2966 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; in ftrace_shutdown()
2971 * If there's no more ops registered with ftrace, run a in ftrace_shutdown()
2980 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) in ftrace_shutdown()
2982 (void *)rec->ip, rec->flags); in ftrace_shutdown()
2986 ops->old_hash.filter_hash = NULL; in ftrace_shutdown()
2987 ops->old_hash.notrace_hash = NULL; in ftrace_shutdown()
2990 ops->flags &= ~FTRACE_OPS_FL_REMOVING; in ftrace_shutdown()
2999 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { in ftrace_shutdown()
3012 * while on a ftrace trampoline. Just scheduling a task on in ftrace_shutdown()
3035 /* ftrace_start_up is true if we want ftrace running */ in ftrace_startup_sysctl()
3051 /* ftrace_start_up is true if ftrace is running */ in ftrace_shutdown_sysctl()
3071 return ftrace_hash_empty(ops->func_hash->filter_hash) && in ops_traces_mod()
3072 ftrace_hash_empty(ops->func_hash->notrace_hash); in ops_traces_mod()
3086 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) in ops_references_rec()
3094 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && in ops_references_rec()
3095 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) in ops_references_rec()
3099 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) in ops_references_rec()
3119 * an entry in the ftrace data. Now, if ftrace is activated in ftrace_update_code()
3121 * read-only, the modification of enabling ftrace can fail if in ftrace_update_code()
3122 * the read-only is done while ftrace is converting the calls. in ftrace_update_code()
3125 * to read-only. in ftrace_update_code()
3130 for (pg = new_pgs; pg; pg = pg->next) { in ftrace_update_code()
3132 for (i = 0; i < pg->index; i++) { in ftrace_update_code()
3136 return -1; in ftrace_update_code()
3138 p = &pg->records[i]; in ftrace_update_code()
3139 p->flags = rec_flags; in ftrace_update_code()
3154 ftrace_update_time = stop - start; in ftrace_update_code()
3167 return -EINVAL; in ftrace_allocate_records()
3177 order--; in ftrace_allocate_records()
3180 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in ftrace_allocate_records()
3182 if (!pg->records) { in ftrace_allocate_records()
3183 /* if we can't allocate this size, try something smaller */ in ftrace_allocate_records()
3185 return -ENOMEM; in ftrace_allocate_records()
3186 order--; in ftrace_allocate_records()
3194 pg->order = order; in ftrace_allocate_records()
3207 if (pg->records) { in ftrace_free_pages()
3208 free_pages((unsigned long)pg->records, pg->order); in ftrace_free_pages()
3209 ftrace_number_of_pages -= 1 << pg->order; in ftrace_free_pages()
3211 pages = pg->next; in ftrace_free_pages()
3214 ftrace_number_of_groups--; in ftrace_free_pages()
3242 num_to_init -= cnt; in ftrace_allocate_pages()
3246 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); in ftrace_allocate_pages()
3247 if (!pg->next) in ftrace_allocate_pages()
3250 pg = pg->next; in ftrace_allocate_pages()
3257 pr_info("ftrace: FAILED to allocate memory for functions\n"); in ftrace_allocate_pages()
3284 struct ftrace_iterator *iter = m->private; in t_probe_next()
3285 struct trace_array *tr = iter->ops->private; in t_probe_next()
3291 int size; in t_probe_next() local
3294 iter->pos = *pos; in t_probe_next()
3299 func_probes = &tr->func_probes; in t_probe_next()
3303 if (!iter->probe) { in t_probe_next()
3304 next = func_probes->next; in t_probe_next()
3305 iter->probe = list_entry(next, struct ftrace_func_probe, list); in t_probe_next()
3308 if (iter->probe_entry) in t_probe_next()
3309 hnd = &iter->probe_entry->hlist; in t_probe_next()
3311 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3320 size = 1 << hash->size_bits; in t_probe_next()
3323 if (iter->pidx >= size) { in t_probe_next()
3324 if (iter->probe->list.next == func_probes) in t_probe_next()
3326 next = iter->probe->list.next; in t_probe_next()
3327 iter->probe = list_entry(next, struct ftrace_func_probe, list); in t_probe_next()
3328 hash = iter->probe->ops.func_hash->filter_hash; in t_probe_next()
3329 size = 1 << hash->size_bits; in t_probe_next()
3330 iter->pidx = 0; in t_probe_next()
3333 hhd = &hash->buckets[iter->pidx]; in t_probe_next()
3336 iter->pidx++; in t_probe_next()
3342 hnd = hhd->first; in t_probe_next()
3344 hnd = hnd->next; in t_probe_next()
3346 iter->pidx++; in t_probe_next()
3354 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); in t_probe_next()
3361 struct ftrace_iterator *iter = m->private; in t_probe_start()
3365 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) in t_probe_start()
3368 if (iter->mod_pos > *pos) in t_probe_start()
3371 iter->probe = NULL; in t_probe_start()
3372 iter->probe_entry = NULL; in t_probe_start()
3373 iter->pidx = 0; in t_probe_start()
3374 for (l = 0; l <= (*pos - iter->mod_pos); ) { in t_probe_start()
3383 iter->flags |= FTRACE_ITER_PROBE; in t_probe_start()
3395 probe = iter->probe; in t_probe_show()
3396 probe_entry = iter->probe_entry; in t_probe_show()
3399 return -EIO; in t_probe_show()
3401 probe_ops = probe->probe_ops; in t_probe_show()
3403 if (probe_ops->print) in t_probe_show()
3404 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); in t_probe_show()
3406 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, in t_probe_show()
3407 (void *)probe_ops->func); in t_probe_show()
3415 struct ftrace_iterator *iter = m->private; in t_mod_next()
3416 struct trace_array *tr = iter->tr; in t_mod_next()
3419 iter->pos = *pos; in t_mod_next()
3421 iter->mod_list = iter->mod_list->next; in t_mod_next()
3423 if (iter->mod_list == &tr->mod_trace || in t_mod_next()
3424 iter->mod_list == &tr->mod_notrace) { in t_mod_next()
3425 iter->flags &= ~FTRACE_ITER_MOD; in t_mod_next()
3429 iter->mod_pos = *pos; in t_mod_next()
3436 struct ftrace_iterator *iter = m->private; in t_mod_start()
3440 if (iter->func_pos > *pos) in t_mod_start()
3443 iter->mod_pos = iter->func_pos; in t_mod_start()
3446 if (!iter->tr) in t_mod_start()
3449 for (l = 0; l <= (*pos - iter->func_pos); ) { in t_mod_start()
3455 iter->flags &= ~FTRACE_ITER_MOD; in t_mod_start()
3460 iter->flags |= FTRACE_ITER_MOD; in t_mod_start()
3469 struct trace_array *tr = iter->tr; in t_mod_show()
3471 if (WARN_ON_ONCE(!iter->mod_list) || in t_mod_show()
3472 iter->mod_list == &tr->mod_trace || in t_mod_show()
3473 iter->mod_list == &tr->mod_notrace) in t_mod_show()
3474 return -EIO; in t_mod_show()
3476 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); in t_mod_show()
3478 if (ftrace_mod->func) in t_mod_show()
3479 seq_printf(m, "%s", ftrace_mod->func); in t_mod_show()
3483 seq_printf(m, ":mod:%s\n", ftrace_mod->module); in t_mod_show()
3491 struct ftrace_iterator *iter = m->private; in t_func_next()
3497 if (iter->idx >= iter->pg->index) { in t_func_next()
3498 if (iter->pg->next) { in t_func_next()
3499 iter->pg = iter->pg->next; in t_func_next()
3500 iter->idx = 0; in t_func_next()
3504 rec = &iter->pg->records[iter->idx++]; in t_func_next()
3505 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && in t_func_next()
3506 !ftrace_lookup_ip(iter->hash, rec->ip)) || in t_func_next()
3508 ((iter->flags & FTRACE_ITER_ENABLED) && in t_func_next()
3509 !(rec->flags & FTRACE_FL_ENABLED))) { in t_func_next()
3519 iter->pos = iter->func_pos = *pos; in t_func_next()
3520 iter->func = rec; in t_func_next()
3528 struct ftrace_iterator *iter = m->private; in t_next()
3535 if (iter->flags & FTRACE_ITER_PROBE) in t_next()
3538 if (iter->flags & FTRACE_ITER_MOD) in t_next()
3541 if (iter->flags & FTRACE_ITER_PRINTALL) { in t_next()
3557 iter->pos = 0; in reset_iter_read()
3558 iter->func_pos = 0; in reset_iter_read()
3559 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); in reset_iter_read()
3564 struct ftrace_iterator *iter = m->private; in t_start()
3576 if (*pos < iter->pos) in t_start()
3584 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && in t_start()
3585 ftrace_hash_empty(iter->hash)) { in t_start()
3586 iter->func_pos = 1; /* Account for the message */ in t_start()
3589 iter->flags |= FTRACE_ITER_PRINTALL; in t_start()
3591 iter->flags &= ~FTRACE_ITER_PROBE; in t_start()
3595 if (iter->flags & FTRACE_ITER_MOD) in t_start()
3603 iter->pg = ftrace_pages_start; in t_start()
3604 iter->idx = 0; in t_start()
3635 seq_printf(m, " ->%pS", ptr); in add_trampoline_func()
3640 struct ftrace_iterator *iter = m->private; in t_show()
3643 if (iter->flags & FTRACE_ITER_PROBE) in t_show()
3646 if (iter->flags & FTRACE_ITER_MOD) in t_show()
3649 if (iter->flags & FTRACE_ITER_PRINTALL) { in t_show()
3650 if (iter->flags & FTRACE_ITER_NOTRACE) in t_show()
3657 rec = iter->func; in t_show()
3662 seq_printf(m, "%ps", (void *)rec->ip); in t_show()
3663 if (iter->flags & FTRACE_ITER_ENABLED) { in t_show()
3668 rec->flags & FTRACE_FL_REGS ? " R" : " ", in t_show()
3669 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", in t_show()
3670 rec->flags & FTRACE_FL_DIRECT ? " D" : " "); in t_show()
3671 if (rec->flags & FTRACE_FL_TRAMP_EN) { in t_show()
3676 (void *)ops->trampoline, in t_show()
3677 (void *)ops->func); in t_show()
3686 if (rec->flags & FTRACE_FL_DIRECT) { in t_show()
3689 direct = ftrace_find_rec_direct(rec->ip); in t_show()
3691 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); in t_show()
3718 return -ENODEV; in ftrace_avail_open()
3722 return -ENOMEM; in ftrace_avail_open()
3724 iter->pg = ftrace_pages_start; in ftrace_avail_open()
3725 iter->ops = &global_ops; in ftrace_avail_open()
3746 return -ENOMEM; in ftrace_enabled_open()
3748 iter->pg = ftrace_pages_start; in ftrace_enabled_open()
3749 iter->flags = FTRACE_ITER_ENABLED; in ftrace_enabled_open()
3750 iter->ops = &global_ops; in ftrace_enabled_open()
3756 * ftrace_regex_open - initialize function tracer filter files
3778 struct trace_array *tr = ops->private; in ftrace_regex_open()
3779 int ret = -ENOMEM; in ftrace_regex_open()
3784 return -ENODEV; in ftrace_regex_open()
3787 return -ENODEV; in ftrace_regex_open()
3793 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) in ftrace_regex_open()
3796 iter->ops = ops; in ftrace_regex_open()
3797 iter->flags = flag; in ftrace_regex_open()
3798 iter->tr = tr; in ftrace_regex_open()
3800 mutex_lock(&ops->func_hash->regex_lock); in ftrace_regex_open()
3803 hash = ops->func_hash->notrace_hash; in ftrace_regex_open()
3804 mod_head = tr ? &tr->mod_notrace : NULL; in ftrace_regex_open()
3806 hash = ops->func_hash->filter_hash; in ftrace_regex_open()
3807 mod_head = tr ? &tr->mod_trace : NULL; in ftrace_regex_open()
3810 iter->mod_list = mod_head; in ftrace_regex_open()
3812 if (file->f_mode & FMODE_WRITE) { in ftrace_regex_open()
3815 if (file->f_flags & O_TRUNC) { in ftrace_regex_open()
3816 iter->hash = alloc_ftrace_hash(size_bits); in ftrace_regex_open()
3819 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); in ftrace_regex_open()
3822 if (!iter->hash) { in ftrace_regex_open()
3823 trace_parser_put(&iter->parser); in ftrace_regex_open()
3827 iter->hash = hash; in ftrace_regex_open()
3831 if (file->f_mode & FMODE_READ) { in ftrace_regex_open()
3832 iter->pg = ftrace_pages_start; in ftrace_regex_open()
3836 struct seq_file *m = file->private_data; in ftrace_regex_open()
3837 m->private = iter; in ftrace_regex_open()
3840 free_ftrace_hash(iter->hash); in ftrace_regex_open()
3841 trace_parser_put(&iter->parser); in ftrace_regex_open()
3844 file->private_data = iter; in ftrace_regex_open()
3847 mutex_unlock(&ops->func_hash->regex_lock); in ftrace_regex_open()
3862 struct ftrace_ops *ops = inode->i_private; in ftrace_filter_open()
3873 struct ftrace_ops *ops = inode->i_private; in ftrace_notrace_open()
3880 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3888 * If symbols in an architecture don't correspond exactly to the user-visible
3902 str = arch_ftrace_match_adjust(str, g->search); in ftrace_match()
3904 switch (g->type) { in ftrace_match()
3906 if (strcmp(str, g->search) == 0) in ftrace_match()
3910 if (strncmp(str, g->search, g->len) == 0) in ftrace_match()
3914 if (strstr(str, g->search)) in ftrace_match()
3919 if (slen >= g->len && in ftrace_match()
3920 memcmp(str + slen - g->len, g->search, g->len) == 0) in ftrace_match()
3924 if (glob_match(g->search, str)) in ftrace_match()
3938 entry = ftrace_lookup_ip(hash, rec->ip); in enter_record()
3950 ret = add_hash_entry(hash, rec->ip); in enter_record()
3959 long index = simple_strtoul(func_g->search, NULL, 0); in add_rec_by_index()
3964 if (--index < 0) in add_rec_by_index()
3968 if (pg->index <= index) { in add_rec_by_index()
3969 index -= pg->index; in add_rec_by_index()
3973 rec = &pg->records[index]; in add_rec_by_index()
3987 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); in ftrace_match_record()
3993 if (!mod_g->len) { in ftrace_match_record()
4011 if (!func_g->len) in ftrace_match_record()
4055 if (rec->flags & FTRACE_FL_DISABLED) in match_records()
4087 if (ops->flags & FTRACE_OPS_FL_ENABLED) { in ftrace_ops_update_code()
4097 if (ops->func_hash != &global_ops.local_hash) in ftrace_ops_update_code()
4101 if (op->func_hash == &global_ops.local_hash && in ftrace_ops_update_code()
4102 op->flags & FTRACE_OPS_FL_ENABLED) { in ftrace_ops_update_code()
4120 old_hash_ops.filter_hash = ops->func_hash->filter_hash; in ftrace_hash_move_and_update_ops()
4121 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; in ftrace_hash_move_and_update_ops()
4140 if (n > sizeof(modname) - 1) in module_exists()
4151 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; in cache_mod()
4159 ret = -EINVAL; in cache_mod()
4163 if (strcmp(ftrace_mod->module, module) != 0) in cache_mod()
4168 (ftrace_mod->func && in cache_mod()
4169 strcmp(ftrace_mod->func, func) == 0)) { in cache_mod()
4178 ret = -EINVAL; in cache_mod()
4205 mutex_lock(&ops->func_hash->regex_lock); in process_mod_list()
4208 orig_hash = &ops->func_hash->filter_hash; in process_mod_list()
4210 orig_hash = &ops->func_hash->notrace_hash; in process_mod_list()
4221 if (strcmp(ftrace_mod->module, mod) != 0) in process_mod_list()
4224 if (ftrace_mod->func) in process_mod_list()
4225 func = kstrdup(ftrace_mod->func, GFP_KERNEL); in process_mod_list()
4232 list_del(&ftrace_mod->list); in process_mod_list()
4233 list_add(&ftrace_mod->list, &process_mods); in process_mod_list()
4236 kfree(ftrace_mod->func); in process_mod_list()
4237 ftrace_mod->func = func; in process_mod_list()
4244 func = ftrace_mod->func; in process_mod_list()
4252 new_hash->flags &= ~FTRACE_HASH_FL_MOD; in process_mod_list()
4261 mutex_unlock(&ops->func_hash->regex_lock); in process_mod_list()
4277 if (!list_empty(&tr->mod_trace)) in process_cached_mods()
4278 process_mod_list(&tr->mod_trace, tr->ops, mod, true); in process_cached_mods()
4279 if (!list_empty(&tr->mod_notrace)) in process_cached_mods()
4280 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); in process_cached_mods()
4301 return -ENODEV; in ftrace_mod_callback()
4306 return -ENOMEM; in ftrace_mod_callback()
4343 probe_ops = probe->probe_ops; in function_trace_probe_call()
4351 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); in function_trace_probe_call()
4365 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4383 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4399 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_find_ip()
4404 return &map->data; in ftrace_func_mapper_find_ip()
4408 * ftrace_func_mapper_add_ip - Map some data to an ip
4421 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_add_ip()
4423 return -EBUSY; in ftrace_func_mapper_add_ip()
4427 return -ENOMEM; in ftrace_func_mapper_add_ip()
4429 map->entry.ip = ip; in ftrace_func_mapper_add_ip()
4430 map->data = data; in ftrace_func_mapper_add_ip()
4432 __add_hash_entry(&mapper->hash, &map->entry); in ftrace_func_mapper_add_ip()
4438 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4454 entry = ftrace_lookup_ip(&mapper->hash, ip); in ftrace_func_mapper_remove_ip()
4459 data = map->data; in ftrace_func_mapper_remove_ip()
4461 remove_hash_entry(&mapper->hash, entry); in ftrace_func_mapper_remove_ip()
4468 * free_ftrace_func_mapper - free a mapping of ips and data
4481 int size, i; in free_ftrace_func_mapper() local
4486 if (free_func && mapper->hash.count) { in free_ftrace_func_mapper()
4487 size = 1 << mapper->hash.size_bits; in free_ftrace_func_mapper()
4488 for (i = 0; i < size; i++) { in free_ftrace_func_mapper()
4489 hhd = &mapper->hash.buckets[i]; in free_ftrace_func_mapper()
4496 free_ftrace_hash(&mapper->hash); in free_ftrace_func_mapper()
4505 WARN_ON(probe->ref <= 0); in release_probe()
4508 probe->ref--; in release_probe()
4510 if (!probe->ref) { in release_probe()
4511 probe_ops = probe->probe_ops; in release_probe()
4514 * the probe->data itself in release_probe()
4516 if (probe_ops->free) in release_probe()
4517 probe_ops->free(probe_ops, probe->tr, 0, probe->data); in release_probe()
4518 list_del(&probe->list); in release_probe()
4530 probe->ref++; in acquire_probe_locked()
4544 int size; in register_ftrace_function_probe() local
4549 return -EINVAL; in register_ftrace_function_probe()
4553 return -EINVAL; in register_ftrace_function_probe()
4558 list_for_each_entry(probe, &tr->func_probes, list) { in register_ftrace_function_probe()
4559 if (probe->probe_ops == probe_ops) in register_ftrace_function_probe()
4562 if (&probe->list == &tr->func_probes) { in register_ftrace_function_probe()
4566 return -ENOMEM; in register_ftrace_function_probe()
4568 probe->probe_ops = probe_ops; in register_ftrace_function_probe()
4569 probe->ops.func = function_trace_probe_call; in register_ftrace_function_probe()
4570 probe->tr = tr; in register_ftrace_function_probe()
4571 ftrace_ops_init(&probe->ops); in register_ftrace_function_probe()
4572 list_add(&probe->list, &tr->func_probes); in register_ftrace_function_probe()
4580 * Note, there's a small window here that the func_hash->filter_hash in register_ftrace_function_probe()
4583 mutex_lock(&probe->ops.func_hash->regex_lock); in register_ftrace_function_probe()
4585 orig_hash = &probe->ops.func_hash->filter_hash; in register_ftrace_function_probe()
4590 ret = -ENOMEM; in register_ftrace_function_probe()
4598 ret = -EINVAL; in register_ftrace_function_probe()
4603 size = 1 << hash->size_bits; in register_ftrace_function_probe()
4604 for (i = 0; i < size; i++) { in register_ftrace_function_probe()
4605 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
4606 if (ftrace_lookup_ip(old_hash, entry->ip)) in register_ftrace_function_probe()
4613 if (probe_ops->init) { in register_ftrace_function_probe()
4614 ret = probe_ops->init(probe_ops, tr, in register_ftrace_function_probe()
4615 entry->ip, data, in register_ftrace_function_probe()
4616 &probe->data); in register_ftrace_function_probe()
4618 if (probe_ops->free && count) in register_ftrace_function_probe()
4619 probe_ops->free(probe_ops, tr, in register_ftrace_function_probe()
4620 0, probe->data); in register_ftrace_function_probe()
4621 probe->data = NULL; in register_ftrace_function_probe()
4633 ret = -EINVAL; in register_ftrace_function_probe()
4637 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, in register_ftrace_function_probe()
4643 probe->ref += count; in register_ftrace_function_probe()
4645 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) in register_ftrace_function_probe()
4646 ret = ftrace_startup(&probe->ops, 0); in register_ftrace_function_probe()
4654 mutex_unlock(&probe->ops.func_hash->regex_lock); in register_ftrace_function_probe()
4662 if (!probe_ops->free || !count) in register_ftrace_function_probe()
4666 for (i = 0; i < size; i++) { in register_ftrace_function_probe()
4667 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { in register_ftrace_function_probe()
4668 if (ftrace_lookup_ip(old_hash, entry->ip)) in register_ftrace_function_probe()
4670 probe_ops->free(probe_ops, tr, entry->ip, probe->data); in register_ftrace_function_probe()
4691 int i, ret = -ENODEV; in unregister_ftrace_function_probe_func()
4692 int size; in unregister_ftrace_function_probe_func() local
4705 return -EINVAL; in unregister_ftrace_function_probe_func()
4710 list_for_each_entry(probe, &tr->func_probes, list) { in unregister_ftrace_function_probe_func()
4711 if (probe->probe_ops == probe_ops) in unregister_ftrace_function_probe_func()
4714 if (&probe->list == &tr->func_probes) in unregister_ftrace_function_probe_func()
4717 ret = -EINVAL; in unregister_ftrace_function_probe_func()
4718 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) in unregister_ftrace_function_probe_func()
4725 mutex_lock(&probe->ops.func_hash->regex_lock); in unregister_ftrace_function_probe_func()
4727 orig_hash = &probe->ops.func_hash->filter_hash; in unregister_ftrace_function_probe_func()
4737 ret = -ENOMEM; in unregister_ftrace_function_probe_func()
4744 size = 1 << hash->size_bits; in unregister_ftrace_function_probe_func()
4745 for (i = 0; i < size; i++) { in unregister_ftrace_function_probe_func()
4746 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { in unregister_ftrace_function_probe_func()
4749 kallsyms_lookup(entry->ip, NULL, NULL, in unregister_ftrace_function_probe_func()
4756 hlist_add_head(&entry->hlist, &hhd); in unregister_ftrace_function_probe_func()
4762 ret = -EINVAL; in unregister_ftrace_function_probe_func()
4768 WARN_ON(probe->ref < count); in unregister_ftrace_function_probe_func()
4770 probe->ref -= count; in unregister_ftrace_function_probe_func()
4773 ftrace_shutdown(&probe->ops, 0); in unregister_ftrace_function_probe_func()
4775 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, in unregister_ftrace_function_probe_func()
4780 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, in unregister_ftrace_function_probe_func()
4785 hlist_del(&entry->hlist); in unregister_ftrace_function_probe_func()
4786 if (probe_ops->free) in unregister_ftrace_function_probe_func()
4787 probe_ops->free(probe_ops, tr, entry->ip, probe->data); in unregister_ftrace_function_probe_func()
4793 mutex_unlock(&probe->ops.func_hash->regex_lock); in unregister_ftrace_function_probe_func()
4809 list_for_each_entry_safe(probe, n, &tr->func_probes, list) in clear_ftrace_function_probes()
4810 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); in clear_ftrace_function_probes()
4817 * Currently we only register ftrace commands from __init, so mark this
4827 if (strcmp(cmd->name, p->name) == 0) { in register_ftrace_command()
4828 ret = -EBUSY; in register_ftrace_command()
4832 list_add(&cmd->list, &ftrace_commands); in register_ftrace_command()
4840 * Currently we only unregister ftrace commands from __init, so mark
4846 int ret = -ENODEV; in unregister_ftrace_command()
4850 if (strcmp(cmd->name, p->name) == 0) { in unregister_ftrace_command()
4852 list_del_init(&p->list); in unregister_ftrace_command()
4865 struct ftrace_hash *hash = iter->hash; in ftrace_process_regex()
4866 struct trace_array *tr = iter->ops->private; in ftrace_process_regex()
4869 int ret = -EINVAL; in ftrace_process_regex()
4876 ret = -EINVAL; in ftrace_process_regex()
4888 if (strcmp(p->name, command) == 0) { in ftrace_process_regex()
4889 ret = p->func(tr, hash, func, command, next, enable); in ftrace_process_regex()
4910 if (file->f_mode & FMODE_READ) { in ftrace_regex_write()
4911 struct seq_file *m = file->private_data; in ftrace_regex_write()
4912 iter = m->private; in ftrace_regex_write()
4914 iter = file->private_data; in ftrace_regex_write()
4917 return -ENODEV; in ftrace_regex_write()
4919 /* iter->hash is a local copy, so we don't need regex_lock */ in ftrace_regex_write()
4921 parser = &iter->parser; in ftrace_regex_write()
4926 ret = ftrace_process_regex(iter, parser->buffer, in ftrace_regex_write()
4927 parser->idx, enable); in ftrace_regex_write()
4958 return -EINVAL; in ftrace_match_addr()
4963 return -ENOENT; in ftrace_match_addr()
4980 return -ENODEV; in ftrace_set_hash()
4982 mutex_lock(&ops->func_hash->regex_lock); in ftrace_set_hash()
4985 orig_hash = &ops->func_hash->filter_hash; in ftrace_set_hash()
4987 orig_hash = &ops->func_hash->notrace_hash; in ftrace_set_hash()
4995 ret = -ENOMEM; in ftrace_set_hash()
5000 ret = -EINVAL; in ftrace_set_hash()
5014 mutex_unlock(&ops->func_hash->regex_lock); in ftrace_set_hash()
5038 * ftrace_find_direct_func - test an address if it is a registered direct caller
5041 * This searches to see if a ftrace direct caller has been registered
5057 if (entry->addr == addr) { in ftrace_find_direct_func()
5075 direct->addr = addr; in ftrace_alloc_direct_func()
5076 direct->count = 0; in ftrace_alloc_direct_func()
5077 list_add_rcu(&direct->next, &ftrace_direct_funcs); in ftrace_alloc_direct_func()
5083 * register_ftrace_direct - Call a custom trampoline directly
5088 * at the start of ftrace traced functions. The location that it calls
5095 * -EBUSY - Another direct function is already attached (there can be only one)
5096 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5097 * -ENOMEM - There was an allocation failure.
5105 int ret = -EBUSY; in register_ftrace_direct()
5113 ret = -ENODEV; in register_ftrace_direct()
5122 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT)) in register_ftrace_direct()
5126 if (ip != rec->ip) { in register_ftrace_direct()
5127 ip = rec->ip; in register_ftrace_direct()
5133 ret = -ENOMEM; in register_ftrace_direct()
5135 direct_functions->count > 2 * (1 << direct_functions->size_bits)) { in register_ftrace_direct()
5137 int size = ftrace_hash_empty(direct_functions) ? 0 : in register_ftrace_direct() local
5138 direct_functions->count + 1; in register_ftrace_direct()
5140 if (size < 32) in register_ftrace_direct()
5141 size = 32; in register_ftrace_direct()
5143 new_hash = dup_hash(direct_functions, size); in register_ftrace_direct()
5164 entry->ip = ip; in register_ftrace_direct()
5165 entry->direct = addr; in register_ftrace_direct()
5179 if (!direct->count) { in register_ftrace_direct()
5180 list_del_rcu(&direct->next); in register_ftrace_direct()
5186 ftrace_direct_func_count--; in register_ftrace_direct()
5189 direct->count++; in register_ftrace_direct()
5213 entry = __ftrace_lookup_ip(direct_functions, rec->ip); in find_direct_entry()
5215 WARN_ON(rec->flags & FTRACE_FL_DIRECT); in find_direct_entry()
5219 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT)); in find_direct_entry()
5222 *ip = rec->ip; in find_direct_entry()
5234 int ret = -ENODEV; in unregister_ftrace_direct()
5242 if (direct_functions->count == 1) in unregister_ftrace_direct()
5254 direct->count--; in unregister_ftrace_direct()
5255 WARN_ON(direct->count < 0); in unregister_ftrace_direct()
5256 if (!direct->count) { in unregister_ftrace_direct()
5257 list_del_rcu(&direct->next); in unregister_ftrace_direct()
5261 ftrace_direct_func_count--; in unregister_ftrace_direct()
5276 * ftrace_modify_direct_caller - modify ftrace nop directly
5277 * @entry: The ftrace hash entry of the direct helper for @rec
5279 * @old_addr: The location that the site at @rec->ip currently calls
5280 * @new_addr: The location that the site at @rec->ip should call
5283 * changing of the direct callback on an ftrace nop location.
5285 * ftrace callbacks are on the associated record (@rec). Thus,
5286 * it is safe to modify the ftrace record, where it should be
5290 * @rec->ip is currently calling @old_addr. And this must
5291 * also update entry->direct to @new_addr.
5298 unsigned long ip = rec->ip; in ftrace_modify_direct_caller()
5308 * since this function uses standard ftrace calls that take in ftrace_modify_direct_caller()
5329 entry->direct = new_addr; in ftrace_modify_direct_caller()
5345 * modify_ftrace_direct - Modify an existing direct call to call something else
5350 * This modifies a ftrace direct caller at an instruction pointer without
5355 * -ENODEV : the @ip given has no direct caller attached
5356 * -EINVAL : the @old_addr does not match the current direct caller
5364 int ret = -ENODEV; in modify_ftrace_direct()
5373 ret = -EINVAL; in modify_ftrace_direct()
5374 if (entry->direct != old_addr) in modify_ftrace_direct()
5380 if (direct->count > 1) { in modify_ftrace_direct()
5381 ret = -ENOMEM; in modify_ftrace_direct()
5385 direct->count--; in modify_ftrace_direct()
5386 new_direct->count++; in modify_ftrace_direct()
5388 direct->addr = new_addr; in modify_ftrace_direct()
5392 * If there's no other ftrace callback on the rec->ip location, in modify_ftrace_direct()
5400 entry->direct = new_addr; in modify_ftrace_direct()
5405 direct->addr = old_addr; in modify_ftrace_direct()
5407 direct->count++; in modify_ftrace_direct()
5408 list_del_rcu(&new_direct->next); in modify_ftrace_direct()
5411 ftrace_direct_func_count--; in modify_ftrace_direct()
5424 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5425 * @ops - the ops to set the filter with
5426 * @ip - the address to add to or remove from the filter.
5427 * @remove - non zero to remove the ip from the filter
5428 * @reset - non zero to reset all filters before applying this filter.
5442 * ftrace_ops_set_global_filter - setup ops to use global filters
5443 * @ops - the ops which will use the global filters
5445 * ftrace users who need global function trace filtering should call this.
5450 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) in ftrace_ops_set_global_filter()
5454 ops->func_hash = &global_ops.local_hash; in ftrace_ops_set_global_filter()
5466 * ftrace_set_filter - set a function to filter on in ftrace
5467 * @ops - the ops to set the filter with
5468 * @buf - the string that holds the function filter text.
5469 * @len - the length of the string.
5470 * @reset - non zero to reset all filters before applying this filter.
5484 * ftrace_set_notrace - set a function to not trace in ftrace
5485 * @ops - the ops to set the notrace filter with
5486 * @buf - the string that holds the function notrace text.
5487 * @len - the length of the string.
5488 * @reset - non zero to reset all filters before applying this filter.
5502 * ftrace_set_global_filter - set a function to filter on with global tracers
5503 * @buf - the string that holds the function filter text.
5504 * @len - the length of the string.
5505 * @reset - non zero to reset all filters before applying this filter.
5517 * ftrace_set_global_notrace - set a function to not trace with global tracers
5518 * @buf - the string that holds the function notrace text.
5519 * @len - the length of the string.
5520 * @reset - non zero to reset all filters before applying this filter.
5601 printk(KERN_DEBUG "ftrace: function %s not " in set_ftrace_early_graph()
5641 struct seq_file *m = (struct seq_file *)file->private_data; in ftrace_regex_release()
5648 if (file->f_mode & FMODE_READ) { in ftrace_regex_release()
5649 iter = m->private; in ftrace_regex_release()
5652 iter = file->private_data; in ftrace_regex_release()
5654 parser = &iter->parser; in ftrace_regex_release()
5656 int enable = !(iter->flags & FTRACE_ITER_NOTRACE); in ftrace_regex_release()
5658 ftrace_process_regex(iter, parser->buffer, in ftrace_regex_release()
5659 parser->idx, enable); in ftrace_regex_release()
5664 mutex_lock(&iter->ops->func_hash->regex_lock); in ftrace_regex_release()
5666 if (file->f_mode & FMODE_WRITE) { in ftrace_regex_release()
5667 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); in ftrace_regex_release()
5670 orig_hash = &iter->ops->func_hash->filter_hash; in ftrace_regex_release()
5671 if (iter->tr) { in ftrace_regex_release()
5672 if (list_empty(&iter->tr->mod_trace)) in ftrace_regex_release()
5673 iter->hash->flags &= ~FTRACE_HASH_FL_MOD; in ftrace_regex_release()
5675 iter->hash->flags |= FTRACE_HASH_FL_MOD; in ftrace_regex_release()
5678 orig_hash = &iter->ops->func_hash->notrace_hash; in ftrace_regex_release()
5681 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, in ftrace_regex_release()
5682 iter->hash, filter_hash); in ftrace_regex_release()
5686 iter->hash = NULL; in ftrace_regex_release()
5689 mutex_unlock(&iter->ops->func_hash->regex_lock); in ftrace_regex_release()
5690 free_ftrace_hash(iter->hash); in ftrace_regex_release()
5691 if (iter->tr) in ftrace_regex_release()
5692 trace_array_put(iter->tr); in ftrace_regex_release()
5755 struct ftrace_graph_data *fgd = m->private; in __g_next()
5756 struct ftrace_func_entry *entry = fgd->entry; in __g_next()
5758 int i, idx = fgd->idx; in __g_next()
5760 if (*pos >= fgd->hash->count) in __g_next()
5765 fgd->entry = entry; in __g_next()
5772 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { in __g_next()
5773 head = &fgd->hash->buckets[i]; in __g_next()
5775 fgd->entry = entry; in __g_next()
5776 fgd->idx = i; in __g_next()
5792 struct ftrace_graph_data *fgd = m->private; in g_start()
5796 if (fgd->type == GRAPH_FILTER_FUNCTION) in g_start()
5797 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in g_start()
5800 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in g_start()
5804 if (ftrace_hash_empty(fgd->hash) && !*pos) in g_start()
5807 fgd->idx = 0; in g_start()
5808 fgd->entry = NULL; in g_start()
5825 struct ftrace_graph_data *fgd = m->private; in g_show()
5827 if (fgd->type == GRAPH_FILTER_FUNCTION) in g_show()
5834 seq_printf(m, "%ps\n", (void *)entry->ip); in g_show()
5857 if (file->f_mode & FMODE_WRITE) { in __ftrace_graph_open()
5860 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) in __ftrace_graph_open()
5861 return -ENOMEM; in __ftrace_graph_open()
5863 if (file->f_flags & O_TRUNC) in __ftrace_graph_open()
5867 fgd->hash); in __ftrace_graph_open()
5869 ret = -ENOMEM; in __ftrace_graph_open()
5874 if (file->f_mode & FMODE_READ) { in __ftrace_graph_open()
5877 struct seq_file *m = file->private_data; in __ftrace_graph_open()
5878 m->private = fgd; in __ftrace_graph_open()
5885 file->private_data = fgd; in __ftrace_graph_open()
5888 if (ret < 0 && file->f_mode & FMODE_WRITE) in __ftrace_graph_open()
5889 trace_parser_put(&fgd->parser); in __ftrace_graph_open()
5891 fgd->new_hash = new_hash; in __ftrace_graph_open()
5894 * All uses of fgd->hash must be taken with the graph_lock in __ftrace_graph_open()
5896 * fgd->hash to be reinitialized when it is taken again. in __ftrace_graph_open()
5898 fgd->hash = NULL; in __ftrace_graph_open()
5910 return -ENODEV; in ftrace_graph_open()
5914 return -ENOMEM; in ftrace_graph_open()
5918 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, in ftrace_graph_open()
5920 fgd->type = GRAPH_FILTER_FUNCTION; in ftrace_graph_open()
5921 fgd->seq_ops = &ftrace_graph_seq_ops; in ftrace_graph_open()
5938 return -ENODEV; in ftrace_graph_notrace_open()
5942 return -ENOMEM; in ftrace_graph_notrace_open()
5946 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, in ftrace_graph_notrace_open()
5948 fgd->type = GRAPH_FILTER_NOTRACE; in ftrace_graph_notrace_open()
5949 fgd->seq_ops = &ftrace_graph_seq_ops; in ftrace_graph_notrace_open()
5967 if (file->f_mode & FMODE_READ) { in ftrace_graph_release()
5968 struct seq_file *m = file->private_data; in ftrace_graph_release()
5970 fgd = m->private; in ftrace_graph_release()
5973 fgd = file->private_data; in ftrace_graph_release()
5977 if (file->f_mode & FMODE_WRITE) { in ftrace_graph_release()
5979 parser = &fgd->parser; in ftrace_graph_release()
5982 ret = ftrace_graph_set_hash(fgd->new_hash, in ftrace_graph_release()
5983 parser->buffer); in ftrace_graph_release()
5988 new_hash = __ftrace_hash_move(fgd->new_hash); in ftrace_graph_release()
5990 ret = -ENOMEM; in ftrace_graph_release()
5996 if (fgd->type == GRAPH_FILTER_FUNCTION) { in ftrace_graph_release()
6022 free_ftrace_hash(fgd->new_hash); in ftrace_graph_release()
6048 return -ENODEV; in ftrace_graph_set_hash()
6053 if (rec->flags & FTRACE_FL_DISABLED) in ftrace_graph_set_hash()
6057 entry = ftrace_lookup_ip(hash, rec->ip); in ftrace_graph_set_hash()
6064 if (add_hash_entry(hash, rec->ip) < 0) in ftrace_graph_set_hash()
6078 return -EINVAL; in ftrace_graph_set_hash()
6088 struct ftrace_graph_data *fgd = file->private_data; in ftrace_graph_write()
6095 if (file->f_mode & FMODE_READ) { in ftrace_graph_write()
6096 struct seq_file *m = file->private_data; in ftrace_graph_write()
6097 fgd = m->private; in ftrace_graph_write()
6100 parser = &fgd->parser; in ftrace_graph_write()
6107 ret = ftrace_graph_set_hash(fgd->new_hash, in ftrace_graph_write()
6108 parser->buffer); in ftrace_graph_write()
6159 if (ops->flags & FTRACE_OPS_FL_ENABLED) in ftrace_destroy_filter_files()
6161 ops->flags |= FTRACE_OPS_FL_DELETED; in ftrace_destroy_filter_files()
6197 return -1; in ftrace_cmp_ips()
6214 int ret = -ENOMEM; in ftrace_process_locs()
6216 count = end - start; in ftrace_process_locs()
6226 return -ENOMEM; in ftrace_process_locs()
6243 if (WARN_ON(ftrace_pages->next)) { in ftrace_process_locs()
6245 while (ftrace_pages->next) in ftrace_process_locs()
6246 ftrace_pages = ftrace_pages->next; in ftrace_process_locs()
6249 ftrace_pages->next = start_pg; in ftrace_process_locs()
6268 end_offset = (pg->index+1) * sizeof(pg->records[0]); in ftrace_process_locs()
6269 if (end_offset > PAGE_SIZE << pg->order) { in ftrace_process_locs()
6271 if (WARN_ON(!pg->next)) in ftrace_process_locs()
6273 pg = pg->next; in ftrace_process_locs()
6276 rec = &pg->records[pg->index++]; in ftrace_process_locs()
6277 rec->ip = addr; in ftrace_process_locs()
6280 if (pg->next) { in ftrace_process_locs()
6281 pg_unuse = pg->next; in ftrace_process_locs()
6282 pg->next = NULL; in ftrace_process_locs()
6319 unsigned int size; member
6340 if (!op->trampoline || symnum--) in ftrace_get_trampoline_kallsym()
6342 *value = op->trampoline; in ftrace_get_trampoline_kallsym()
6350 return -ERANGE; in ftrace_get_trampoline_kallsym()
6364 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { in referenced_filters()
6366 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) in referenced_filters()
6368 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) in referenced_filters()
6371 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in referenced_filters()
6372 rec->flags |= FTRACE_FL_REGS; in referenced_filters()
6373 if (cnt == 1 && ops->trampoline) in referenced_filters()
6374 rec->flags |= FTRACE_FL_TRAMP; in referenced_filters()
6376 rec->flags &= ~FTRACE_FL_TRAMP; in referenced_filters()
6393 for (i = 0; i < pg->index; i++) { in clear_mod_from_hash()
6394 rec = &pg->records[i]; in clear_mod_from_hash()
6395 entry = __ftrace_lookup_ip(hash, rec->ip); in clear_mod_from_hash()
6402 entry->ip = 0; in clear_mod_from_hash()
6413 if (!tr->ops || !tr->ops->func_hash) in clear_mod_from_hashes()
6415 mutex_lock(&tr->ops->func_hash->regex_lock); in clear_mod_from_hashes()
6416 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); in clear_mod_from_hashes()
6417 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); in clear_mod_from_hashes()
6418 mutex_unlock(&tr->ops->func_hash->regex_lock); in clear_mod_from_hashes()
6430 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { in ftrace_free_mod_map()
6431 kfree(mod_func->name); in ftrace_free_mod_map()
6432 list_del(&mod_func->list); in ftrace_free_mod_map()
6454 if (mod_map->mod == mod) { in ftrace_release_mod()
6455 list_del_rcu(&mod_map->list); in ftrace_release_mod()
6456 call_rcu(&mod_map->rcu, ftrace_free_mod_map); in ftrace_release_mod()
6467 rec = &pg->records[0]; in ftrace_release_mod()
6468 if (within_module_core(rec->ip, mod) || in ftrace_release_mod()
6469 within_module_init(rec->ip, mod)) { in ftrace_release_mod()
6481 ftrace_update_tot_cnt -= pg->index; in ftrace_release_mod()
6482 *last_pg = pg->next; in ftrace_release_mod()
6484 pg->next = tmp_page; in ftrace_release_mod()
6487 last_pg = &pg->next; in ftrace_release_mod()
6500 if (pg->records) { in ftrace_release_mod()
6501 free_pages((unsigned long)pg->records, pg->order); in ftrace_release_mod()
6502 ftrace_number_of_pages -= 1 << pg->order; in ftrace_release_mod()
6504 tmp_page = pg->next; in ftrace_release_mod()
6506 ftrace_number_of_groups--; in ftrace_release_mod()
6530 * text to read-only, as we now need to set it back to read-write in ftrace_module_enable()
6544 if (!within_module_core(rec->ip, mod) && in ftrace_module_enable()
6545 !within_module_init(rec->ip, mod)) in ftrace_module_enable()
6559 rec->flags &= ~FTRACE_FL_DISABLED; in ftrace_module_enable()
6560 rec->flags += cnt; in ftrace_module_enable()
6579 process_cached_mods(mod->name); in ftrace_module_enable()
6584 if (ftrace_disabled || !mod->num_ftrace_callsites) in ftrace_module_init()
6587 ftrace_process_locs(mod, mod->ftrace_callsites, in ftrace_module_init()
6588 mod->ftrace_callsites + mod->num_ftrace_callsites); in ftrace_module_init()
6601 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); in save_ftrace_mod_rec()
6609 mod_func->name = kstrdup(str, GFP_KERNEL); in save_ftrace_mod_rec()
6610 if (!mod_func->name) { in save_ftrace_mod_rec()
6615 mod_func->ip = rec->ip - offset; in save_ftrace_mod_rec()
6616 mod_func->size = symsize; in save_ftrace_mod_rec()
6618 mod_map->num_funcs++; in save_ftrace_mod_rec()
6620 list_add_rcu(&mod_func->list, &mod_map->funcs); in save_ftrace_mod_rec()
6633 mod_map->mod = mod; in allocate_ftrace_mod_map()
6634 mod_map->start_addr = start; in allocate_ftrace_mod_map()
6635 mod_map->end_addr = end; in allocate_ftrace_mod_map()
6636 mod_map->num_funcs = 0; in allocate_ftrace_mod_map()
6638 INIT_LIST_HEAD_RCU(&mod_map->funcs); in allocate_ftrace_mod_map()
6640 list_add_rcu(&mod_map->list, &ftrace_mod_maps); in allocate_ftrace_mod_map()
6647 unsigned long addr, unsigned long *size, in ftrace_func_address_lookup() argument
6653 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { in ftrace_func_address_lookup()
6654 if (addr >= mod_func->ip && in ftrace_func_address_lookup()
6655 addr < mod_func->ip + mod_func->size) { in ftrace_func_address_lookup()
6662 if (size) in ftrace_func_address_lookup()
6663 *size = found_func->size; in ftrace_func_address_lookup()
6665 *off = addr - found_func->ip; in ftrace_func_address_lookup()
6667 strlcpy(sym, found_func->name, KSYM_NAME_LEN); in ftrace_func_address_lookup()
6669 return found_func->name; in ftrace_func_address_lookup()
6676 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, in ftrace_mod_address_lookup() argument
6685 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); in ftrace_mod_address_lookup()
6688 *modname = mod_map->mod->name; in ftrace_mod_address_lookup()
6708 if (symnum >= mod_map->num_funcs) { in ftrace_mod_get_kallsym()
6709 symnum -= mod_map->num_funcs; in ftrace_mod_get_kallsym()
6713 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { in ftrace_mod_get_kallsym()
6715 symnum--; in ftrace_mod_get_kallsym()
6719 *value = mod_func->ip; in ftrace_mod_get_kallsym()
6721 strlcpy(name, mod_func->name, KSYM_NAME_LEN); in ftrace_mod_get_kallsym()
6722 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); in ftrace_mod_get_kallsym()
6770 entry = ftrace_lookup_ip(hash, func->ip); in clear_func_from_hash()
6777 entry->ip = 0; in clear_func_from_hash()
6787 if (!tr->ops || !tr->ops->func_hash) in clear_func_from_hashes()
6789 mutex_lock(&tr->ops->func_hash->regex_lock); in clear_func_from_hashes()
6790 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); in clear_func_from_hashes()
6791 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); in clear_func_from_hashes()
6792 mutex_unlock(&tr->ops->func_hash->regex_lock); in clear_func_from_hashes()
6804 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); in add_to_clear_hash_list()
6808 func->ip = rec->ip; in add_to_clear_hash_list()
6809 list_add(&func->list, clear_list); in add_to_clear_hash_list()
6840 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { in ftrace_free_mem()
6841 if (end < pg->records[0].ip || in ftrace_free_mem()
6842 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) in ftrace_free_mem()
6845 rec = bsearch(&key, pg->records, pg->index, in ftrace_free_mem()
6857 pg->index--; in ftrace_free_mem()
6858 ftrace_update_tot_cnt--; in ftrace_free_mem()
6859 if (!pg->index) { in ftrace_free_mem()
6860 *last_pg = pg->next; in ftrace_free_mem()
6861 pg->next = tmp_page; in ftrace_free_mem()
6869 (pg->index - (rec - pg->records)) * sizeof(*rec)); in ftrace_free_mem()
6907 count = __stop_mcount_loc - __start_mcount_loc; in ftrace_init()
6909 pr_info("ftrace: No functions to be traced?\n"); in ftrace_init()
6913 pr_info("ftrace: allocating %ld entries in %ld pages\n", in ftrace_init()
6922 pr_info("ftrace: allocated %ld pages with %ld groups\n", in ftrace_init()
6939 unsigned long trampoline = ops->trampoline; in ftrace_update_trampoline()
6942 if (ops->trampoline && ops->trampoline != trampoline && in ftrace_update_trampoline()
6943 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { in ftrace_update_trampoline()
6947 ops->trampoline, ops->trampoline_size, false, in ftrace_update_trampoline()
6953 perf_event_text_poke((void *)ops->trampoline, NULL, 0, in ftrace_update_trampoline()
6954 (void *)ops->trampoline, in ftrace_update_trampoline()
6955 ops->trampoline_size); in ftrace_update_trampoline()
6961 INIT_LIST_HEAD(&tr->func_probes); in ftrace_init_trace_array()
6962 INIT_LIST_HEAD(&tr->mod_trace); in ftrace_init_trace_array()
6963 INIT_LIST_HEAD(&tr->mod_notrace); in ftrace_init_trace_array()
6996 tr->ops = &global_ops; in ftrace_init_global_array_ops()
6997 tr->ops->private = tr; in ftrace_init_global_array_ops()
7004 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { in ftrace_init_array_ops()
7005 if (WARN_ON(tr->ops->func != ftrace_stub)) in ftrace_init_array_ops()
7006 printk("ftrace ops had %pS for function\n", in ftrace_init_array_ops()
7007 tr->ops->func); in ftrace_init_array_ops()
7009 tr->ops->func = func; in ftrace_init_array_ops()
7010 tr->ops->private = tr; in ftrace_init_array_ops()
7015 tr->ops->func = ftrace_stub; in ftrace_reset_array_ops()
7037 if (op->flags & FTRACE_OPS_FL_STUB) in __ftrace_ops_list_func()
7046 * If any of the above fails then the op->func() is not executed. in __ftrace_ops_list_func()
7048 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && in __ftrace_ops_list_func()
7050 if (FTRACE_WARN_ON(!op->func)) { in __ftrace_ops_list_func()
7054 op->func(ip, parent_ip, op, regs); in __ftrace_ops_list_func()
7106 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) in ftrace_ops_assist_func()
7107 op->func(ip, parent_ip, op, regs); in ftrace_ops_assist_func()
7115 * ftrace_ops_get_func - get the function a trampoline should call
7118 * Normally the mcount trampoline will call the ops->func, but there
7131 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || in ftrace_ops_get_func()
7132 ops->flags & FTRACE_OPS_FL_RCU) in ftrace_ops_get_func()
7135 return ops->func; in ftrace_ops_get_func()
7146 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_filter_pid_sched_switch_probe()
7147 no_pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_filter_pid_sched_switch_probe()
7150 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ftrace_filter_pid_sched_switch_probe()
7153 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ftrace_filter_pid_sched_switch_probe()
7154 next->pid); in ftrace_filter_pid_sched_switch_probe()
7165 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_pid_follow_sched_process_fork()
7168 pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_pid_follow_sched_process_fork()
7178 pid_list = rcu_dereference_sched(tr->function_pids); in ftrace_pid_follow_sched_process_exit()
7181 pid_list = rcu_dereference_sched(tr->function_no_pids); in ftrace_pid_follow_sched_process_exit()
7206 pid_list = rcu_dereference_protected(tr->function_pids, in clear_ftrace_pids()
7208 no_pid_list = rcu_dereference_protected(tr->function_no_pids, in clear_ftrace_pids()
7219 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; in clear_ftrace_pids()
7223 rcu_assign_pointer(tr->function_pids, NULL); in clear_ftrace_pids()
7226 rcu_assign_pointer(tr->function_no_pids, NULL); in clear_ftrace_pids()
7265 struct trace_array *tr = m->private; in fpid_start()
7270 pid_list = rcu_dereference_sched(tr->function_pids); in fpid_start()
7280 struct trace_array *tr = m->private; in fpid_next()
7281 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); in fpid_next()
7318 struct trace_array *tr = m->private; in fnpid_start()
7323 pid_list = rcu_dereference_sched(tr->function_no_pids); in fnpid_start()
7333 struct trace_array *tr = m->private; in fnpid_next()
7334 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); in fnpid_next()
7353 struct trace_array *tr = inode->i_private; in pid_open()
7361 if ((file->f_mode & FMODE_WRITE) && in pid_open()
7362 (file->f_flags & O_TRUNC)) in pid_open()
7375 return -EINVAL; in pid_open()
7382 m = file->private_data; in pid_open()
7384 m->private = tr; in pid_open()
7412 pid_list = rcu_dereference_protected(tr->function_pids, in ignore_task_cpu()
7414 no_pid_list = rcu_dereference_protected(tr->function_no_pids, in ignore_task_cpu()
7418 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ignore_task_cpu()
7421 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, in ignore_task_cpu()
7422 current->pid); in ignore_task_cpu()
7429 struct seq_file *m = filp->private_data; in pid_write()
7430 struct trace_array *tr = m->private; in pid_write()
7443 filtered_pids = rcu_dereference_protected(tr->function_pids, in pid_write()
7445 other_pids = rcu_dereference_protected(tr->function_no_pids, in pid_write()
7449 filtered_pids = rcu_dereference_protected(tr->function_no_pids, in pid_write()
7451 other_pids = rcu_dereference_protected(tr->function_pids, in pid_write()
7455 ret = -EINVAL; in pid_write()
7466 rcu_assign_pointer(tr->function_pids, pid_list); in pid_write()
7469 rcu_assign_pointer(tr->function_no_pids, pid_list); in pid_write()
7517 struct trace_array *tr = inode->i_private; in ftrace_pid_release()
7552 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); in ftrace_init_tracefs_toplevel()
7559 * ftrace_kill - kill ftrace
7561 * This function should be used by panic code. It stops ftrace
7562 * but in a not so nice way. If you need to simply kill ftrace
7563 * from a non-atomic section, use ftrace_kill.
7573 * Test if ftrace is dead or not.
7581 * register_ftrace_function - register a function for profiling
7582 * @ops - ops structure that holds the function for profiling.
7587 * Note: @ops->func and all the functions it calls must be labeled
7593 int ret = -1; in register_ftrace_function()
7608 * unregister_ftrace_function - unregister a function for profiling.
7609 * @ops - ops structure that holds the function to unregister
7611 * Unregister a function that was added to be called by ftrace profiling.
7630 if (op->flags & FTRACE_OPS_FL_PERMANENT) in is_permanent_ops_registered()
7641 int ret = -ENODEV; in ftrace_enable_sysctl()
7655 /* we are starting ftrace again */ in ftrace_enable_sysctl()
7665 ret = -EBUSY; in ftrace_enable_sysctl()
7669 /* stopping ftrace calls (just send to ftrace_stub) */ in ftrace_enable_sysctl()