/kernel/dma/ |
D | debug.c |
  292  struct dma_debug_entry *ref,  in __hash_bucket_find() argument
  299  if (!match(ref, entry))  in __hash_bucket_find()
  314  entry->size == ref->size ? ++match_lvl : 0;  in __hash_bucket_find()
  315  entry->type == ref->type ? ++match_lvl : 0;  in __hash_bucket_find()
  316  entry->direction == ref->direction ? ++match_lvl : 0;  in __hash_bucket_find()
  317  entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;  in __hash_bucket_find()
  342  struct dma_debug_entry *ref)  in bucket_find_exact() argument
  344  return __hash_bucket_find(bucket, ref, exact_match);  in bucket_find_exact()
  348  struct dma_debug_entry *ref,  in bucket_find_contain() argument
  352  unsigned int max_range = dma_get_max_seg_size(ref->dev);  in bucket_find_contain()
  [all …]
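The `++match_lvl : 0` ternaries above implement dma-debug's best-match search: each field of a candidate entry that agrees with the reference descriptor bumps a score, and the caller keeps the highest-scoring entry. A small sketch of that scoring idea, written with if-statements and hypothetical field names rather than the kernel's exact code:

/* Hedged sketch of the "count matching fields" idiom; the struct and
 * field names here are illustrative, not the dma-debug definitions. */
struct entry_like {
	unsigned long size;
	int type;
	int direction;
};

static int match_level(const struct entry_like *ref,
		       const struct entry_like *entry)
{
	int match_lvl = 0;

	/* The kernel spells these as "entry->x == ref->x ? ++match_lvl : 0;". */
	if (entry->size == ref->size)
		match_lvl++;
	if (entry->type == ref->type)
		match_lvl++;
	if (entry->direction == ref->direction)
		match_lvl++;

	return match_lvl;	/* caller prefers the entry with the highest score */
}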
|
/kernel/bpf/ |
D | bpf_lru_list.h |
  27  u8 ref;  member
  66  if (!READ_ONCE(node->ref))  in bpf_lru_node_set_ref()
  67  WRITE_ONCE(node->ref, 1);  in bpf_lru_node_set_ref()
|
D | bpf_lru_list.c |
  44  return READ_ONCE(node->ref);  in bpf_lru_node_is_ref()
  49  WRITE_ONCE(node->ref, 0);  in bpf_lru_node_clear_ref()
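bpf_lru_node_set_ref() above checks the bit with READ_ONCE() and only issues the WRITE_ONCE() when it is still clear, so a hot lookup path does not keep dirtying the cache line; the list code later reads and clears the bit when it needs to evict. A minimal sketch of that set / test-and-clear pair under the same assumptions (not the kernel's exact structures):

#include <linux/types.h>
#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */

struct lru_node_like {
	u8 ref;		/* "recently used" hint consumed by eviction */
};

/* Mark the node referenced; skip the store if the bit is already set. */
static inline void lru_node_set_ref(struct lru_node_like *node)
{
	if (!READ_ONCE(node->ref))
		WRITE_ONCE(node->ref, 1);
}

/* Eviction side: a referenced node gets a second chance, and its bit is cleared. */
static inline bool lru_node_test_and_clear_ref(struct lru_node_like *node)
{
	if (!READ_ONCE(node->ref))
		return false;
	WRITE_ONCE(node->ref, 0);
	return true;
}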
|
D | cgroup.c |
  80  static void cgroup_bpf_release_fn(struct percpu_ref *ref)  in cgroup_bpf_release_fn() argument
  82  struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);  in cgroup_bpf_release_fn()
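cgroup_bpf_release_fn() is the release callback of a percpu_ref embedded in struct cgroup: when the count drops to zero it receives the percpu_ref pointer back and recovers the enclosing object with container_of(). A hedged sketch of how such a callback is registered with percpu_ref_init() (object and function names are invented for illustration):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>
#include <linux/kernel.h>	/* container_of() */

struct holder_like {
	struct percpu_ref refcnt;
	/* ... payload protected by the reference ... */
};

/* Runs once the reference count has dropped to zero. */
static void holder_release_fn(struct percpu_ref *ref)
{
	struct holder_like *h = container_of(ref, struct holder_like, refcnt);

	kfree(h);
}

static struct holder_like *holder_create(void)
{
	struct holder_like *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;
	if (percpu_ref_init(&h->refcnt, holder_release_fn, 0, GFP_KERNEL)) {
		kfree(h);
		return NULL;
	}
	return h;
}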
|
D | hashtab.c |
  559  offsetof(struct bpf_lru_node, ref));  in htab_lru_map_gen_lookup()
  563  offsetof(struct bpf_lru_node, ref),  in htab_lru_map_gen_lookup()
|
/kernel/trace/ |
D | trace.c |
  291   tr->ref++;  in trace_array_get()
  303   WARN_ON(!this_tr->ref);  in __trace_array_put()
  304   this_tr->ref--;  in __trace_array_put()
  5776  if (tr->current_trace->ref) {  in tracing_set_tracer()
  5992  tr->current_trace->ref++;  in tracing_open_pipe()
  6011  tr->current_trace->ref--;  in tracing_release_pipe()
  7344  tr->current_trace->ref++;  in tracing_buffers_open()
  7445  iter->tr->current_trace->ref--;  in tracing_buffers_release()
  7466  static void buffer_ref_release(struct buffer_ref *ref)  in buffer_ref_release() argument
  7468  if (!refcount_dec_and_test(&ref->refcount))  in buffer_ref_release()
  [all …]
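Two styles show up in trace.c: tr->ref and current_trace->ref are plain int counters that are only touched under trace_types_lock, while buffer_ref_release() uses the refcount_t API. A rough sketch of the lock-protected plain-counter style (hypothetical names; in the kernel the mutex is trace_types_lock):

#include <linux/mutex.h>
#include <linux/bug.h>		/* WARN_ON() */

struct tracer_like {
	int ref;		/* open pipes/buffers pinning this tracer */
};

static DEFINE_MUTEX(types_lock);	/* stands in for trace_types_lock */

static void tracer_get(struct tracer_like *t)
{
	mutex_lock(&types_lock);
	t->ref++;
	mutex_unlock(&types_lock);
}

static void tracer_put(struct tracer_like *t)
{
	mutex_lock(&types_lock);
	WARN_ON(!t->ref);
	t->ref--;
	mutex_unlock(&types_lock);
}

A plain counter like this is only safe because every update runs under the same lock; where no single lock covers all users, refcount_t (as in buffer_ref_release() above) is the usual choice.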
|
D | trace_events_trigger.c |
  421   data->ref++;  in event_trigger_init()
  439   if (WARN_ON_ONCE(data->ref <= 0))  in event_trigger_free()
  442   data->ref--;  in event_trigger_free()
  443   if (!data->ref)  in event_trigger_free()
  1367  if (WARN_ON_ONCE(data->ref <= 0))  in event_enable_trigger_free()
  1370  data->ref--;  in event_enable_trigger_free()
  1371  if (!data->ref) {  in event_enable_trigger_free()
|
D | trace_events_hist.c |
  120   unsigned int ref;  member
  407   int ref;  member
  431   return event->ref != 0;  in synth_event_is_busy()
  1401  if (event->ref)  in create_or_delete_synth_event()
  1444  if (event->ref)  in synth_event_release()
  2476  hist_field->ref++;  in get_hist_field()
  2481  if (--hist_field->ref > 1)  in __destroy_hist_field()
  2528  hist_field->ref = 1;  in create_hist_field()
  3594  var->ref = 1;  in create_var()
  3932  data->synth_event->ref--;  in action_data_destroy()
  [all …]
|
D | trace.h |
  311   int ref;  member
  501   int ref;  member
  1633  int ref;  member
|
D | trace_events.c |
  2531  int ref;  member
  2637  edata->ref++;  in event_enable_init()
  2646  edata->ref--;  in free_probe_data()
  2647  if (!edata->ref) {  in free_probe_data()
  2675  if (WARN_ON_ONCE(edata->ref <= 0))  in event_enable_free()
|
D | ftrace.c |
  1032  int ref;  member
  4332  WARN_ON(probe->ref <= 0);  in release_probe()
  4335  probe->ref--;  in release_probe()
  4337  if (!probe->ref) {  in release_probe()
  4357  probe->ref++;  in acquire_probe_locked()
  4470  probe->ref += count;  in register_ftrace_function_probe()
  4595  WARN_ON(probe->ref < count);  in unregister_ftrace_function_probe_func()
  4597  probe->ref -= count;  in unregister_ftrace_function_probe_func()
|
/kernel/sched/ |
D | topology.c |
  572   if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))  in free_sched_groups()
  575   if (atomic_dec_and_test(&sg->ref))  in free_sched_groups()
  590   if (sd->shared && atomic_dec_and_test(&sd->shared->ref))  in destroy_sched_domain()
  897   atomic_inc(&sg->ref);  in build_group_from_child_sched_domain()
  913   if (atomic_inc_return(&sg->sgc->ref) == 1)  in init_overlap_sched_group()
  1074  already_visited = atomic_inc_return(&sg->ref) > 1;  in get_group()
  1076  WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));  in get_group()
  1271  if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))  in claim_allocations()
  1274  if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))  in claim_allocations()
  1277  if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))  in claim_allocations()
  [all …]
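The sched-domain topology code shares sched_group / sched_group_capacity objects between CPUs with a plain atomic_t: atomic_inc_return() == 1 tells the first user it must initialize the object, atomic_dec_and_test() tells the last user to free it, and claim_allocations() uses atomic_read() to decide whether an object is still claimed. A minimal sketch of that first-init / last-free pattern with an invented struct:

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct shared_like {
	atomic_t ref;
	int initialized_data;
};

/* First taker (count goes 0 -> 1) is responsible for initialization. */
static void shared_attach(struct shared_like *s)
{
	if (atomic_inc_return(&s->ref) == 1)
		s->initialized_data = 0;
}

/* Last dropper frees the shared object. */
static void shared_detach(struct shared_like *s)
{
	if (atomic_dec_and_test(&s->ref))
		kfree(s);
}

/* A holder can check whether anyone still claims the object. */
static bool shared_is_claimed(struct shared_like *s)
{
	return atomic_read(&s->ref) != 0;
}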
|
D | sched.h |
  1398  atomic_t ref;  member
  1418  atomic_t ref;  member
|
D | fair.c |
  8267  group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)  in group_smaller_min_cpu_capacity() argument
  8269  return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity);  in group_smaller_min_cpu_capacity()
  8277  group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)  in group_smaller_max_cpu_capacity() argument
  8279  return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity);  in group_smaller_max_cpu_capacity()
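In these two helpers `ref` is not a reference count at all: it is the reference sched_group that another group's min/max CPU capacity is compared against via fits_capacity(). A hedged sketch of that margin-based comparison; the 1280/1024 (roughly 25% headroom) factor is an assumption for illustration, not taken from this listing:

#include <linux/types.h>

/* "cap fits under ref" only if it is smaller by a comfortable margin. */
static inline bool fits_capacity_sketch(unsigned long cap, unsigned long ref)
{
	return cap * 1280 < ref * 1024;	/* assumed margin, for illustration */
}

/* A group is "smaller" than the reference group when its per-CPU
 * capacity fits under the reference group's capacity with margin. */
static inline bool group_smaller_capacity_sketch(unsigned long group_cap,
						 unsigned long ref_cap)
{
	return fits_capacity_sketch(group_cap, ref_cap);
}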
|
/kernel/events/ |
D | uprobes.c |
  57   refcount_t ref;  member
  603  refcount_inc(&uprobe->ref);  in get_uprobe()
  609  if (refcount_dec_and_test(&uprobe->ref)) {  in put_uprobe()
  700  refcount_set(&uprobe->ref, 2);  in __insert_uprobe()
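uprobes.c uses the refcount_t API directly: refcount_set(&uprobe->ref, 2) at insertion (typically one reference for the lookup tree and one returned to the caller), refcount_inc() in get_uprobe(), and refcount_dec_and_test() in put_uprobe(). A minimal get/put pair in the same style, with an invented object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct probe_like {
	refcount_t ref;
	/* ... */
};

static struct probe_like *probe_alloc(void)
{
	struct probe_like *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->ref, 1);	/* caller owns the initial reference */
	return p;
}

static struct probe_like *probe_get(struct probe_like *p)
{
	refcount_inc(&p->ref);
	return p;
}

static void probe_put(struct probe_like *p)
{
	if (refcount_dec_and_test(&p->ref))
		kfree(p);
}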
|
/kernel/cgroup/ |
D | cgroup.c |
  224   static void css_release(struct percpu_ref *ref);
  5240  static void css_release(struct percpu_ref *ref)  in css_release() argument
  5243  container_of(ref, struct cgroup_subsys_state, refcnt);  in css_release()
  5623  static void css_killed_ref_fn(struct percpu_ref *ref)  in css_killed_ref_fn() argument
  5626  container_of(ref, struct cgroup_subsys_state, refcnt);  in css_killed_ref_fn()
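A cgroup_subsys_state embeds a percpu_ref with two callbacks: css_killed_ref_fn() is the confirm-kill callback that runs once the ref has been switched out of percpu mode, and css_release() runs when the count finally hits zero; both recover the css with container_of(). A hedged sketch of that two-stage teardown using percpu_ref_kill_and_confirm() (invented object names, not the cgroup code):

#include <linux/percpu-refcount.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/gfp.h>

struct css_like {
	struct percpu_ref refcnt;
	/* ... */
};

/* Confirm-kill callback: no new percpu references can be taken now. */
static void css_like_killed_fn(struct percpu_ref *ref)
{
	struct css_like *c = container_of(ref, struct css_like, refcnt);

	/* begin offlining @c here */
	(void)c;
}

/* Release callback: the count reached zero, schedule the final free. */
static void css_like_release(struct percpu_ref *ref)
{
	struct css_like *c = container_of(ref, struct css_like, refcnt);

	(void)c;
}

static int css_like_init(struct css_like *c)
{
	return percpu_ref_init(&c->refcnt, css_like_release, 0, GFP_KERNEL);
}

static void css_like_kill(struct css_like *c)
{
	percpu_ref_kill_and_confirm(&c->refcnt, css_like_killed_fn);
}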
|