Lines Matching refs:leader

620 	struct perf_event *leader = event->group_leader;  in __perf_effective_state()  local
622 if (leader->state <= PERF_EVENT_STATE_OFF) in __perf_effective_state()
623 return leader->state; in __perf_effective_state()
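
The fragments at 620-623 show __perf_effective_state() capping an event's state by its group leader's: a member is never considered more enabled than its leader. Below is a minimal standalone C sketch of just that rule; the two-field struct and the reduced enum are simplified stand-ins for the kernel's definitions, not the real struct perf_event.

/*
 * Toy userspace model (not kernel code) of the rule in __perf_effective_state():
 * if the group leader is OFF or worse, its state masks the member's own state.
 * Only the states needed for the example are listed; the ordering matches the
 * kernel's enum perf_event_state.
 */
#include <stdio.h>

enum perf_event_state {
	PERF_EVENT_STATE_ERROR    = -2,
	PERF_EVENT_STATE_OFF      = -1,
	PERF_EVENT_STATE_INACTIVE =  0,
	PERF_EVENT_STATE_ACTIVE   =  1,
};

struct event {				/* simplified stand-in for struct perf_event */
	enum perf_event_state state;
	struct event *group_leader;
};

static enum perf_event_state effective_state(struct event *event)
{
	struct event *leader = event->group_leader;

	if (leader->state <= PERF_EVENT_STATE_OFF)
		return leader->state;
	return event->state;
}

int main(void)
{
	struct event leader  = { PERF_EVENT_STATE_OFF, &leader };
	struct event sibling = { PERF_EVENT_STATE_ACTIVE, &leader };

	/* Prints -1: the disabled leader masks the sibling's ACTIVE state. */
	printf("%d\n", effective_state(&sibling));
	return 0;
}
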
652 static void perf_event_update_sibling_time(struct perf_event *leader) in perf_event_update_sibling_time() argument
656 for_each_sibling_event(sibling, leader) in perf_event_update_sibling_time()
2163 struct perf_event *leader = event->group_leader; in perf_group_detach() local
2182 if (leader != event) { in perf_group_detach()
2216 for_each_sibling_event(tmp, leader) in perf_group_detach()
2219 perf_event__header_size(leader); in perf_group_detach()
2972 struct perf_event *leader = event->group_leader; in __perf_event_enable() local
2997 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
5306 static int __perf_read_group_add(struct perf_event *leader, in __perf_read_group_add() argument
5309 struct perf_event_context *ctx = leader->ctx; in __perf_read_group_add()
5315 ret = perf_event_read(leader, true); in __perf_read_group_add()
5340 parent = leader->parent; in __perf_read_group_add()
5342 (parent->group_generation != leader->group_generation || in __perf_read_group_add()
5343 parent->nr_siblings != leader->nr_siblings)) { in __perf_read_group_add()
5354 values[n++] += leader->total_time_enabled + in __perf_read_group_add()
5355 atomic64_read(&leader->child_total_time_enabled); in __perf_read_group_add()
5359 values[n++] += leader->total_time_running + in __perf_read_group_add()
5360 atomic64_read(&leader->child_total_time_running); in __perf_read_group_add()
5366 values[n++] += perf_event_count(leader); in __perf_read_group_add()
5368 values[n++] = primary_event_id(leader); in __perf_read_group_add()
5370 values[n++] = atomic64_read(&leader->lost_samples); in __perf_read_group_add()
5372 for_each_sibling_event(sub, leader) { in __perf_read_group_add()
5388 struct perf_event *leader = event->group_leader, *child; in perf_read_group() local
5389 struct perf_event_context *ctx = leader->ctx; in perf_read_group()
5399 values[0] = 1 + leader->nr_siblings; in perf_read_group()
5401 mutex_lock(&leader->child_mutex); in perf_read_group()
5403 ret = __perf_read_group_add(leader, read_format, values); in perf_read_group()
5407 list_for_each_entry(child, &leader->child_list, child_list) { in perf_read_group()
5413 mutex_unlock(&leader->child_mutex); in perf_read_group()
5421 mutex_unlock(&leader->child_mutex); in perf_read_group()
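
Lines 5306-5421 are the read(2) path for a group leader: __perf_read_group_add() lays out nr, the leader's enabled/running times, then a value/id pair (plus a lost count when requested) per event, and perf_read_group() accumulates the same layout over every inherited child on child_list. The sketch below is a hedged userspace consumer of that layout; the two software counters, the pid=0/cpu=-1 choice, the hard-coded two-event buffer, and the omitted error handling are illustrative assumptions, not part of the kernel code above.

/*
 * Read a two-event group with PERF_FORMAT_GROUP and print the buffer that
 * __perf_read_group_add() fills in:
 *   u64 nr; u64 time_enabled; u64 time_running; { u64 value; u64 id; }[nr]
 */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t buf[3 + 2 * 2];	/* nr + two times + {value, id} per event */
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
			   PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;
	attr.disabled = 1;		/* only the leader starts disabled */
	leader = perf_event_open(&attr, 0, -1, -1, 0);

	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
	attr.disabled = 0;		/* siblings follow the leader */
	sibling = perf_event_open(&attr, 0, -1, leader, 0);

	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	for (volatile long i = 0; i < 1000000; i++)	/* stand-in workload */
		;
	ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	/* One read on the leader returns the whole group. */
	read(leader, buf, sizeof(buf));
	printf("nr=%" PRIu64 " enabled=%" PRIu64 " running=%" PRIu64 "\n",
	       buf[0], buf[1], buf[2]);
	printf("task-clock=%" PRIu64 " (id %" PRIu64 "), "
	       "page-faults=%" PRIu64 " (id %" PRIu64 ")\n",
	       buf[3], buf[4], buf[5], buf[6]);

	close(sibling);
	close(leader);
	return 0;
}
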
7018 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group() local
7030 values[n++] = 1 + leader->nr_siblings; in perf_output_read_group()
7038 if ((leader != event) && in perf_output_read_group()
7039 (leader->state == PERF_EVENT_STATE_ACTIVE)) in perf_output_read_group()
7040 leader->pmu->read(leader); in perf_output_read_group()
7042 values[n++] = perf_event_count(leader); in perf_output_read_group()
7044 values[n++] = primary_event_id(leader); in perf_output_read_group()
7046 values[n++] = atomic64_read(&leader->lost_samples); in perf_output_read_group()
7050 for_each_sibling_event(sub, leader) { in perf_output_read_group()
13298 struct perf_event *leader; in inherit_group() local
13302 leader = inherit_event(parent_event, parent, parent_ctx, in inherit_group()
13304 if (IS_ERR(leader)) in inherit_group()
13305 return PTR_ERR(leader); in inherit_group()
13313 child, leader, child_ctx); in inherit_group()
13318 !perf_get_aux_event(child_ctr, leader)) in inherit_group()
13321 leader->group_generation = parent_event->group_generation; in inherit_group()