Lines Matching full:cluster

121  * The cache is made up of one or more clusters; each cluster has its own PMU.
122 * Each cluster is associated with one or more CPUs.
143 /* The CPU that is used for collecting events on this cluster */
145 /* All the CPUs associated with this cluster */
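The comments at 121-145 describe the overall layout: one aggregate PMU is made up of per-cluster hardware PMUs, and each cluster has a single managing CPU (on_cpu) plus a mask of associated CPUs (cluster_cpus). Below is a hedged sketch of the data structures those lines and the later matches imply; field names follow the listing, while the header list, the constants, and every field marked "assumed" are guesses rather than facts taken from the file.

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#define MAX_L2_CTRS             9       /* assumed per-cluster counter budget */
#define L2_EVT_GROUP_MAX        7       /* assumed number of event columns */

/* Aggregate PMU: one instance covering every cluster. */
struct l2cache_pmu {
        struct hlist_node node;                 /* assumed: CPU-hotplug state */
        struct list_head clusters;              /* all cluster_pmu instances */
        struct pmu pmu;                         /* assumed: core perf PMU */
        int num_counters;
        cpumask_t cpumask;                      /* assumed: managing CPUs */
        struct platform_device *pdev;           /* assumed */
        struct cluster_pmu * __percpu *pmu_cluster;     /* CPU -> cluster */
};

/* Per-cluster hardware PMU state. */
struct cluster_pmu {
        struct list_head next;                  /* entry in l2cache_pmu->clusters */
        struct perf_event *events[MAX_L2_CTRS];
        struct l2cache_pmu *l2cache_pmu;
        DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
        DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX);
        int irq;
        int cluster_id;
        /* The CPU that is used for collecting events on this cluster */
        int on_cpu;
        /* All the CPUs associated with this cluster */
        cpumask_t cluster_cpus;
        spinlock_t pmu_lock;
};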
243 static void cluster_pmu_set_resr(struct cluster_pmu *cluster, in cluster_pmu_set_resr() argument
254 spin_lock_irqsave(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
262 spin_unlock_irqrestore(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
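Matches 243-262 show that the per-cluster event selection register (RESR) is updated under the cluster's spinlock. A hedged reconstruction of that read-modify-write sequence follows; the register name, the shift/mask constants (L2PMRESR_GROUP_BITS, L2PMRESR_GROUP_MASK, L2PMRESR_EN) and the indirect-register accessors are assumed names, not taken from the listing.

static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
                                 u32 event_group, u32 event_cc)
{
        u64 field;
        u64 resr_val;
        u32 shift;
        unsigned long flags;

        shift = L2PMRESR_GROUP_BITS * event_group;      /* assumed field width */
        field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);

        spin_lock_irqsave(&cluster->pmu_lock, flags);

        /* Read-modify-write the shared register atomically w.r.t. other CPUs. */
        resr_val = get_l2_indirect_reg(L2PMRESR);       /* assumed accessor */
        resr_val &= ~((u64)L2PMRESR_GROUP_MASK << shift);
        resr_val |= field;
        resr_val |= L2PMRESR_EN;
        set_l2_indirect_reg(L2PMRESR, resr_val);        /* assumed accessor */

        spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}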
268 * all CPUs, subunits and ID-independent events in this cluster.
319 static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, in l2_cache_cluster_set_period() argument
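Match 319 names the period helper that both the overflow handler (409) and event_start (553) call. The listing does not show its body; the sketch below shows the usual shape of such a routine, with the reload constant and the counter-write helper as assumed names.

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
                                        struct hw_perf_event *hwc)
{
        u32 idx = hwc->idx;
        u64 new;

        /*
         * Start the counter part-way through its range so that, even with
         * interrupt latency, it cannot wrap past its initial value before
         * the overflow is serviced.
         */
        new = L2_COUNTER_RELOAD;                        /* assumed constant */

        local64_set(&hwc->prev_count, new);
        cluster_pmu_counter_set_value(idx, new);        /* assumed register write */
}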
339 static int l2_cache_get_event_idx(struct cluster_pmu *cluster, in l2_cache_get_event_idx() argument
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
348 if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) in l2_cache_get_event_idx()
354 idx = find_first_zero_bit(cluster->used_counters, num_ctrs); in l2_cache_get_event_idx()
365 if (test_bit(group, cluster->used_groups)) in l2_cache_get_event_idx()
368 set_bit(idx, cluster->used_counters); in l2_cache_get_event_idx()
369 set_bit(group, cluster->used_groups); in l2_cache_get_event_idx()
374 static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, in l2_cache_clear_event_idx() argument
380 clear_bit(idx, cluster->used_counters); in l2_cache_clear_event_idx()
382 clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); in l2_cache_clear_event_idx()
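Matches 339-382 show the counter bookkeeping: the dedicated cycle counter has its own index, general-purpose counters are allocated from the used_counters bitmap, and the used_groups bitmap enforces column exclusion (one event per group per cluster). A hedged reconstruction, with L2CYCLE_CTR_RAW_CODE as an assumed name for the cycle-counter event encoding:

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                  struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        unsigned int group;

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                /* The dedicated cycle counter can only be claimed once. */
                if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
                        return -EAGAIN;
                return l2_cycle_ctr_idx;
        }

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                /* All general-purpose counters are in use. */
                return -EAGAIN;

        /* Column exclusion: only one event per group may count at a time. */
        group = L2_EVT_GROUP(hwc->config_base);
        if (test_bit(group, cluster->used_groups))
                return -EAGAIN;

        set_bit(idx, cluster->used_counters);
        set_bit(group, cluster->used_groups);

        return idx;
}

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
                                     struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cluster->used_counters);
        if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
                clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}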
387 struct cluster_pmu *cluster = data; in l2_cache_handle_irq() local
388 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
396 for_each_set_bit(idx, cluster->used_counters, num_counters) { in l2_cache_handle_irq()
397 struct perf_event *event = cluster->events[idx]; in l2_cache_handle_irq()
409 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_handle_irq()
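Matches 387-409 outline the overflow interrupt handler: it walks every in-use counter on the cluster, updates the corresponding event, and rearms the counter's period. A hedged reconstruction; the overflow-status helpers and l2_cache_event_update are assumed names.

static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
        struct cluster_pmu *cluster = data;
        int num_counters = cluster->l2cache_pmu->num_counters;
        u32 ovsr;
        int idx;

        /* Read and clear the overflow status for this cluster. */
        ovsr = cluster_pmu_getreset_ovsr();             /* assumed helper */
        if (!cluster_pmu_has_overflowed(ovsr))          /* assumed helper */
                return IRQ_NONE;

        for_each_set_bit(idx, cluster->used_counters, num_counters) {
                struct perf_event *event = cluster->events[idx];
                struct hw_perf_event *hwc;

                if (WARN_ON_ONCE(!event))
                        continue;

                if (!cluster_pmu_counter_has_overflowed(ovsr, idx))     /* assumed */
                        continue;

                l2_cache_event_update(event);           /* assumed helper */
                hwc = &event->hw;

                l2_cache_cluster_set_period(cluster, hwc);
        }

        return IRQ_HANDLED;
}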
424 * physical PMUs (per cluster), because we do not support per-task mode in l2_cache_pmu_enable()
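Match 424 records the reasoning behind pmu_enable: every event is bound to the CPU that manages its cluster, so enabling only the local cluster's counters is sufficient. A minimal sketch of what that implies; cluster_pmu_enable()/cluster_pmu_disable() are assumed names for the hardware global-enable accessors.

static void l2_cache_pmu_enable(struct pmu *pmu)
{
        /*
         * There is one logical PMU covering many physical (per-cluster) PMUs,
         * but per-task mode is not supported, so each event runs on its
         * cluster's managing CPU; enabling the local cluster is enough.
         */
        cluster_pmu_enable();           /* assumed: set the global enable bit */
}

static void l2_cache_pmu_disable(struct pmu *pmu)
{
        cluster_pmu_disable();          /* assumed: clear the global enable bit */
}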
441 struct cluster_pmu *cluster; in l2_cache_event_init() local
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
489 if (!cluster) { in l2_cache_event_init()
492 "CPU%d not associated with L2 cluster\n", event->cpu); in l2_cache_event_init()
498 (cluster->on_cpu != event->group_leader->cpu)) { in l2_cache_event_init()
536 event->cpu = cluster->on_cpu; in l2_cache_event_init()
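Matches 441-536 are the cluster-related checks in event_init: look up the cluster for the requested CPU, require that grouped events share the cluster's managing CPU, and finally rebind the event to that CPU so all events for a cluster run in one CPU context. A hedged sketch of just that part (event type, sampling-mode and group validation elided; l2cache_pmu->pdev is an assumed field):

static int l2_cache_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);
        struct cluster_pmu *cluster;

        /* ... event type, sampling-mode and group validation elided ... */

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                /* CPU not yet associated with a cluster by hotplug/firmware */
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                        "CPU%d not associated with L2 cluster\n", event->cpu);
                return -EINVAL;
        }

        /* Grouped events must all be scheduled on the cluster's managing CPU */
        if (event->group_leader != event &&
            cluster->on_cpu != event->group_leader->cpu)
                return -EINVAL;

        hwc->idx = -1;
        hwc->config_base = event->attr.config;

        /* Run every event for this cluster on its managing CPU */
        event->cpu = cluster->on_cpu;

        return 0;
}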
543 struct cluster_pmu *cluster; in l2_cache_event_start() local
551 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_start()
553 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_event_start()
564 cluster_pmu_set_resr(cluster, event_group, event_cc); in l2_cache_event_start()
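Matches 543-564 are from event_start: the period is armed first, then the cycle counter or a general-purpose counter is configured, including programming the RESR group/code via cluster_pmu_set_resr. A hedged sketch; every cluster_pmu_set_* / enable helper and the L2_EVT_CODE / L2CYCLE_CTR_RAW_CODE macros are assumed names.

static void l2_cache_event_start(struct perf_event *event, int flags)
{
        struct cluster_pmu *cluster;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 config;
        u32 event_cc, event_group;

        hwc->state = 0;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_cluster_set_period(cluster, hwc);

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                cluster_pmu_set_evccntcr(0);            /* assumed helper */
        } else {
                config = hwc->config_base;
                event_cc = L2_EVT_CODE(config);         /* assumed decode macro */
                event_group = L2_EVT_GROUP(config);

                cluster_pmu_set_evcntcr(idx, 0);        /* assumed helper */
                cluster_pmu_set_evtyper(idx, event_group);
                cluster_pmu_set_resr(cluster, event_group, event_cc);
                cluster_pmu_set_evfilter_sys_mode(idx); /* see comment at 268 */
        }

        cluster_pmu_counter_enable_interrupt(idx);      /* assumed helper */
        cluster_pmu_counter_enable(idx);                /* assumed helper */
}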
593 struct cluster_pmu *cluster; in l2_cache_event_add() local
595 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_add()
597 idx = l2_cache_get_event_idx(cluster, event); in l2_cache_event_add()
603 cluster->events[idx] = event; in l2_cache_event_add()
618 struct cluster_pmu *cluster; in l2_cache_event_del() local
621 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_del()
624 cluster->events[idx] = NULL; in l2_cache_event_del()
625 l2_cache_clear_event_idx(cluster, event); in l2_cache_event_del()
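Matches 593-625 show add/del: add allocates a counter index on the event's cluster and records the event in cluster->events[], del stops the event and releases both the counter and its group slot. A hedged reconstruction; l2_cache_event_stop is assumed to be the usual counterpart of event_start.

static int l2_cache_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        int idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        idx = l2_cache_get_event_idx(cluster, event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        cluster->events[idx] = event;
        local64_set(&hwc->prev_count, 0);

        if (flags & PERF_EF_START)
                l2_cache_event_start(event, flags);

        perf_event_update_userpage(event);

        return 0;
}

static void l2_cache_event_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        int idx = hwc->idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_event_stop(event, flags | PERF_EF_UPDATE);     /* assumed */
        cluster->events[idx] = NULL;
        l2_cache_clear_event_idx(cluster, event);

        perf_event_update_userpage(event);
}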
742 struct cluster_pmu *cluster; in l2_cache_associate_cpu_with_cluster() local
755 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
756 if (cluster->cluster_id != cpu_cluster_id) in l2_cache_associate_cpu_with_cluster()
760 "CPU%d associated with cluster %d\n", cpu, in l2_cache_associate_cpu_with_cluster()
761 cluster->cluster_id); in l2_cache_associate_cpu_with_cluster()
762 cpumask_set_cpu(cpu, &cluster->cluster_cpus); in l2_cache_associate_cpu_with_cluster()
763 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
764 return cluster; in l2_cache_associate_cpu_with_cluster()
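Matches 742-764 associate a CPU with its cluster: the firmware-assigned cluster id is compared against each cluster_pmu on the list, the CPU is added to that cluster's cpumask, and the per-CPU cluster pointer is set. How cpu_cluster_id is derived is not visible in the listing; the MPIDR-based derivation below is an assumption.

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        u64 mpidr;
        int cpu_cluster_id;
        struct cluster_pmu *cluster;

        /*
         * Assumption: this runs on the CPU being onlined, and the firmware
         * cluster id matches the cluster affinity field of its MPIDR
         * (level 2 for multi-threaded cores, level 1 otherwise).
         */
        mpidr = read_cpuid_mpidr();
        if (mpidr & MPIDR_MT_BITMASK)
                cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
        else
                cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n", cpu,
                         cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                return cluster;
        }

        return NULL;
}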
772 struct cluster_pmu *cluster; in l2cache_pmu_online_cpu() local
776 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
777 if (!cluster) { in l2cache_pmu_online_cpu()
779 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
780 if (!cluster) { in l2cache_pmu_online_cpu()
781 /* Only if broken firmware doesn't list every cluster */ in l2cache_pmu_online_cpu()
782 WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu); in l2cache_pmu_online_cpu()
787 /* If another CPU is managing this cluster, we're done */ in l2cache_pmu_online_cpu()
788 if (cluster->on_cpu != -1) in l2cache_pmu_online_cpu()
792 * All CPUs on this cluster were down, use this one. in l2cache_pmu_online_cpu()
795 cluster->on_cpu = cpu; in l2cache_pmu_online_cpu()
799 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); in l2cache_pmu_online_cpu()
800 enable_irq(cluster->irq); in l2cache_pmu_online_cpu()
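Matches 772-800 are the CPU-online hotplug callback: associate the CPU with a cluster if this is its first time up, then claim the cluster if no other CPU is already managing it, re-pointing the overflow IRQ at the new managing CPU. A hedged reconstruction; the hlist bookkeeping, l2cache_pmu->cpumask and cluster_pmu_reset() are assumptions.

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster) {
                /* First time this CPU has come up */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster) {
                        /* Only if broken firmware doesn't list every cluster */
                        WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
                        return 0;
                }
        }

        /* If another CPU is managing this cluster, we're done */
        if (cluster->on_cpu != -1)
                return 0;

        /* All CPUs on this cluster were down; use this one and reset the HW. */
        cluster->on_cpu = cpu;
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);    /* assumed field */
        cluster_pmu_reset();                            /* assumed helper */

        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
        enable_irq(cluster->irq);

        return 0;
}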
807 struct cluster_pmu *cluster; in l2cache_pmu_offline_cpu() local
813 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
814 if (!cluster) in l2cache_pmu_offline_cpu()
817 /* If this CPU is not managing the cluster, we're done */ in l2cache_pmu_offline_cpu()
818 if (cluster->on_cpu != cpu) in l2cache_pmu_offline_cpu()
821 /* Give up ownership of cluster */ in l2cache_pmu_offline_cpu()
823 cluster->on_cpu = -1; in l2cache_pmu_offline_cpu()
825 /* Any other CPU for this cluster which is still online */ in l2cache_pmu_offline_cpu()
826 cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus, in l2cache_pmu_offline_cpu()
830 disable_irq(cluster->irq); in l2cache_pmu_offline_cpu()
835 cluster->on_cpu = target; in l2cache_pmu_offline_cpu()
837 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target))); in l2cache_pmu_offline_cpu()
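Matches 807-837 are the CPU-offline counterpart: only the managing CPU has work to do; it gives up ownership, and if another CPU of the same cluster is still online the perf context and IRQ affinity migrate there, otherwise the cluster's interrupt is disabled. A hedged reconstruction along the same lines; l2cache_pmu->cpumask and the perf context migration call are assumptions not visible in the listing.

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;
        cpumask_t cluster_online_cpus;
        unsigned int target;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster)
                return 0;

        /* If this CPU is not managing the cluster, we're done */
        if (cluster->on_cpu != cpu)
                return 0;

        /* Give up ownership of cluster */
        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);  /* assumed field */
        cluster->on_cpu = -1;

        /* Any other CPU for this cluster which is still online */
        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids) {
                /* No sibling left online: silence the cluster's interrupt. */
                disable_irq(cluster->irq);
                return 0;
        }

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);      /* assumed */
        cluster->on_cpu = target;
        cpumask_set_cpu(target, &l2cache_pmu->cpumask);
        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));

        return 0;
}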
847 struct cluster_pmu *cluster; in l2_cache_pmu_probe_cluster() local
861 cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); in l2_cache_pmu_probe_cluster()
862 if (!cluster) in l2_cache_pmu_probe_cluster()
865 INIT_LIST_HEAD(&cluster->next); in l2_cache_pmu_probe_cluster()
866 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
867 cluster->cluster_id = fw_cluster_id; in l2_cache_pmu_probe_cluster()
873 cluster->irq = irq; in l2_cache_pmu_probe_cluster()
875 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
876 cluster->on_cpu = -1; in l2_cache_pmu_probe_cluster()
880 "l2-cache-pmu", cluster); in l2_cache_pmu_probe_cluster()
888 "Registered L2 cache PMU cluster %ld\n", fw_cluster_id); in l2_cache_pmu_probe_cluster()
890 spin_lock_init(&cluster->pmu_lock); in l2_cache_pmu_probe_cluster()
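Matches 847-890 cover per-cluster probing: allocate a cluster_pmu, link it into the aggregate PMU's cluster list, record the firmware cluster id and interrupt, mark it unmanaged (on_cpu = -1), request the overflow IRQ under the "l2-cache-pmu" name, and initialise the RESR spinlock. A hedged sketch; how fw_cluster_id and the IRQ are obtained from the firmware description is not visible here, so those steps are simplified placeholders, and the IRQ flags are assumptions.

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct l2cache_pmu *l2cache_pmu = data;
        struct cluster_pmu *cluster;
        unsigned long fw_cluster_id = 0;        /* placeholder: read from firmware */
        int irq;
        int err;

        /* ... obtain fw_cluster_id from the firmware description (elided) ... */

        cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        INIT_LIST_HEAD(&cluster->next);
        list_add(&cluster->next, &l2cache_pmu->clusters);
        cluster->cluster_id = fw_cluster_id;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
        cluster->irq = irq;

        cluster->l2cache_pmu = l2cache_pmu;
        cluster->on_cpu = -1;           /* no managing CPU until hotplug online */

        /* Keep the IRQ disabled until a managing CPU claims the cluster. */
        err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
                               IRQF_NOBALANCING | IRQF_NO_THREAD |
                               IRQF_NO_AUTOEN,
                               "l2-cache-pmu", cluster);
        if (err) {
                dev_err(&pdev->dev,
                        "Unable to request IRQ%d for L2 PMU counters\n", irq);
                return err;
        }

        dev_info(&pdev->dev,
                 "Registered L2 cache PMU cluster %ld\n", fw_cluster_id);

        spin_lock_init(&cluster->pmu_lock);

        return 0;
}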
939 /* Read cluster info and initialize each cluster */ in l2_cache_pmu_probe()
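Match 939 is the step in the top-level probe that walks the firmware-described clusters. A minimal, hedged fragment of how that iteration could be driven, assuming the probe_cluster callback sketched above and that each cluster is a child device of the PMU platform device:

        /* Inside l2_cache_pmu_probe(): read cluster info and initialize each cluster */
        err = device_for_each_child(&pdev->dev, l2cache_pmu,
                                    l2_cache_pmu_probe_cluster);
        if (err)
                return err;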