| /kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/ |
| D | base.c |
      32 struct nvkm_pmu *pmu = device->pmu; in nvkm_pmu_fan_controlled() local
      34 /* Internal PMU FW does not currently control fans in any way, in nvkm_pmu_fan_controlled()
      37 if (pmu && pmu->func->code.size) in nvkm_pmu_fan_controlled()
      40 /* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi in nvkm_pmu_fan_controlled()
      48 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) in nvkm_pmu_pgob() argument
      50 if (pmu && pmu->func->pgob) in nvkm_pmu_pgob()
      51 pmu->func->pgob(pmu, enable); in nvkm_pmu_pgob()
      57 struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work); in nvkm_pmu_recv() local
      58 return pmu->func->recv(pmu); in nvkm_pmu_recv()
      62 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], in nvkm_pmu_send() argument
      [all …]
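The base.c excerpt hints at how nouveau decides whether the PMU firmware owns fan control. Below is a minimal user-space sketch of that decision; the struct layout and function name are simplified stand-ins, not the real nvkm types.

```c
/* Sketch of the fan-control decision suggested above: if a driver-supplied
 * ("internal") PMU firmware image is present, fans stay under software
 * control; otherwise the default/VBIOS PMU firmware is assumed to own the
 * fans on Fermi (chipset >= 0xc0) and newer. Types are stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pmu_func_model { size_t code_size; };          /* plays pmu->func->code.size */
struct pmu_model      { const struct pmu_func_model *func; };

static bool pmu_fw_controls_fan(const struct pmu_model *pmu, unsigned int chipset)
{
	if (pmu && pmu->func->code_size)   /* internal FW loaded: SW keeps the fans */
		return false;
	return chipset >= 0xc0;            /* default/VBIOS FW on Fermi+ drives them */
}

int main(void)
{
	struct pmu_func_model internal = { .code_size = 4096 };
	struct pmu_model with_internal = { .func = &internal };

	printf("internal fw, 0xe4: %d\n", pmu_fw_controls_fan(&with_internal, 0xe4));
	printf("no pmu,      0xc0: %d\n", pmu_fw_controls_fan(NULL, 0xc0));
	printf("no pmu,      0x50: %d\n", pmu_fw_controls_fan(NULL, 0x50));
	return 0;
}
```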
|
| D | gk20a.c |
      51 gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state) in gk20a_pmu_dvfs_target() argument
      53 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_target()
      59 gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state) in gk20a_pmu_dvfs_get_cur_state() argument
      61 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_get_cur_state()
      67 gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu, in gk20a_pmu_dvfs_get_target_state() argument
      70 struct gk20a_pmu_dvfs_data *data = pmu->data; in gk20a_pmu_dvfs_get_target_state()
      71 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_get_target_state()
      86 nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n", in gk20a_pmu_dvfs_get_target_state()
      95 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, in gk20a_pmu_dvfs_get_dev_status() argument
      98 struct nvkm_falcon *falcon = &pmu->base.falcon; in gk20a_pmu_dvfs_get_dev_status()
      [all …]
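The gk20a.c excerpt sketches a small DVFS governor: it reads device load, derives a target clock level and compares it against the current one. The sketch below models that as a plain proportional step; the thresholds, scaling and field names are illustrative assumptions, since the excerpt does not show the actual gk20a formula.

```c
/* Toy DVFS governor in the spirit of the gk20a excerpt: pick a new
 * performance level from the current level and a 0-100 load figure.
 * p_load_target/p_load_max and the step size are illustrative only.
 */
#include <stdio.h>

struct dvfs_data { int p_load_target; int p_load_max; };

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int dvfs_get_target_state(const struct dvfs_data *d,
				 int cur_level, int nr_levels, int load)
{
	int level;

	if (load > d->p_load_max)                       /* saturated: jump to the top */
		level = nr_levels - 1;
	else                                            /* nudge toward the target load */
		level = cur_level + (load - d->p_load_target) / 10;

	return clampi(level, 0, nr_levels - 1);
}

int main(void)
{
	struct dvfs_data d = { .p_load_target = 70, .p_load_max = 90 };

	printf("load 95 -> level %d\n", dvfs_get_target_state(&d, 3, 8, 95));
	printf("load 40 -> level %d\n", dvfs_get_target_state(&d, 3, 8, 40));
	printf("load 72 -> level %d\n", dvfs_get_target_state(&d, 3, 8, 72));
	return 0;
}
```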
|
| /kernel/linux/linux-5.10/arch/x86/kvm/vmx/ |
| D | pmu_intel.c |
      3 * KVM PMU support for Intel CPUs
      19 #include "pmu.h"
      38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) in reprogram_fixed_counters() argument
      42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
      44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i); in reprogram_fixed_counters()
      47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
      52 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); in reprogram_fixed_counters()
      56 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
      60 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) in global_ctrl_changed() argument
      63 u64 diff = pmu->global_ctrl ^ data; in global_ctrl_changed()
      [all …]
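The reprogram_fixed_counters() lines walk each fixed counter and compare the old and new 4-bit control fields packed into IA32_FIXED_CTR_CTRL. Here is a standalone model of that field extraction and change detection; the kvm_pmu/kvm_pmc plumbing is omitted and the printf stands in for the actual reprogramming.

```c
/* Model of the per-counter control fields in IA32_FIXED_CTR_CTRL: each
 * fixed counter owns one 4-bit field, so a guest write only needs to
 * reprogram the counters whose field actually changed, mirroring the loop
 * shape in reprogram_fixed_counters() above.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint8_t fixed_ctrl_field(uint64_t ctrl, int idx)
{
	return (uint8_t)((ctrl >> (idx * 4)) & 0xf);
}

static void reprogram_fixed_counters(uint64_t *cur_ctrl, uint64_t data, int nr_fixed)
{
	for (int i = 0; i < nr_fixed; i++) {
		uint8_t new_ctrl = fixed_ctrl_field(data, i);
		uint8_t old_ctrl = fixed_ctrl_field(*cur_ctrl, i);

		if (old_ctrl == new_ctrl)
			continue;                       /* unchanged: nothing to do */

		printf("fixed counter %d: ctrl 0x%x -> 0x%x\n",
		       i, (unsigned)old_ctrl, (unsigned)new_ctrl);
	}
	*cur_ctrl = data;
}

int main(void)
{
	uint64_t ctrl = 0x0b0;                  /* counter 1 enabled (OS+USR+PMI) */

	reprogram_fixed_counters(&ctrl, 0x0bb, 3);  /* additionally enable counter 0 */
	return 0;
}
```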
|
| /kernel/linux/linux-4.19/arch/x86/kvm/ |
| D | pmu_intel.c |
      2 * KVM PMU support for Intel CPUs
      21 #include "pmu.h"
      38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) in reprogram_fixed_counters() argument
      42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
      44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i); in reprogram_fixed_counters()
      47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
      55 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
      59 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) in global_ctrl_changed() argument
      62 u64 diff = pmu->global_ctrl ^ data; in global_ctrl_changed()
      64 pmu->global_ctrl = data; in global_ctrl_changed()
      [all …]
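This 4.19 copy of the file also shows global_ctrl_changed(): the newly written IA32_PERF_GLOBAL_CTRL value is XORed with the cached one, so only counters whose enable bit flipped need attention. A small model of that diff-and-walk pattern (the printf stands in for the per-counter reprogramming):

```c
/* Model of global_ctrl_changed(): XOR the cached and the new global-control
 * value, then visit only the counter bits that actually changed.
 */
#include <stdint.h>
#include <stdio.h>

static void global_ctrl_changed(uint64_t *global_ctrl, uint64_t data)
{
	uint64_t diff = *global_ctrl ^ data;

	*global_ctrl = data;

	while (diff) {
		int bit = __builtin_ctzll(diff);

		diff &= diff - 1;               /* clear the lowest set bit */
		printf("counter %d %s\n", bit,
		       (data >> bit) & 1 ? "enabled" : "disabled");
	}
}

int main(void)
{
	uint64_t ctrl = 0x3;                    /* counters 0 and 1 enabled */

	global_ctrl_changed(&ctrl, 0x5);        /* disable 1, enable 2 */
	return 0;
}
```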
|
| D | pmu_amd.c |
      2 * KVM PMU support for AMD
      20 #include "pmu.h"
      49 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) in get_msr_base() argument
      51 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in get_msr_base()
      100 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc_amd() argument
      128 return &pmu->gp_counters[msr_to_index(msr)]; in get_gp_pmc_amd()
      131 static unsigned amd_find_arch_event(struct kvm_pmu *pmu, in amd_find_arch_event() argument
      162 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in amd_pmc_idx_to_pmc() argument
      164 unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER); in amd_pmc_idx_to_pmc()
      165 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in amd_pmc_idx_to_pmc()
      [all …]
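get_gp_pmc_amd() resolves an MSR number to a slot in pmu->gp_counters[]. Below is a standalone sketch of that kind of mapping for the legacy AMD event-select/counter MSR pairs; the base constants are what I believe correspond to MSR_K7_EVNTSEL0/MSR_K7_PERFCTR0 and should be treated as assumptions, since the excerpt does not show them.

```c
/* Sketch of mapping a legacy AMD perf MSR to a counter index: event-select
 * registers and counter registers sit in two contiguous banks, so the index
 * is the offset from the respective base. Base values are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define AMD_EVNTSEL_BASE 0xc0010000u   /* assumed MSR_K7_EVNTSEL0 */
#define AMD_PERFCTR_BASE 0xc0010004u   /* assumed MSR_K7_PERFCTR0 */
#define AMD_NR_COUNTERS  4

static int msr_to_index(uint32_t msr)
{
	if (msr >= AMD_EVNTSEL_BASE && msr < AMD_EVNTSEL_BASE + AMD_NR_COUNTERS)
		return (int)(msr - AMD_EVNTSEL_BASE);
	if (msr >= AMD_PERFCTR_BASE && msr < AMD_PERFCTR_BASE + AMD_NR_COUNTERS)
		return (int)(msr - AMD_PERFCTR_BASE);
	return -1;                          /* not a general-purpose counter MSR */
}

int main(void)
{
	printf("0xc0010002 -> %d\n", msr_to_index(0xc0010002));  /* event select 2 */
	printf("0xc0010005 -> %d\n", msr_to_index(0xc0010005));  /* counter 1 */
	printf("0xc0010010 -> %d\n", msr_to_index(0xc0010010));  /* unrelated MSR */
	return 0;
}
```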
|
| /kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/ |
| D | base.c |
      30 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable) in nvkm_pmu_pgob() argument
      32 if (pmu && pmu->func->pgob) in nvkm_pmu_pgob()
      33 pmu->func->pgob(pmu, enable); in nvkm_pmu_pgob()
      39 struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work); in nvkm_pmu_recv() local
      40 return pmu->func->recv(pmu); in nvkm_pmu_recv()
      44 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], in nvkm_pmu_send() argument
      47 if (!pmu || !pmu->func->send) in nvkm_pmu_send()
      49 return pmu->func->send(pmu, reply, process, message, data0, data1); in nvkm_pmu_send()
      55 struct nvkm_pmu *pmu = nvkm_pmu(subdev); in nvkm_pmu_intr() local
      56 if (!pmu->func->intr) in nvkm_pmu_intr()
      [all …]
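The nvkm_pmu_recv() line here is a textbook use of container_of(): the workqueue hands back a pointer to the embedded work member and the driver recovers the enclosing nvkm_pmu from it. A self-contained illustration of the same recovery, with a hand-rolled container_of and stand-in types:

```c
/* Demonstrates the container_of() pattern used by nvkm_pmu_recv() above:
 * given a pointer to a member embedded in a larger struct, recover the
 * address of the enclosing struct. Types here are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct pmu_model {
	const char *name;
	struct work_item recv_work;     /* plays the role of pmu->recv.work */
};

static void recv_handler(struct work_item *work)
{
	struct pmu_model *pmu = container_of(work, struct pmu_model, recv_work);

	printf("handling message for %s\n", pmu->name);
}

int main(void)
{
	struct pmu_model pmu = { .name = "pmu0" };

	recv_handler(&pmu.recv_work);   /* only the member pointer is passed around */
	return 0;
}
```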
|
| D | gk20a.c |
      51 gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state) in gk20a_pmu_dvfs_target() argument
      53 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_target()
      59 gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state) in gk20a_pmu_dvfs_get_cur_state() argument
      61 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_get_cur_state()
      67 gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu, in gk20a_pmu_dvfs_get_target_state() argument
      70 struct gk20a_pmu_dvfs_data *data = pmu->data; in gk20a_pmu_dvfs_get_target_state()
      71 struct nvkm_clk *clk = pmu->base.subdev.device->clk; in gk20a_pmu_dvfs_get_target_state()
      86 nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n", in gk20a_pmu_dvfs_get_target_state()
      95 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, in gk20a_pmu_dvfs_get_dev_status() argument
      98 struct nvkm_falcon *falcon = pmu->base.falcon; in gk20a_pmu_dvfs_get_dev_status()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
| D | i915_pmu.c |
      82 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) in pmu_needs_timer() argument
      84 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in pmu_needs_timer()
      92 enable = pmu->enable; in pmu_needs_timer()
      150 struct i915_pmu *pmu = &i915->pmu; in get_rc6() local
      161 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
      164 pmu->sample[__I915_SAMPLE_RC6].cur = val; in get_rc6()
      173 val = ktime_since(pmu->sleep_last); in get_rc6()
      174 val += pmu->sample[__I915_SAMPLE_RC6].cur; in get_rc6()
      177 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) in get_rc6()
      178 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; in get_rc6()
      [all …]
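The get_rc6() lines show i915 estimating RC6 residency while the device is asleep (last hardware sample plus time since it parked) and then clamping the result so the value reported to perf never moves backwards. A minimal model of that clamp, with made-up field names standing in for the __I915_SAMPLE_RC6* slots:

```c
/* Model of the monotonic clamp in get_rc6(): when the estimate (last sample
 * + time asleep) would fall below what was already reported, hand out the
 * previous value again instead of letting the counter go backwards.
 */
#include <stdint.h>
#include <stdio.h>

struct rc6_state {
	uint64_t last_sample;     /* last value read from hardware        */
	uint64_t last_reported;   /* last value handed out to perf        */
};

static uint64_t rc6_read_estimated(struct rc6_state *st, uint64_t ns_asleep)
{
	uint64_t val = st->last_sample + ns_asleep;

	if (val < st->last_reported)      /* never report a decrease */
		val = st->last_reported;

	st->last_reported = val;
	return val;
}

int main(void)
{
	struct rc6_state st = { .last_sample = 1000, .last_reported = 1500 };

	printf("%llu\n", (unsigned long long)rc6_read_estimated(&st, 200)); /* clamped to 1500 */
	printf("%llu\n", (unsigned long long)rc6_read_estimated(&st, 900)); /* 1900 */
	return 0;
}
```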
|
| /kernel/linux/linux-4.19/drivers/soc/dove/ |
| D | pmu.c |
      3 * Marvell Dove PMU support
      17 #include <linux/soc/dove/pmu.h>
      42 * The PMU contains a register to reset various subsystems within the
      50 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_reset() local
      54 spin_lock_irqsave(&pmu->lock, flags); in pmu_reset_reset()
      55 val = readl_relaxed(pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
      56 writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
      57 writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
      58 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_reset_reset()
      65 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_assert() local
      [all …]
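pmu_reset_reset() reads the PMC_SW_RST register, clears the block's bit and immediately sets it again, all under the PMU spinlock, which pulses that subsystem's reset line. The same read/clear/set sequence modelled on a plain in-memory register (no MMIO and no locking, which are the parts this sketch omits):

```c
/* Model of the Dove PMU reset pulse: clear BIT(id) in the software-reset
 * register and then set it again, leaving all other bits intact. A uint32_t
 * stands in for the memory-mapped PMC_SW_RST register.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint32_t pmc_sw_rst = 0xffffffffu;   /* all blocks out of reset */

static void pmu_reset_pulse(unsigned int id)
{
	uint32_t val = pmc_sw_rst;              /* readl_relaxed(...)          */

	pmc_sw_rst = val & ~BIT(id);            /* assert reset for block `id` */
	pmc_sw_rst = val | BIT(id);             /* release it again            */
}

int main(void)
{
	pmu_reset_pulse(3);
	printf("PMC_SW_RST = 0x%08x\n", pmc_sw_rst);   /* back to all ones */
	return 0;
}
```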
|
| /kernel/linux/linux-5.10/drivers/soc/dove/ |
| D | pmu.c |
      3 * Marvell Dove PMU support
      17 #include <linux/soc/dove/pmu.h>
      42 * The PMU contains a register to reset various subsystems within the
      50 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_reset() local
      54 spin_lock_irqsave(&pmu->lock, flags); in pmu_reset_reset()
      55 val = readl_relaxed(pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
      56 writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
      57 writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
      58 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_reset_reset()
      65 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_assert() local
      [all …]
|
| /kernel/linux/linux-5.10/drivers/perf/ |
| D | fsl_imx8_ddr_perf.c |
      40 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
      66 { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
      67 { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
      68 { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
      74 struct pmu pmu; member
      93 static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap) in ddr_perf_filter_cap_get() argument
      95 u32 quirks = pmu->devtype_data->quirks; in ddr_perf_filter_cap_get()
      114 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_filter_cap_show() local
      120 ddr_perf_filter_cap_get(pmu, cap)); in ddr_perf_filter_cap_show()
      145 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_cpumask_show() local
      [all …]
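ddr_perf_filter_cap_get() answers "does this DDR PMU support filter capability X" queries from the per-SoC quirks word attached via the compatible string. A sketch of that quirks-to-capability lookup follows; the capability and quirk names, and which SoC carries which quirk, are illustrative assumptions rather than the driver's actual identifiers.

```c
/* Sketch of a quirks-driven capability query in the style of
 * ddr_perf_filter_cap_get(): each SoC variant carries a quirks bitmask,
 * and a capability is reported as present when the matching bit is set.
 * Names and per-SoC quirk assignments are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

enum filter_cap { CAP_AXI_ID_FILTER, CAP_AXI_ID_FILTER_ENHANCED };

#define QUIRK_AXI_ID_FILTER           (1u << 0)
#define QUIRK_AXI_ID_FILTER_ENHANCED  (1u << 1)

struct devtype_data { const char *compatible; uint32_t quirks; };

static uint32_t filter_cap_get(const struct devtype_data *d, enum filter_cap cap)
{
	switch (cap) {
	case CAP_AXI_ID_FILTER:
		return !!(d->quirks & QUIRK_AXI_ID_FILTER);
	case CAP_AXI_ID_FILTER_ENHANCED:
		return !!(d->quirks & QUIRK_AXI_ID_FILTER_ENHANCED);
	}
	return 0;
}

int main(void)
{
	struct devtype_data a = { "fsl,imx8m-ddr-pmu",  QUIRK_AXI_ID_FILTER };
	struct devtype_data b = { "fsl,imx8mp-ddr-pmu",
				  QUIRK_AXI_ID_FILTER | QUIRK_AXI_ID_FILTER_ENHANCED };

	printf("%s enhanced filter: %u\n", a.compatible,
	       filter_cap_get(&a, CAP_AXI_ID_FILTER_ENHANCED));
	printf("%s enhanced filter: %u\n", b.compatible,
	       filter_cap_get(&b, CAP_AXI_ID_FILTER_ENHANCED));
	return 0;
}
```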
|
| D | arm_pmu_platform.c |
      26 static int probe_current_pmu(struct arm_pmu *pmu, in probe_current_pmu() argument
      33 pr_info("probing PMU on CPU %d\n", cpu); in probe_current_pmu()
      38 ret = info->init(pmu); in probe_current_pmu()
      46 static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) in pmu_parse_percpu_irq() argument
      49 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_parse_percpu_irq()
      51 ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); in pmu_parse_percpu_irq()
      55 for_each_cpu(cpu, &pmu->supported_cpus) in pmu_parse_percpu_irq()
      97 static int pmu_parse_irqs(struct arm_pmu *pmu) in pmu_parse_irqs() argument
      100 struct platform_device *pdev = pmu->plat_device; in pmu_parse_irqs()
      101 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_parse_irqs()
      [all …]
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/arm/ |
| D | pmu.yaml |
      4 $id: http://devicetree.org/schemas/arm/pmu.yaml#
      14 ARM cores often have a PMU for counting cpu and cache events like cache misses
      15 and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
      22 - apm,potenza-pmu
      24 - arm,arm1136-pmu
      25 - arm,arm1176-pmu
      26 - arm,arm11mpcore-pmu
      27 - arm,cortex-a5-pmu
      28 - arm,cortex-a7-pmu
      29 - arm,cortex-a8-pmu
      [all …]
|
| /kernel/linux/linux-4.19/Documentation/devicetree/bindings/arm/ |
| D | pmu.txt |
      3 ARM cores often have a PMU for counting cpu and cache events like cache misses
      4 and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
      10 "apm,potenza-pmu"
      12 "arm,cortex-a73-pmu"
      13 "arm,cortex-a72-pmu"
      14 "arm,cortex-a57-pmu"
      15 "arm,cortex-a53-pmu"
      16 "arm,cortex-a35-pmu"
      17 "arm,cortex-a17-pmu"
      18 "arm,cortex-a15-pmu"
      [all …]
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/arm/samsung/ |
| D | pmu.yaml |
      4 $id: http://devicetree.org/schemas/arm/samsung/pmu.yaml#
      7 title: Samsung Exynos SoC series Power Management Unit (PMU)
      18 - samsung,exynos3250-pmu
      19 - samsung,exynos4210-pmu
      20 - samsung,exynos4412-pmu
      21 - samsung,exynos5250-pmu
      22 - samsung,exynos5260-pmu
      23 - samsung,exynos5410-pmu
      24 - samsung,exynos5420-pmu
      25 - samsung,exynos5433-pmu
      [all …]
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/pinctrl/ |
| D | marvell,dove-pinctrl.txt |
      9 - reg: register specifiers of MPP, MPP4, and PMU MPP registers
      14 Note: pmu* also allows for Power Management functions listed below
      18 mpp0 0 gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
      19 mpp1 1 gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
      20 mpp2 2 gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
      21 uart1(rts), pmu*
      22 mpp3 3 gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
      23 uart1(cts), lcd-spi(cs1), pmu*
      24 mpp4 4 gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
      25 mpp5 5 gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
      [all …]
|
| /kernel/linux/linux-4.19/Documentation/devicetree/bindings/pinctrl/ |
| D | marvell,dove-pinctrl.txt |
      9 - reg: register specifiers of MPP, MPP4, and PMU MPP registers
      14 Note: pmu* also allows for Power Management functions listed below
      18 mpp0 0 gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
      19 mpp1 1 gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
      20 mpp2 2 gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
      21 uart1(rts), pmu*
      22 mpp3 3 gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
      23 uart1(cts), lcd-spi(cs1), pmu*
      24 mpp4 4 gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
      25 mpp5 5 gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
      [all …]
|
| /kernel/linux/linux-4.19/drivers/perf/ |
| D | arm_pmu_acpi.c |
      34 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't in arm_pmu_acpi_register_irq()
      82 pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n", in arm_pmu_acpi_parse_irqs()
      86 pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); in arm_pmu_acpi_parse_irqs()
      124 struct arm_pmu *pmu; in arm_pmu_acpi_find_alloc_pmu() local
      128 pmu = per_cpu(probed_pmus, cpu); in arm_pmu_acpi_find_alloc_pmu()
      129 if (!pmu || pmu->acpi_cpuid != cpuid) in arm_pmu_acpi_find_alloc_pmu()
      132 return pmu; in arm_pmu_acpi_find_alloc_pmu()
      135 pmu = armpmu_alloc_atomic(); in arm_pmu_acpi_find_alloc_pmu()
      136 if (!pmu) { in arm_pmu_acpi_find_alloc_pmu()
      137 pr_warn("Unable to allocate PMU for CPU%d\n", in arm_pmu_acpi_find_alloc_pmu()
      [all …]
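arm_pmu_acpi_find_alloc_pmu() scans the per-CPU probed_pmus slots for an arm_pmu already created for this CPU's ACPI cpuid and only allocates a new one when no match exists. The same find-or-allocate shape, reduced to plain arrays (per_cpu/armpmu_alloc_atomic are replaced by an array and calloc):

```c
/* Reduced model of arm_pmu_acpi_find_alloc_pmu(): CPUs that share an ACPI
 * cpuid (the same PMU implementation) share one pmu object; a new one is
 * allocated only when no already-probed CPU carries that cpuid.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct pmu_obj { unsigned long acpi_cpuid; };

static struct pmu_obj *probed_pmus[NR_CPUS];   /* per-CPU slots */

static struct pmu_obj *find_alloc_pmu(unsigned long cpuid)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct pmu_obj *pmu = probed_pmus[cpu];

		if (pmu && pmu->acpi_cpuid == cpuid)
			return pmu;                    /* reuse the existing PMU */
	}

	struct pmu_obj *pmu = calloc(1, sizeof(*pmu));
	if (!pmu)
		return NULL;
	pmu->acpi_cpuid = cpuid;
	return pmu;
}

int main(void)
{
	probed_pmus[0] = find_alloc_pmu(0x41);   /* first CPU of a cluster        */
	probed_pmus[1] = find_alloc_pmu(0x41);   /* same cpuid: reuses the object */
	probed_pmus[2] = find_alloc_pmu(0x42);   /* different implementation      */

	printf("cpu0 and cpu1 share a pmu: %s\n",
	       probed_pmus[0] == probed_pmus[1] ? "yes" : "no");
	return 0;
}
```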
|
| D | arm_pmu_platform.c |
      25 static int probe_current_pmu(struct arm_pmu *pmu, in probe_current_pmu() argument
      32 pr_info("probing PMU on CPU %d\n", cpu); in probe_current_pmu()
      37 ret = info->init(pmu); in probe_current_pmu()
      45 static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) in pmu_parse_percpu_irq() argument
      48 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_parse_percpu_irq()
      50 ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); in pmu_parse_percpu_irq()
      54 for_each_cpu(cpu, &pmu->supported_cpus) in pmu_parse_percpu_irq()
      96 static int pmu_parse_irqs(struct arm_pmu *pmu) in pmu_parse_irqs() argument
      99 struct platform_device *pdev = pmu->plat_device; in pmu_parse_irqs()
      100 struct pmu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_parse_irqs()
      [all …]
|
| D | arm_pmu.c |
      104 if (type == event->pmu->type) in armpmu_map_event()
      121 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_set_period()
      163 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_event_update()
      193 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_stop()
      197 * ARM pmu always has to update the counter, so ignore in armpmu_stop()
      209 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_start()
      213 * ARM pmu always has to reprogram the period, so ignore in armpmu_start()
      234 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_del()
      250 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); in armpmu_add()
      283 validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, in validate_event() argument
      [all …]
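armpmu_event_update() and armpmu_event_set_period() follow the generic perf pattern for a fixed-width hardware counter: remember the previous raw reading and, on each update, fold the wrapped difference into the 64-bit event count. The excerpt only shows the function heads, so the sketch below reconstructs that standard pattern rather than the file's exact code.

```c
/* Standard perf-style counter accumulation (reconstructed pattern, not the
 * verbatim arm_pmu.c code): hardware provides a counter of `width` bits
 * that can wrap, so each update adds (new - prev) modulo 2^width to a
 * 64-bit running total.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_counter {
	uint64_t prev_raw;     /* last raw value read from hardware */
	uint64_t count;        /* accumulated 64-bit event count    */
	unsigned width;        /* hardware counter width in bits    */
};

static void event_update(struct hw_counter *hwc, uint64_t new_raw)
{
	uint64_t mask = (hwc->width == 64) ? ~0ull : ((1ull << hwc->width) - 1);
	uint64_t delta = (new_raw - hwc->prev_raw) & mask;

	hwc->prev_raw = new_raw;
	hwc->count += delta;
}

int main(void)
{
	struct hw_counter hwc = { .prev_raw = 0xfffffff0, .width = 32 };

	event_update(&hwc, 0x10);    /* counter wrapped: delta is 0x20 */
	printf("count = %llu\n", (unsigned long long)hwc.count);
	return 0;
}
```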
|
| /kernel/linux/linux-5.10/arch/x86/kvm/svm/ |
| D | pmu.c |
      3 * KVM PMU support for AMD
      18 #include "pmu.h"
      47 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) in get_msr_base() argument
      49 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in get_msr_base()
      98 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc_amd() argument
      126 return &pmu->gp_counters[msr_to_index(msr)]; in get_gp_pmc_amd()
      129 static unsigned amd_find_arch_event(struct kvm_pmu *pmu, in amd_find_arch_event() argument
      160 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in amd_pmc_idx_to_pmc() argument
      162 unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER); in amd_pmc_idx_to_pmc()
      163 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in amd_pmc_idx_to_pmc()
      [all …]
|
| /kernel/linux/linux-4.19/drivers/gpu/drm/i915/ |
| D | i915_pmu.c |
      82 enable = i915->pmu.enable; in pmu_needs_timer()
      115 if (!i915->pmu.base.event_init) in i915_pmu_gt_parked()
      118 spin_lock_irq(&i915->pmu.lock); in i915_pmu_gt_parked()
      123 i915->pmu.timer_enabled = pmu_needs_timer(i915, false); in i915_pmu_gt_parked()
      124 spin_unlock_irq(&i915->pmu.lock); in i915_pmu_gt_parked()
      129 if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) { in __i915_pmu_maybe_start_timer()
      130 i915->pmu.timer_enabled = true; in __i915_pmu_maybe_start_timer()
      131 i915->pmu.timer_last = ktime_get(); in __i915_pmu_maybe_start_timer()
      132 hrtimer_start_range_ns(&i915->pmu.timer, in __i915_pmu_maybe_start_timer()
      140 if (!i915->pmu.base.event_init) in i915_pmu_gt_unparked()
      [all …]
|
| /kernel/linux/linux-4.19/virt/kvm/arm/ |
| D | pmu.c |
      28 * kvm_pmu_get_counter_value - get PMU counter value
      35 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value() local
      36 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value()
      53 * kvm_pmu_set_counter_value - set PMU counter value
      68 * kvm_pmu_stop_counter - stop PMU counter
      69 * @pmc: The PMU counter pointer
      89 * kvm_pmu_vcpu_reset - reset pmu state for cpu
      96 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset() local
      99 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
      100 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_reset()
      [all …]
|
| /kernel/linux/linux-4.19/Documentation/devicetree/bindings/perf/ |
| D | apm-xgene-pmu.txt |
      1 * APM X-Gene SoC PMU bindings
      3 This is APM X-Gene SoC PMU (Performance Monitoring Unit) module.
      4 The following PMU devices are supported:
      11 The following section describes the SoC PMU DT node binding.
      14 - compatible : Shall be "apm,xgene-pmu" for revision 1 or
      15 "apm,xgene-pmu-v2" for revision 2.
      19 - reg : First resource shall be the CPU bus PMU resource.
      20 - interrupts : Interrupt-specifier for PMU IRQ.
      23 - compatible : Shall be "apm,xgene-pmu-l3c".
      24 - reg : First resource shall be the L3C PMU resource.
      [all …]
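The binding states that the SoC PMU node's compatible string selects the hardware revision ("apm,xgene-pmu" for revision 1, "apm,xgene-pmu-v2" for revision 2). In driver terms that is a small compatible-to-data lookup, modelled below as a plain table; in the kernel this role would be played by an of_device_id table, and no revisions beyond the two named in the binding are assumed.

```c
/* Model of matching the X-Gene PMU compatible string to a revision, as the
 * binding text above describes. Only the two compatibles from the binding
 * are listed; anything else is reported as unknown.
 */
#include <stdio.h>
#include <string.h>

struct xgene_pmu_id { const char *compatible; int revision; };

static const struct xgene_pmu_id xgene_pmu_ids[] = {
	{ "apm,xgene-pmu",    1 },
	{ "apm,xgene-pmu-v2", 2 },
};

static int xgene_pmu_revision(const char *compatible)
{
	for (size_t i = 0; i < sizeof(xgene_pmu_ids) / sizeof(xgene_pmu_ids[0]); i++) {
		if (strcmp(xgene_pmu_ids[i].compatible, compatible) == 0)
			return xgene_pmu_ids[i].revision;
	}
	return -1;   /* unknown node */
}

int main(void)
{
	printf("apm,xgene-pmu-v2 -> rev %d\n", xgene_pmu_revision("apm,xgene-pmu-v2"));
	printf("apm,xgene-pmu    -> rev %d\n", xgene_pmu_revision("apm,xgene-pmu"));
	return 0;
}
```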
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/perf/ |
| D | apm-xgene-pmu.txt |
      1 * APM X-Gene SoC PMU bindings
      3 This is APM X-Gene SoC PMU (Performance Monitoring Unit) module.
      4 The following PMU devices are supported:
      11 The following section describes the SoC PMU DT node binding.
      14 - compatible : Shall be "apm,xgene-pmu" for revision 1 or
      15 "apm,xgene-pmu-v2" for revision 2.
      19 - reg : First resource shall be the CPU bus PMU resource.
      20 - interrupts : Interrupt-specifier for PMU IRQ.
      23 - compatible : Shall be "apm,xgene-pmu-l3c".
      24 - reg : First resource shall be the L3C PMU resource.
      [all …]
|