| /kernel/linux/linux-6.6/drivers/gpu/drm/msm/adreno/ |
| D | a6xx_gmu.c |
|   19 | static void a6xx_gmu_fault(struct a6xx_gmu *gmu) | in a6xx_gmu_fault() argument |
|   21 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | in a6xx_gmu_fault() |
|   26 | gmu->hung = true; | in a6xx_gmu_fault() |
|   37 | struct a6xx_gmu *gmu = data; | in a6xx_gmu_irq() local |
|   40 | status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); | in a6xx_gmu_irq() |
|   41 | gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); | in a6xx_gmu_irq() |
|   44 | dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); | in a6xx_gmu_irq() |
|   46 | a6xx_gmu_fault(gmu); | in a6xx_gmu_irq() |
|   50 | dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); | in a6xx_gmu_irq() |
|   53 | dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", | in a6xx_gmu_irq() |
| [all …] |
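The a6xx_gmu.c matches above trace a single interrupt pattern: read the AO host interrupt status, acknowledge it by writing the same bits back to the clear register, then dispatch each error bit, marking the GMU hung on a watchdog bite. A minimal sketch of that flow follows; the two interrupt mask names are assumptions, since only the status/clear registers and the log strings appear in the matches.

    /* Sketch of the read/clear/dispatch IRQ flow seen in the matches above. */
    static irqreturn_t a6xx_gmu_irq(int irq, void *data)
    {
            struct a6xx_gmu *gmu = data;
            u32 status;

            /* Read the pending bits, then ack exactly what we saw */
            status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
            gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

            /* Assumed mask name: a watchdog bite means the GMU firmware hung */
            if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
                    dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
                    a6xx_gmu_fault(gmu);
            }

            /* Assumed mask name: bus errors are logged but not fatal here */
            if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
                    dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

            return IRQ_HANDLED;
    }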
|
| D | a6xx_gmu.h |
|   22 | * These define the different GMU wake up options - these define how both the |
|   23 | * CPU and the GMU bring up the hardware |
|   26 | /* The GMU has already been booted and the retention registers are active */ |
|   29 | /* The GMU is coming up for the first time or back from a power collapse */ |
|   33 | * These define the level of control that the GMU has - the higher the number |
|   34 | * the more things that the GMU hardware controls on its own. |
|   37 | /* The GMU does not do any idle state management */ |
|   40 | /* The GMU manages SPTP power collapse */ |
|   43 | /* The GMU does automatic IFPC (intra-frame power collapse) */ |
|   49 | /* For serializing communication with the GMU: */ |
| [all …] |
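The a6xx_gmu.h comments above describe two wake-up options and three escalating idle-control levels. A sketch of how those comments plausibly pair with enum values follows; the enum tag and enumerator names are assumptions, as the matches show only the comment lines.

    /* Assumed enumerator names; the comments are the ones matched above. */
    enum a6xx_gmu_boot_state {
            /* The GMU has already been booted and the retention registers are active */
            GMU_WARM_BOOT = 0,
            /* The GMU is coming up for the first time or back from a power collapse */
            GMU_COLD_BOOT,
    };

    /* Higher values hand more idle-state management to the GMU hardware. */
    enum a6xx_gmu_idle_level {
            /* The GMU does not do any idle state management */
            GMU_IDLE_STATE_ACTIVE = 0,
            /* The GMU manages SPTP power collapse */
            GMU_IDLE_STATE_SPTP,
            /* The GMU does automatic IFPC (intra-frame power collapse) */
            GMU_IDLE_STATE_IFPC,
    };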
|
| D | a6xx_hfi.c |
|   26 | static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, | in a6xx_hfi_queue_read() argument |
|   42 | * If we are to assume that the GMU firmware is in fact a rational actor | in a6xx_hfi_queue_read() |
|   57 | if (!gmu->legacy) | in a6xx_hfi_queue_read() |
|   64 | static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, | in a6xx_hfi_queue_write() argument |
|   88 | if (!gmu->legacy) { | in a6xx_hfi_queue_write() |
|   96 | gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); | in a6xx_hfi_queue_write() |
|  100 | static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, | in a6xx_hfi_wait_for_ack() argument |
|  103 | struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; | in a6xx_hfi_wait_for_ack() |
|  108 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, | in a6xx_hfi_wait_for_ack() |
|  112 | DRM_DEV_ERROR(gmu->dev, | in a6xx_hfi_wait_for_ack() |
| [all …] |
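Taken together, the a6xx_hfi.c matches outline the host-to-GMU message round trip: copy a message (whose header carries an id and sequence number) into the command queue, ring the HOST2GMU doorbell, then poll GMU2HOST_INTR_INFO for the acknowledgment and report a timeout otherwise. A condensed sketch is below; per match line 96, the doorbell write happens inside a6xx_hfi_queue_write() itself. HFI_COMMAND_QUEUE, the response mask name, and the poll interval/timeout values are assumptions.

    /* Condensed sketch of the host -> GMU HFI round trip. */
    static int hfi_send_and_wait(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
                                 u32 *data, u32 dwords)
    {
            struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
            u32 val;
            int ret;

            /* Copy the message in; this also rings REG_A6XX_GMU_HOST2GMU_INTR_SET */
            ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
            if (ret)
                    return ret;

            /* Poll until the GMU raises the response interrupt (assumed 5 ms cap) */
            ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                                   val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ,
                                   100, 5000);
            if (ret)
                    DRM_DEV_ERROR(gmu->dev,
                                  "Message %u id %u timed out waiting for response\n",
                                  seqnum, id);

            return ret;
    }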
|
| D | a6xx_gpu.c |
|   23 | /* Check that the GMU is idle */ | in _a6xx_check_idle() |
|   24 | if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu)) | in _a6xx_check_idle() |
|  186 | * For PM4 the GMU register offsets are calculated from the base of the | in a6xx_submit() |
|  702 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | in a6xx_set_hwcg() local |
|  725 | gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); | in a6xx_set_hwcg() |
|  732 | gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1); | in a6xx_set_hwcg() |
| 1199 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | in hw_init() local |
| 1203 | /* Make sure the GMU keeps the GPU on while we set it up */ | in hw_init() |
| 1204 | ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); | in hw_init() |
| 1225 | a6xx_sptprac_enable(gmu); | in hw_init() |
| [all …] |
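The a6xx_set_hwcg() matches toggle bit 0 of REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL through a read-modify-write helper: gmu_rmw(gmu, reg, mask, or) clears the bits in mask and then ORs in or, so (1, 0) clears bit 0 and (0, 1) sets it. A minimal sketch of such a helper, assuming the gmu_read()/gmu_write() accessors shown elsewhere in this listing:

    /* Read-modify-write: clear 'mask', then set 'or'. */
    static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
    {
            u32 val = gmu_read(gmu, reg);

            val &= ~mask;

            gmu_write(gmu, reg, val | or);
    }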
|
| D | a6xx_gpu.h |
|   23 | struct a6xx_gmu gmu; | member |
|   86 | int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu); |
|   88 | bool a6xx_gmu_isidle(struct a6xx_gmu *gmu); |
|   90 | int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); |
|   91 | void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); |
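The OOB ("out of band") declarations above bracket direct CPU access to GPU registers, as the hw_init() matches in a6xx_gpu.c suggest. A hedged usage sketch follows; example_touch_gpu() is a hypothetical caller, not a function from the driver.

    /* Hypothetical caller illustrating the set/clear OOB bracket. */
    static int example_touch_gpu(struct a6xx_gpu *a6xx_gpu)
    {
            int ret;

            /* Make sure the GMU keeps the GPU on while we set it up */
            ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
            if (ret)
                    return ret;

            /* ... program GPU registers from the CPU here ... */

            /* Tell the GMU we are done and it may power manage the GPU again */
            a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

            return 0;
    }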
|
| D | a6xx_gpu_state.c |
|  144 | if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu)) | in a6xx_crashdumper_run() |
|  775 | /* Read a block of GMU registers */ |
|  784 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | in _a6xx_get_gmu_registers() local |
|  804 | val = gmu_read_rscc(gmu, offset); | in _a6xx_get_gmu_registers() |
|  806 | val = gmu_read(gmu, offset); | in _a6xx_get_gmu_registers() |
|  827 | /* Get the CX GMU registers from AHB */ | in a6xx_get_gmu_registers() |
|  833 | if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) | in a6xx_get_gmu_registers() |
|  871 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | in a6xx_snapshot_gmu_hfi_history() local |
|  874 | BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history)); | in a6xx_snapshot_gmu_hfi_history() |
|  876 | for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { | in a6xx_snapshot_gmu_hfi_history() |
| [all …] |
|
| D | a6xx_hfi.h |
|   49 | /* This is the outgoing queue to the GMU */ |
|   52 | /* This is the incoming response queue from the GMU */ |
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/ |
| D | a6xx_gmu.c |
|   17 | static void a6xx_gmu_fault(struct a6xx_gmu *gmu) | in a6xx_gmu_fault() argument |
|   19 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | in a6xx_gmu_fault() |
|   26 | gmu->hung = true; | in a6xx_gmu_fault() |
|   37 | struct a6xx_gmu *gmu = data; | in a6xx_gmu_irq() local |
|   40 | status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); | in a6xx_gmu_irq() |
|   41 | gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); | in a6xx_gmu_irq() |
|   44 | dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); | in a6xx_gmu_irq() |
|   46 | a6xx_gmu_fault(gmu); | in a6xx_gmu_irq() |
|   50 | dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); | in a6xx_gmu_irq() |
|   53 | dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", | in a6xx_gmu_irq() |
| [all …] |
|
| D | a6xx_gmu.h |
|   20 | * These define the different GMU wake up options - these define how both the |
|   21 | * CPU and the GMU bring up the hardware |
|   24 | /* The GMU has already been booted and the retention registers are active */ |
|   27 | /* The GMU is coming up for the first time or back from a power collapse */ |
|   31 | * These define the level of control that the GMU has - the higher the number |
|   32 | * the more things that the GMU hardware controls on its own. |
|   35 | /* The GMU does not do any idle state management */ |
|   38 | /* The GMU manages SPTP power collapse */ |
|   41 | /* The GMU does automatic IFPC (intra-frame power collapse) */ |
|   90 | static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) | in gmu_read() argument |
| [all …] |
|
| D | a6xx_hfi.c |
|   26 | static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, | in a6xx_hfi_queue_read() argument |
|   40 | * If we are to assume that the GMU firmware is in fact a rational actor | in a6xx_hfi_queue_read() |
|   55 | if (!gmu->legacy) | in a6xx_hfi_queue_read() |
|   62 | static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, | in a6xx_hfi_queue_write() argument |
|   84 | if (!gmu->legacy) { | in a6xx_hfi_queue_write() |
|   92 | gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); | in a6xx_hfi_queue_write() |
|   96 | static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, | in a6xx_hfi_wait_for_ack() argument |
|   99 | struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; | in a6xx_hfi_wait_for_ack() |
|  104 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, | in a6xx_hfi_wait_for_ack() |
|  108 | DRM_DEV_ERROR(gmu->dev, | in a6xx_hfi_wait_for_ack() |
| [all …] |
|
| D | a6xx_gpu.h |
|   33 | struct a6xx_gmu gmu; | member |
|   75 | int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu); |
|   77 | bool a6xx_gmu_isidle(struct a6xx_gmu *gmu); |
|   79 | int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); |
|   80 | void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); |
|
| D | a6xx_gpu.c |
|   20 | /* Check that the GMU is idle */ | in _a6xx_check_idle() |
|   21 | if (!a6xx_gmu_isidle(&a6xx_gpu->gmu)) | in _a6xx_check_idle() |
|  153 | * For PM4 the GMU register offsets are calculated from the base of the | in a6xx_submit() |
|  431 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | in a6xx_set_hwcg() local |
|  451 | gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); | in a6xx_set_hwcg() |
|  457 | gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1); | in a6xx_set_hwcg() |
|  714 | /* Make sure the GMU keeps the GPU on while we set it up */ | in a6xx_hw_init() |
|  715 | a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); | in a6xx_hw_init() |
|  931 | * Tell the GMU that we are done touching the GPU and it can start power | in a6xx_hw_init() |
|  934 | a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); | in a6xx_hw_init() |
| [all …] |
|
| D | a6xx_hfi.h |
|   38 | /* This is the outgoing queue to the GMU */ |
|   41 | /* This is the incoming response queue from the GMU */ |
|
| D | a6xx_gpu_state.c |
|  136 | if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu)) | in a6xx_crashdumper_run() |
|  735 | /* Read a block of GMU registers */ |
|  744 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | in _a6xx_get_gmu_registers() local |
|  764 | val = gmu_read_rscc(gmu, offset); | in _a6xx_get_gmu_registers() |
|  766 | val = gmu_read(gmu, offset); | in _a6xx_get_gmu_registers() |
|  787 | /* Get the CX GMU registers from AHB */ | in a6xx_get_gmu_registers() |
|  793 | if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) | in a6xx_get_gmu_registers() |
|  925 | if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) | in a6xx_gpu_state_get() |
| 1179 | drm_puts(p, "registers-gmu:\n"); | in a6xx_show() |
|
| D | a6xx_gpu_state.h |
|  325 | /* GMU GX */ |
|  334 | /* GMU CX */ |
|  344 | /* GMU AO */ |
|
| /kernel/linux/linux-6.6/Documentation/devicetree/bindings/display/msm/ |
| D | gmu.yaml |
|    6 | $id: http://devicetree.org/schemas/display/msm/gmu.yaml# |
|    9 | title: GMU attached to certain Adreno GPUs |
|   15 | These bindings describe the Graphics Management Unit (GMU) that is attached |
|   16 | to members of the Adreno A6xx GPU family. The GMU provides on-device power |
|   24 | - pattern: '^qcom,adreno-gmu-6[0-9][0-9]\.[0-9]$' |
|   25 | - const: qcom,adreno-gmu |
|   26 | - const: qcom,adreno-gmu-wrapper |
|   46 | - description: GMU HFI interrupt |
|   47 | - description: GMU interrupt |
|   52 | - const: gmu |
| [all …] |
|
| D | gpu.yaml |
|  111 | qcom,gmu: |
|  114 | For GMU attached devices a phandle to the GMU device that will |
|  187 | - const: gmu |
|  188 | description: CX GMU clock |
|  208 | then: # Starting with A6xx, the clocks are usually defined in the GMU node |
|  272 | // Example a6xx (with GMU): |
|  309 | qcom,gmu = <&gmu>; |
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/display/msm/ |
| D | gmu.yaml |
|    6 | $id: "http://devicetree.org/schemas/display/msm/gmu.yaml#" |
|    9 | title: Devicetree bindings for the GMU attached to certain Adreno GPUs |
|   15 | These bindings describe the Graphics Management Unit (GMU) that is attached |
|   16 | to members of the Adreno A6xx GPU family. The GMU provides on-device power |
|   24 | - qcom,adreno-gmu-630.2 |
|   25 | - const: qcom,adreno-gmu |
|   29 | - description: Core GMU registers |
|   30 | - description: GMU PDC registers |
|   31 | - description: GMU PDC sequence registers |
|   35 | - const: gmu |
| [all …] |
|
| D | gpu.txt |
|   20 | For GMU attached devices the GPU clocks are not used and are not required. The |
|   30 | - qcom,gmu: For GMU attached devices a phandle to the GMU device that will |
|   89 | Example a6xx (with GMU): |
|  103 | * controlled entirely by the GMU |
|  143 | qcom,gmu = <&gmu>; |
|
| /kernel/linux/linux-6.6/arch/arm64/boot/dts/qcom/ |
| D | msm8992.dtsi |
|   31 | gmu-sram@0 { |
|
| /kernel/linux/linux-5.10/arch/arm64/boot/dts/qcom/ |
| D | sm8150.dtsi |
|  628 | qcom,gmu = <&gmu>; |
|  670 | gmu: gmu@2c6a000 { | label |
|  671 | compatible = "qcom,adreno-gmu-640.1", "qcom,adreno-gmu"; |
|  676 | reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; |
|  680 | interrupt-names = "hfi", "gmu"; |
|  687 | clock-names = "ahb", "gmu", "cxo", "axi", "memnoc"; |
|
| /kernel/linux/linux-5.10/Documentation/devicetree/bindings/sram/ |
| D | qcom,ocmem.yaml |
|   95 | gmu-sram@0 { |
|
| /kernel/linux/linux-5.10/drivers/clk/qcom/ |
| D | gdsc.c |
|  499 | * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU |
|  504 | * the GMU crashes it could leave the GX on. In order to successfully bring back |
|  513 | * driver. During power up, nothing will happen from the CPU (and the GMU will |
|
| /kernel/linux/linux-6.6/Documentation/devicetree/bindings/sram/ |
| D | qcom,ocmem.yaml |
|  120 | gmu-sram@0 { |
|
| /kernel/linux/linux-6.6/drivers/clk/qcom/ |
| D | gdsc.c |
|  555 | * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU |
|  560 | * the GMU crashes it could leave the GX on. In order to successfully bring back |
|  569 | * driver. During power up, nothing will happen from the CPU (and the GMU will |
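The gdsc.c comment explains why the GX domain's enable hook must not touch the hardware: the CPU only records a vote with genpd while the GMU actually drives the domain, and that recorded vote is what lets the recovery path force GX back off after a GMU crash. The upstream helper behind this comment is, in essence, a no-op enable; a minimal sketch (the function name matches the upstream helper, but treat the body as illustrative):

    #include <linux/pm_domain.h>

    /* Enable hook for the GX gdsc: register the genpd vote, touch nothing */
    int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
    {
            /* Do nothing but give genpd the impression that we were successful */
            return 0;
    }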
|