Searched refs:gmu (Results 1 – 6 of 6) sorted by relevance
/drivers/gpu/drm/msm/adreno/

a6xx_gmu.c
     17  static void a6xx_gmu_fault(struct a6xx_gmu *gmu)   in a6xx_gmu_fault() argument
     19  struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);   in a6xx_gmu_fault()
     24  gmu->hung = true;   in a6xx_gmu_fault()
     35  struct a6xx_gmu *gmu = data;   in a6xx_gmu_irq() local
     38  status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);   in a6xx_gmu_irq()
     39  gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);   in a6xx_gmu_irq()
     42  dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");   in a6xx_gmu_irq()
     44  a6xx_gmu_fault(gmu);   in a6xx_gmu_irq()
     48  dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");   in a6xx_gmu_irq()
     51  dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",   in a6xx_gmu_irq()
    [all …]
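
A hedged reading of the a6xx_gmu.c hits: the interrupt handler latches REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS, writes the same value back to the CLR register to acknowledge it, then reports each error condition, with a watchdog bite escalating to a6xx_gmu_fault(), which sets gmu->hung (line 24). A minimal sketch of that read-then-clear pattern follows; the two bit masks are illustrative placeholders rather than the driver's real interrupt definitions, and the function is assumed to live inside a6xx_gmu.c so it can reach the static helpers above.

/* Illustrative bit positions only, not the real A6XX interrupt masks */
#define EXAMPLE_GMU_INT_WDOG_BITE       BIT(0)
#define EXAMPLE_GMU_INT_AHB_ERROR       BIT(1)

static irqreturn_t example_gmu_irq(int irq, void *data)
{
        struct a6xx_gmu *gmu = data;
        u32 status;

        /* Latch everything pending and acknowledge it in one shot */
        status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

        if (status & EXAMPLE_GMU_INT_WDOG_BITE) {
                dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
                a6xx_gmu_fault(gmu);    /* sets gmu->hung, see line 24 above */
        }

        if (status & EXAMPLE_GMU_INT_AHB_ERROR)
                dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

        return IRQ_HANDLED;
}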

a6xx_hfi.c
     26  static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,   in a6xx_hfi_queue_read() argument
     55  if (!gmu->legacy)   in a6xx_hfi_queue_read()
     62  static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,   in a6xx_hfi_queue_write() argument
     84  if (!gmu->legacy) {   in a6xx_hfi_queue_write()
     92  gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);   in a6xx_hfi_queue_write()
     96  static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,   in a6xx_hfi_wait_for_ack() argument
     99  struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];   in a6xx_hfi_wait_for_ack()
    104  ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,   in a6xx_hfi_wait_for_ack()
    108  DRM_DEV_ERROR(gmu->dev,   in a6xx_hfi_wait_for_ack()
    115  gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,   in a6xx_hfi_wait_for_ack()
    [all …]
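
The a6xx_hfi.c matches outline the host-to-GMU message flow: a6xx_hfi_queue_write() rings the host-to-GMU doorbell through REG_A6XX_GMU_HOST2GMU_INTR_SET, and a6xx_hfi_wait_for_ack() polls REG_A6XX_GMU_GMU2HOST_INTR_INFO for the response and clears the bit it consumed. The sketch below assumes it sits in a6xx_hfi.c, assumes gmu_poll_timeout() wraps readl_poll_timeout() with (gmu, reg, val, cond, sleep_us, timeout_us), and uses a hypothetical message-queue mask and illustrative 100/5000 microsecond poll parameters; none of those are confirmed by the hits above.

/* Hypothetical mask for the GMU->host message-queue interrupt bit */
#define EXAMPLE_HFI_MSGQ_IRQ    BIT(0)

static int example_hfi_kick_and_wait(struct a6xx_gmu *gmu)
{
        u32 val;
        int ret;

        /* Ring the host->GMU doorbell once the message has been queued */
        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);

        /* Poll the GMU->host interrupt info register for the ack */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                               val & EXAMPLE_HFI_MSGQ_IRQ, 100, 5000);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "timed out waiting for HFI ack\n");
                return ret;
        }

        /* Acknowledge only the bit we consumed */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, EXAMPLE_HFI_MSGQ_IRQ);
        return 0;
}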

a6xx_gmu.h
     94  static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)   in gmu_read() argument
     96  return msm_readl(gmu->mmio + (offset << 2));   in gmu_read()
     99  static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)   in gmu_write() argument
    101  return msm_writel(value, gmu->mmio + (offset << 2));   in gmu_write()
    105  gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)   in gmu_write_bulk() argument
    107  memcpy_toio(gmu->mmio + (offset << 2), data, size);   in gmu_write_bulk()
    111  static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)   in gmu_rmw() argument
    113  u32 val = gmu_read(gmu, reg);   in gmu_rmw()
    117  gmu_write(gmu, reg, val | or);   in gmu_rmw()
    120  static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)   in gmu_read64() argument
    [all …]
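
The accessors in a6xx_gmu.h set the conventions the rest of these results rely on: register offsets are dword indices (hence the offset << 2 scaling onto gmu->mmio), gmu_rmw() reads, masks out the mask bits and ORs in or (the masking step falls in the elided lines between 113 and 117), and gmu_read64() stitches a lo/hi register pair into one 64-bit value. A usage sketch with made-up offsets, assuming it lives next to these inline helpers:

/* Hypothetical dword offsets, not real A6XX registers */
#define EXAMPLE_REG_CTRL        0x0100
#define EXAMPLE_REG_TICKS_LO    0x0104
#define EXAMPLE_REG_TICKS_HI    0x0105

static u64 example_accessor_usage(struct a6xx_gmu *gmu)
{
        /* Clear bit 0 and set bit 1 in a single read-modify-write cycle */
        gmu_rmw(gmu, EXAMPLE_REG_CTRL, BIT(0), BIT(1));

        /* 64-bit counters are exposed as a lo/hi pair of 32-bit registers */
        return gmu_read64(gmu, EXAMPLE_REG_TICKS_LO, EXAMPLE_REG_TICKS_HI);
}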

a6xx_gpu.h
     33  struct a6xx_gmu gmu;   member
     80  int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
     82  bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
     84  int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
     85  void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
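
a6xx_gpu.h shows that struct a6xx_gpu embeds the GMU by value (the member at line 33) and declares the idle and out-of-band helpers used in the other files. Because the GMU is embedded rather than pointed to, code that only has the struct a6xx_gmu pointer can recover the owning GPU with container_of(), exactly as a6xx_gmu_fault() does in the first result above. A one-function sketch of that relationship:

/* Sketch: recover the owning a6xx_gpu from an embedded a6xx_gmu pointer */
static struct a6xx_gpu *example_gmu_to_gpu(struct a6xx_gmu *gmu)
{
        return container_of(gmu, struct a6xx_gpu, gmu);
}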

a6xx_gpu.c
     24  if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))   in _a6xx_check_idle()
    502  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;   in a6xx_set_hwcg() local
    522  gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);   in a6xx_set_hwcg()
    528  gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);   in a6xx_set_hwcg()
    895  a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);   in hw_init()
   1132  a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);   in hw_init()
   1134  if (a6xx_gpu->gmu.legacy) {   in hw_init()
   1136  a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);   in hw_init()
   1148  mutex_lock(&a6xx_gpu->gmu.lock);   in a6xx_hw_init()
   1150  mutex_unlock(&a6xx_gpu->gmu.lock);   in a6xx_hw_init()
    [all …]
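
In a6xx_gpu.c the GMU shows up in two roles: a6xx_set_hwcg() flips the SPTPRAC clock-control bit with gmu_rmw(), and the hw_init path brackets its GX-side register programming with a GMU_OOB_GPU_SET vote taken at line 895 and dropped at line 1132, all while a6xx_hw_init() holds gmu.lock. A sketch of that bracket is below; example_program_gx() is a hypothetical stand-in for the real register writes, and the error handling simply assumes a6xx_gmu_set_oob() returns 0 on success as its int prototype above suggests.

static void example_program_gx(struct a6xx_gpu *a6xx_gpu);      /* hypothetical */

static int example_gx_access(struct a6xx_gpu *a6xx_gpu)
{
        int ret;

        mutex_lock(&a6xx_gpu->gmu.lock);

        /* Ask the GMU to keep the GX domain powered while we touch it */
        ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
        if (ret)
                goto out;

        example_program_gx(a6xx_gpu);   /* hypothetical GX register writes */

        /* Drop the vote so the GMU may power-manage GX again */
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
out:
        mutex_unlock(&a6xx_gpu->gmu.lock);
        return ret;
}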

a6xx_gpu_state.c
    136  if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))   in a6xx_crashdumper_run()
    744  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;   in _a6xx_get_gmu_registers() local
    764  val = gmu_read_rscc(gmu, offset);   in _a6xx_get_gmu_registers()
    766  val = gmu_read(gmu, offset);   in _a6xx_get_gmu_registers()
    793  if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))   in a6xx_get_gmu_registers()
    941  if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))   in a6xx_gpu_state_get()
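
a6xx_gpu_state.c only touches the GMU for crash and state capture: it refuses to run the crashdumper unless SPTPRAC is powered, skips register blocks when the GX head is off, and reads each offset through either the RSCC aperture (gmu_read_rscc()) or the regular GMU aperture (gmu_read()). A sketch of that capture loop; the offsets array, count and rscc flag are illustrative stand-ins for the driver's real register-list structures, not its actual layout.

static void example_dump_gmu_regs(struct a6xx_gpu *a6xx_gpu,
                                  const u32 *offsets, int count,
                                  bool rscc, u32 *out)
{
        struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
        int i;

        /* GX-side registers are not readable while the GX head is powered off */
        if (!a6xx_gmu_gx_is_on(gmu))
                return;

        for (i = 0; i < count; i++)
                out[i] = rscc ? gmu_read_rscc(gmu, offsets[i]) :
                                gmu_read(gmu, offsets[i]);
}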