Searched refs:gmu (Results 1 – 6 of 6) sorted by relevance
/drivers/gpu/drm/msm/adreno/
a6xx_gmu.c
   13  static void a6xx_gmu_fault(struct a6xx_gmu *gmu)   in a6xx_gmu_fault() argument
   15  struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);   in a6xx_gmu_fault()
   22  gmu->hung = true;   in a6xx_gmu_fault()
   33  struct a6xx_gmu *gmu = data;   in a6xx_gmu_irq() local
   36  status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);   in a6xx_gmu_irq()
   37  gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);   in a6xx_gmu_irq()
   40  dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");   in a6xx_gmu_irq()
   42  a6xx_gmu_fault(gmu);   in a6xx_gmu_irq()
   46  dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");   in a6xx_gmu_irq()
   49  dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",   in a6xx_gmu_irq()
  [all …]
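
The a6xx_gmu.c hits outline the GMU interrupt path: the handler latches REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS, acknowledges it through the CLR register, and marks the GMU as hung on a watchdog bite. A minimal reconstruction follows, leaning on the gmu_read()/gmu_write() helpers shown in the a6xx_gmu.h result further down; the GMU_INT_* bit names, their bit positions, and the fence-status register read are assumptions filled in around the truncated hits, not names taken from this output.

/* Rough reconstruction of a6xx_gmu_fault()/a6xx_gmu_irq(); the GMU_INT_*
 * masks below are placeholders, not taken from the search output. */
#define GMU_INT_WDOG_BITE	BIT(0)	/* assumed bit positions */
#define GMU_INT_AHB_ERROR	BIT(1)
#define GMU_INT_FENCE_ERR	BIT(2)

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	/* The real handler also looks up the owning a6xx_gpu via container_of()
	 * and kicks GPU recovery; here we only record that the GMU wedged. */
	gmu->hung = true;
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	/* Latch and acknowledge whatever the GMU raised towards the host */
	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & GMU_INT_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
		a6xx_gmu_fault(gmu);
	}

	if (status & GMU_INT_AHB_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & GMU_INT_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); /* register name assumed */

	return IRQ_HANDLED;
}
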
a6xx_hfi.c
   54  static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,   in a6xx_hfi_queue_write() argument
   78  gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);   in a6xx_hfi_queue_write()
   82  static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,   in a6xx_hfi_wait_for_ack() argument
   85  struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];   in a6xx_hfi_wait_for_ack()
   90  ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,   in a6xx_hfi_wait_for_ack()
   94  DRM_DEV_ERROR(gmu->dev,   in a6xx_hfi_wait_for_ack()
  101  gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,   in a6xx_hfi_wait_for_ack()
  113  DRM_DEV_ERROR(gmu->dev,   in a6xx_hfi_wait_for_ack()
  123  DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",   in a6xx_hfi_wait_for_ack()
  129  DRM_DEV_ERROR(gmu->dev,   in a6xx_hfi_wait_for_ack()
  [all …]
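
These a6xx_hfi.c hits show both halves of an HFI transaction: the writer rings the host-to-GMU doorbell after queuing the payload, and the reader polls the GMU-to-host interrupt register for the ack before clearing it. Below is a condensed sketch of that handshake; the message-queue interrupt mask, the poll interval, and the timeout are all assumptions, since only the three register names appear in the hits.

/* Condensed HFI doorbell/ack handshake; HFI_IRQ_MSGQ_MASK and the
 * 100 us / 5 ms poll parameters are assumptions, not from the hits. */
#define HFI_IRQ_MSGQ_MASK	BIT(0)

static int a6xx_hfi_ring_and_wait(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
{
	u32 val;
	int ret;

	/* Tell the GMU firmware a new message sits in the command queue */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);

	/* Wait for the firmware to raise the "message queue" ack interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & HFI_IRQ_MSGQ_MASK, 100, 5000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"HFI message %u (seqnum %u) timed out waiting for ack\n",
			id, seqnum);
		return -ETIMEDOUT;
	}

	/* Ack the interrupt so the next response can be signalled */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, HFI_IRQ_MSGQ_MASK);

	return 0;
}
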
a6xx_gmu.h
   82  static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)   in gmu_read() argument
   84  return msm_readl(gmu->mmio + (offset << 2));   in gmu_read()
   87  static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)   in gmu_write() argument
   89  return msm_writel(value, gmu->mmio + (offset << 2));   in gmu_write()
   92  static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)   in gmu_rmw() argument
   94  u32 val = gmu_read(gmu, reg);   in gmu_rmw()
   98  gmu_write(gmu, reg, val | or);   in gmu_rmw()
  101  static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)   in gmu_read64() argument
  105  val = (u64) msm_readl(gmu->mmio + (lo << 2));   in gmu_read64()
  106  val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);   in gmu_read64()
  [all …]
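
The a6xx_gmu.h hits carry the whole register accessor family. Offsets are given in 32-bit words, so each helper shifts them left by 2 to form a byte offset into gmu->mmio. Here is a tidied reading of the two helpers whose bodies are split across the truncated hits; the masking line of gmu_rmw() and the return of gmu_read64() are filled in from the obvious pattern rather than copied from this output.

/* gmu_rmw(): read-modify-write of a GMU register; the masked-clear line
 * between the hits at 94 and 98 is inferred from the usual pattern. */
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
	u32 val = gmu_read(gmu, reg);

	val &= ~mask;	/* drop the field before OR-ing the new value in */

	gmu_write(gmu, reg, val | or);
}

/* gmu_read64(): two 32-bit reads glued into one 64-bit value; note this is
 * not atomic, the high word can tick between the two reads. */
static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
{
	u64 val;

	val = (u64) msm_readl(gmu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);

	return val;
}
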
a6xx_gpu.h
   23  struct a6xx_gmu gmu;   member
   49  int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
   51  bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
   53  int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
   54  void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
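
a6xx_gpu.h is where the GMU API crosses into the GPU side: struct a6xx_gpu embeds the GMU, and the a6xx_gmu_* prototypes cover the idle checks and the out-of-band (OOB) votes. Following the a6xx_hw_init() hits in the next result, a typical OOB user looks roughly like the sketch below; the function name, the error handling, and the register programming in the middle are placeholders.

/* Hypothetical caller showing the set_oob()/clear_oob() bracket around
 * GX register programming, modelled on the a6xx_hw_init() hits below. */
static int a6xx_touch_gx_registers(struct a6xx_gpu *a6xx_gpu)
{
	int ret;

	/* Ask the GMU to hold the GX rail up while the CPU programs the GPU */
	ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
	if (ret)
		return ret;

	/* ... GX-side register writes go here ... */

	/* Drop the vote so the GMU may power-collapse the GPU again */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	return 0;
}
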
a6xx_gpu.c
   21  if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))   in _a6xx_check_idle()
  148  gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,   in a6xx_submit()
  269  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;   in a6xx_set_hwcg() local
  280  gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);   in a6xx_set_hwcg()
  287  gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);   in a6xx_set_hwcg()
  382  a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);   in a6xx_hw_init()
  553  a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);   in a6xx_hw_init()
  556  a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);   in a6xx_hw_init()
  590  gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);   in a6xx_recover()
  666  gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);   in a6xx_fault_detect_irq()
  [all …]
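
In a6xx_gpu.c the accessors get exercised: _a6xx_check_idle() refuses to report idle while the GMU is busy, a6xx_submit() timestamps with the 64-bit always-on counter, and a6xx_set_hwcg() flips bit 0 of the SPTPRAC clock control register with gmu_rmw(). A sketch of that last toggle follows; reading bit 0 as a "force clock on" override is an inference from the two gmu_rmw() calls, not something the hits state.

/* Sketch of the clock-gating toggle at lines 280/287 above; the
 * interpretation of bit 0 as a force-on override is an assumption. */
static void a6xx_set_hwcg_sketch(struct a6xx_gpu *a6xx_gpu, bool state)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (state)
		/* hwcg on: clear the override so the SPTPRAC clock can gate */
		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	/* ... per-block CP/RBBM clock control writes elided ... */

	if (!state)
		/* hwcg off: set the override to keep the SPTPRAC clock running */
		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
}
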
a6xx_gpu_state.c
  136  if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))   in a6xx_crashdumper_run()
  724  struct a6xx_gmu *gmu = &a6xx_gpu->gmu;   in _a6xx_get_gmu_registers() local
  740  obj->data[index++] = gmu_read(gmu,   in _a6xx_get_gmu_registers()
  763  if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))   in a6xx_get_gmu_registers()
  884  if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))   in a6xx_gpu_state_get()
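
Finally, a6xx_gpu_state.c only touches GMU registers when the relevant power domains are up: the crashdumper bails out if SPTPRAC is off, and the GMU register dump is skipped unless GX is on. The capture loop in _a6xx_get_gmu_registers() presumably walks a range list with gmu_read(); a guess at its shape follows, with the helper name and the {first, last} pair layout of the register list being entirely assumed.

/* Hypothetical register-snapshot loop; only the gmu_read() into the state
 * object appears in the hits, the {first, last} range layout is assumed. */
static void a6xx_snapshot_gmu_range(struct a6xx_gmu *gmu,
		const u32 *ranges, int count, u32 *data)
{
	int i, index = 0;

	for (i = 0; i < count; i += 2) {
		u32 offset;

		/* ranges[] holds inclusive {first, last} dword offsets */
		for (offset = ranges[i]; offset <= ranges[i + 1]; offset++)
			data[index++] = gmu_read(gmu, offset);
	}
}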