/kernel/linux/linux-6.6/drivers/gpu/drm/etnaviv/

etnaviv_gpu.c:
    31      { .name = "etnaviv-gpu,2d" },
    39  int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
    41      struct etnaviv_drm_private *priv = gpu->drm->dev_private;
    45          *value = gpu->identity.model;
    49          *value = gpu->identity.revision;
    53          *value = gpu->identity.features;
    57          *value = gpu->identity.minor_features0;
    61          *value = gpu->identity.minor_features1;
    65          *value = gpu->identity.minor_features2;
    69          *value = gpu->identity.minor_features3;
    [all …]
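
The etnaviv excerpt above is the classic parameter-query shape: switch on a caller-supplied parameter ID, copy one field of the identity struct into a u64 out-parameter, and reject unknown IDs. The standalone sketch below reproduces that shape with made-up types (gpu_identity, PARAM_*) rather than the driver's real struct etnaviv_gpu and its uapi parameter IDs.

/*
 * Minimal user-space sketch of the query pattern shown in the
 * etnaviv_gpu_get_param() excerpt above: switch on a parameter ID and
 * return the value through a u64 out-pointer. The param IDs, the
 * gpu_identity struct and the main() harness are illustrative stand-ins,
 * not the kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

enum gpu_param { PARAM_MODEL, PARAM_REVISION, PARAM_FEATURES };

struct gpu_identity {
	uint32_t model;
	uint32_t revision;
	uint32_t features;
};

static int gpu_get_param(const struct gpu_identity *id,
			 enum gpu_param param, uint64_t *value)
{
	switch (param) {
	case PARAM_MODEL:
		*value = id->model;
		break;
	case PARAM_REVISION:
		*value = id->revision;
		break;
	case PARAM_FEATURES:
		*value = id->features;
		break;
	default:
		return -EINVAL;	/* unknown parameter: reject, as the driver does */
	}
	return 0;
}

int main(void)
{
	struct gpu_identity id = { .model = 0x7000, .revision = 0x6214, .features = 0xe0287cad };
	uint64_t value;

	if (gpu_get_param(&id, PARAM_MODEL, &value) == 0)
		printf("model: 0x%llx\n", (unsigned long long)value);
	return 0;
}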
|
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/

etnaviv_gpu.c:
    35      { .name = "etnaviv-gpu,2d" },
    43  int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
    45      struct etnaviv_drm_private *priv = gpu->drm->dev_private;
    49          *value = gpu->identity.model;
    53          *value = gpu->identity.revision;
    57          *value = gpu->identity.features;
    61          *value = gpu->identity.minor_features0;
    65          *value = gpu->identity.minor_features1;
    69          *value = gpu->identity.minor_features2;
    73          *value = gpu->identity.minor_features3;
    [all …]
|
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/

msm_gpu.c:
    25  static int enable_pwrrail(struct msm_gpu *gpu)
    27      struct drm_device *dev = gpu->dev;
    30      if (gpu->gpu_reg) {
    31          ret = regulator_enable(gpu->gpu_reg);
    38      if (gpu->gpu_cx) {
    39          ret = regulator_enable(gpu->gpu_cx);
    49  static int disable_pwrrail(struct msm_gpu *gpu)
    51      if (gpu->gpu_cx)
    52          regulator_disable(gpu->gpu_cx);
    53      if (gpu->gpu_reg)
    [all …]
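
enable_pwrrail()/disable_pwrrail() above bring up the two GPU regulators in order and tear them down in reverse, skipping rails that were never provided. The sketch below mirrors that ordering and the unwind-on-failure step, with hypothetical rail_on()/rail_off() helpers standing in for regulator_enable()/regulator_disable().

/*
 * Illustrative sketch of the enable/disable pairing visible in
 * enable_pwrrail()/disable_pwrrail(): rails come up in order, go down in
 * reverse, and an optional rail is simply skipped. rail_on()/rail_off()
 * are stand-ins, not the regulator framework API.
 */
#include <stdbool.h>
#include <stdio.h>

struct rail { const char *name; bool present; bool on; };

static int rail_on(struct rail *r)
{
	if (!r->present)
		return 0;		/* optional rail not wired up: nothing to do */
	r->on = true;
	printf("enabled %s\n", r->name);
	return 0;
}

static void rail_off(struct rail *r)
{
	if (r->present && r->on) {
		r->on = false;
		printf("disabled %s\n", r->name);
	}
}

static int gpu_power_on(struct rail *gpu_reg, struct rail *gpu_cx)
{
	int ret = rail_on(gpu_reg);
	if (ret)
		return ret;
	ret = rail_on(gpu_cx);
	if (ret)
		rail_off(gpu_reg);	/* unwind the first rail on failure */
	return ret;
}

static void gpu_power_off(struct rail *gpu_reg, struct rail *gpu_cx)
{
	/* reverse order of gpu_power_on() */
	rail_off(gpu_cx);
	rail_off(gpu_reg);
}

int main(void)
{
	struct rail reg = { "gpu_reg", true, false }, cx = { "gpu_cx", true, false };

	if (gpu_power_on(&reg, &cx) == 0)
		gpu_power_off(&reg, &cx);
	return 0;
}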
|
msm_gpu_devfreq.c:
    22      struct msm_gpu *gpu = dev_to_gpu(dev);          in msm_devfreq_target()
    23      struct msm_gpu_devfreq *df = &gpu->devfreq;
    37       * If the GPU is idle, devfreq is not aware, so just stash
    46      if (gpu->funcs->gpu_set_freq) {
    48          gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);
    59  static unsigned long get_freq(struct msm_gpu *gpu)
    61      struct msm_gpu_devfreq *df = &gpu->devfreq;
    64       * If the GPU is idle, use the shadow/saved freq to avoid
    71      if (gpu->funcs->gpu_get_freq)
    72          return gpu->funcs->gpu_get_freq(gpu);
    [all …]
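
The devfreq excerpt shows two ideas: requests made while the GPU is suspended are only stashed (a "shadow" frequency), and a per-GPU gpu_set_freq/gpu_get_freq callback, when present, takes precedence over the generic clock path. A minimal sketch of that behaviour follows; all toy_* types and the apply path are assumptions, not the driver's structures.

/*
 * Sketch of the pattern in msm_devfreq_target()/get_freq() above: while the
 * device is suspended the requested frequency is only stashed and applied
 * later; an optional per-GPU callback, when present, overrides the generic
 * clock path.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_gpu;

struct toy_gpu_funcs {
	/* optional: NULL means "fall back to the generic clock path" */
	void (*set_freq)(struct toy_gpu *gpu, unsigned long freq);
	unsigned long (*get_freq)(struct toy_gpu *gpu);
};

struct toy_gpu {
	const struct toy_gpu_funcs *funcs;
	unsigned long clk_rate;		/* generic clock path */
	unsigned long shadow_freq;	/* last requested freq while suspended */
	bool suspended;
};

static void devfreq_target(struct toy_gpu *gpu, unsigned long freq)
{
	if (gpu->suspended) {
		gpu->shadow_freq = freq;	/* stash; applied on resume */
		return;
	}
	if (gpu->funcs->set_freq)
		gpu->funcs->set_freq(gpu, freq);
	else
		gpu->clk_rate = freq;		/* clk_set_rate()-style fallback */
}

static unsigned long devfreq_get_freq(struct toy_gpu *gpu)
{
	if (gpu->suspended)
		return gpu->shadow_freq;	/* don't touch hardware while idle */
	if (gpu->funcs->get_freq)
		return gpu->funcs->get_freq(gpu);
	return gpu->clk_rate;
}

int main(void)
{
	static const struct toy_gpu_funcs funcs = { 0 };
	struct toy_gpu gpu = { .funcs = &funcs, .clk_rate = 200000000 };

	gpu.suspended = true;
	devfreq_target(&gpu, 500000000);
	printf("reported freq while suspended: %lu\n", devfreq_get_freq(&gpu));
	return 0;
}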
|
msm_gpu.h:
    47      int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
    49      int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
    51      int (*hw_init)(struct msm_gpu *gpu);
    56      int (*ucode_load)(struct msm_gpu *gpu);
    58      int (*pm_suspend)(struct msm_gpu *gpu);
    59      int (*pm_resume)(struct msm_gpu *gpu);
    60      void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
    61      void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
    63      struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
    64      void (*recover)(struct msm_gpu *gpu);
    [all …]
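
msm_gpu.h declares the per-generation ops table (msm_gpu_funcs) that the core code dispatches through. The standalone sketch below shows the same pattern on a smaller scale: one backend fills a table of callbacks and generation-agnostic code calls through it. All toy_* and a3xx_like_* names are illustrative, not the driver's symbols.

/*
 * Ops-table sketch: each "generation" provides a struct of function
 * pointers; the core never calls backend functions directly.
 */
#include <stdio.h>

struct toy_gpu;

struct toy_gpu_funcs {
	int  (*hw_init)(struct toy_gpu *gpu);
	void (*submit)(struct toy_gpu *gpu, int job_id);
	void (*recover)(struct toy_gpu *gpu);
};

struct toy_gpu {
	const char *name;
	const struct toy_gpu_funcs *funcs;
};

/* --- one "generation" backend --- */
static int a3xx_like_hw_init(struct toy_gpu *gpu)
{
	printf("%s: hw_init\n", gpu->name);
	return 0;
}

static void a3xx_like_submit(struct toy_gpu *gpu, int job_id)
{
	printf("%s: submit job %d\n", gpu->name, job_id);
}

static void a3xx_like_recover(struct toy_gpu *gpu)
{
	printf("%s: recover\n", gpu->name);
}

static const struct toy_gpu_funcs a3xx_like_funcs = {
	.hw_init = a3xx_like_hw_init,
	.submit  = a3xx_like_submit,
	.recover = a3xx_like_recover,
};

/* --- generation-agnostic core code --- */
static void core_run_job(struct toy_gpu *gpu, int job_id)
{
	gpu->funcs->submit(gpu, job_id);	/* dispatch through the table */
}

int main(void)
{
	struct toy_gpu gpu = { .name = "toy-a3xx", .funcs = &a3xx_like_funcs };

	if (gpu.funcs->hw_init(&gpu) == 0)
		core_run_job(&gpu, 1);
	return 0;
}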
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/

msm_gpu.c:
    27      struct msm_gpu *gpu = dev_to_gpu(dev);          in msm_devfreq_target()
    37      if (gpu->funcs->gpu_set_freq)
    38          gpu->funcs->gpu_set_freq(gpu, opp);
    40          clk_set_rate(gpu->core_clk, *freq);
    50      struct msm_gpu *gpu = dev_to_gpu(dev);          in msm_devfreq_get_dev_status()
    53      if (gpu->funcs->gpu_get_freq)
    54          status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
    56          status->current_frequency = clk_get_rate(gpu->core_clk);
    58      status->busy_time = gpu->funcs->gpu_busy(gpu);
    61      status->total_time = ktime_us_delta(time, gpu->devfreq.time);
    [all …]
|
msm_gpu.h:
    45      int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
    46      int (*hw_init)(struct msm_gpu *gpu);
    47      int (*pm_suspend)(struct msm_gpu *gpu);
    48      int (*pm_resume)(struct msm_gpu *gpu);
    49      void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
    50      void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
    52      struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
    53      void (*recover)(struct msm_gpu *gpu);
    54      void (*destroy)(struct msm_gpu *gpu);
    56      /* show GPU status in debugfs: */
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/

a3xx_gpu.c:
    28  static void a3xx_dump(struct msm_gpu *gpu);
    29  static bool a3xx_idle(struct msm_gpu *gpu);
    31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    33      struct msm_drm_private *priv = gpu->dev->dev_private;
    70      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
    83      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    86  static bool a3xx_me_init(struct msm_gpu *gpu)
    88      struct msm_ringbuffer *ring = gpu->rb[0];
    109     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    110     return a3xx_idle(gpu);
    [all …]
|
a5xx_gpu.c:
    17  static void a5xx_dump(struct msm_gpu *gpu);
    21  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    24      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    54      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    57  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    59      struct msm_drm_private *priv = gpu->dev->dev_private;
    104     a5xx_flush(gpu, ring, true);
    105     a5xx_preempt_trigger(gpu);
    111     a5xx_idle(gpu, ring);
    113     msm_gpu_retire(gpu);
    [all …]
|
a6xx_gpu.c:
    15  static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
    17      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    25      if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
    29      return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
    33  bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    36      if (!adreno_idle(gpu, ring))
    39      if (spin_until(_a6xx_check_idle(gpu))) {
    40          DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
    41              gpu->name, __builtin_return_address(0),
    42              gpu_read(gpu, REG_A6XX_RBBM_STATUS),
    [all …]
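
a6xx_idle() above waits for the hardware with a bounded poll (spin_until) and dumps the status registers if the deadline passes. The sketch below shows the same bounded-poll-with-diagnostics structure in standalone C; check_idle(), the toy status bit, and the 5 ms budget are assumptions, not the driver's values.

/*
 * Bounded-poll sketch: repeatedly evaluate an idle check until it passes
 * or a deadline expires, and report the last observed state on timeout.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool check_idle(unsigned int *status)
{
	static int busy_polls = 3;	/* pretend the hardware stays busy for a few polls */

	if (busy_polls > 0) {
		busy_polls--;
		*status = 0x1;		/* bit 0 = "busy" in this toy model */
		return false;
	}
	*status = 0;
	return true;
}

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Returns true if the device went idle before the deadline. */
static bool wait_for_idle(long long timeout_ns)
{
	long long deadline = now_ns() + timeout_ns;
	unsigned int status = 0;

	do {
		if (check_idle(&status))
			return true;
	} while (now_ns() < deadline);

	fprintf(stderr, "timeout waiting for idle: status %#x\n", status);
	return false;
}

int main(void)
{
	return wait_for_idle(5 * 1000000LL) ? 0 : 1;	/* 5 ms budget */
}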
|
a4xx_gpu.c:
    22  static void a4xx_dump(struct msm_gpu *gpu);
    23  static bool a4xx_idle(struct msm_gpu *gpu);
    25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    27      struct msm_drm_private *priv = gpu->dev->dev_private;
    64      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
    70      adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
    77  static void a4xx_enable_hwcg(struct msm_gpu *gpu)
    79      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    82          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
    84          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
    [all …]
|
a5xx_power.c:
    103 static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
    105     struct drm_device *dev = gpu->dev;
    122 static void a530_lm_setup(struct msm_gpu *gpu)
    124     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    130         gpu_write(gpu, a5xx_sequence_regs[i].reg,
    133     /* Hard code the A530 GPU thermal sensor ID for the GPMU */
    134     gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
    135     gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
    136     gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
    139     gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
    [all …]
|
adreno_gpu.h:
    47      int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    85       * of gpu firmware to linux-firmware, the fw files were
    109      * GPU specific offsets will be exported by GPU specific
    141 static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
    143     return (gpu->revn < 300);
    146 static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
    148     return (gpu->revn < 210);
    151 static inline bool adreno_is_a225(struct adreno_gpu *gpu)
    153     return gpu->revn == 225;
    156 static inline bool adreno_is_a305(struct adreno_gpu *gpu)
    [all …]
|
a5xx_preempt.c:
    25  static inline void set_preempt_state(struct a5xx_gpu *gpu,
    34      atomic_set(&gpu->preempt_state, new);
    40  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    52      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    56  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    61      for (i = 0; i < gpu->nr_rings; i++) {
    63          struct msm_ringbuffer *ring = gpu->rb[i];
    66          empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    79      struct msm_gpu *gpu = &a5xx_gpu->base.base;          in a5xx_preempt_timer()
    80      struct drm_device *dev = gpu->dev;
    [all …]
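
get_next_ring() above scans the rings from highest to lowest priority and returns the first one whose write pointer is ahead of the read pointer, i.e. the highest-priority ring that still has commands queued. A self-contained sketch of that selection follows; struct toy_ring and the sample pointer values are illustrative, not the driver's structures.

/*
 * Ring-selection sketch: index 0 is the highest priority; the first ring
 * whose wptr differs from its rptr still has work and wins.
 */
#include <stdio.h>

#define NR_RINGS 4

struct toy_ring {
	int id;
	unsigned int wptr;	/* where the CPU has written commands up to */
	unsigned int rptr;	/* where the GPU has read up to */
};

/* Returns the highest-priority ring with pending commands, or NULL. */
static struct toy_ring *get_next_ring(struct toy_ring rings[], int nr_rings)
{
	for (int i = 0; i < nr_rings; i++) {
		int empty = (rings[i].wptr == rings[i].rptr);

		if (!empty)
			return &rings[i];
	}
	return NULL;
}

int main(void)
{
	struct toy_ring rings[NR_RINGS] = {
		{ .id = 0, .wptr = 8,  .rptr = 8 },	/* highest priority, drained */
		{ .id = 1, .wptr = 20, .rptr = 16 },	/* has pending commands */
		{ .id = 2, .wptr = 4,  .rptr = 4 },
		{ .id = 3, .wptr = 0,  .rptr = 0 },
	};
	struct toy_ring *next = get_next_ring(rings, NR_RINGS);

	printf("next ring: %d\n", next ? next->id : -1);
	return 0;
}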
|
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/adreno/

a3xx_gpu.c:
    28  static void a3xx_dump(struct msm_gpu *gpu);
    29  static bool a3xx_idle(struct msm_gpu *gpu);
    31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    43      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    69      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
    82      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    85  static bool a3xx_me_init(struct msm_gpu *gpu)
    87      struct msm_ringbuffer *ring = gpu->rb[0];
    108     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    109     return a3xx_idle(gpu);
    [all …]
|
a5xx_gpu.c:
    17  static void a5xx_dump(struct msm_gpu *gpu);
    21  static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    23      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    33  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    36      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    46          update_shadow_rptr(gpu, ring);
    63      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    66  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    68      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    80      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    [all …]
|
adreno_gpu.h:
    36       * so it helps to be able to group the GPU devices by generation and if
    69      int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    144      * of gpu firmware to linux-firmware, the fw files were
    168      * GPU specific offsets will be exported by GPU specific
    202 static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
    208     WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
    209     return gpu->chip_id & 0xff;
    212 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
    214     if (WARN_ON_ONCE(!gpu->info))
    216     return gpu->info->revn == revn;
    [all …]
|
a4xx_gpu.c:
    22  static void a4xx_dump(struct msm_gpu *gpu);
    23  static bool a4xx_idle(struct msm_gpu *gpu);
    25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    37      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    63      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
    69      adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
    76  static void a4xx_enable_hwcg(struct msm_gpu *gpu)
    78      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    81          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
    83          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
    [all …]
|
a5xx_power.c:
    103 static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
    105     struct drm_device *dev = gpu->dev;
    122 static void a530_lm_setup(struct msm_gpu *gpu)
    124     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    130         gpu_write(gpu, a5xx_sequence_regs[i].reg,
    133     /* Hard code the A530 GPU thermal sensor ID for the GPMU */
    134     gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
    135     gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
    136     gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
    139     gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
    [all …]
|
a6xx_gpu.c:
    18  static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
    20      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    28      if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
    32      return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
    36  static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    39      if (!adreno_idle(gpu, ring))
    42      if (spin_until(_a6xx_check_idle(gpu))) {
    43          DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
    44              gpu->name, __builtin_return_address(0),
    45              gpu_read(gpu, REG_A6XX_RBBM_STATUS),
    [all …]
|
a5xx_preempt.c:
    25  static inline void set_preempt_state(struct a5xx_gpu *gpu,
    34      atomic_set(&gpu->preempt_state, new);
    40  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    52      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    56  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    58      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    63      for (i = 0; i < gpu->nr_rings; i++) {
    65          struct msm_ringbuffer *ring = gpu->rb[i];
    68          empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    83      struct msm_gpu *gpu = &a5xx_gpu->base.base;          in a5xx_preempt_timer()
    [all …]
|
a2xx_gpu.c:
    10  static void a2xx_dump(struct msm_gpu *gpu);
    11  static bool a2xx_idle(struct msm_gpu *gpu);
    13  static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    25      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    51      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    54  static bool a2xx_me_init(struct msm_gpu *gpu)
    56      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    58      struct msm_ringbuffer *ring = gpu->rb[0];
    104     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    105     return a2xx_idle(gpu);
    [all …]
|
/kernel/linux/linux-6.6/drivers/gpu/drm/

Kconfig:
    188 source "drivers/gpu/drm/display/Kconfig"
    194   GPU memory management subsystem for devices with multiple
    195   GPU memory types. Will be enabled automatically if a device driver
    207   Enables unit tests for TTM, a GPU memory manager subsystem used
    260 source "drivers/gpu/drm/i2c/Kconfig"
    262 source "drivers/gpu/drm/arm/Kconfig"
    264 source "drivers/gpu/drm/radeon/Kconfig"
    266 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
    268 source "drivers/gpu/drm/nouveau/Kconfig"
    270 source "drivers/gpu/drm/i915/Kconfig"
    [all …]
|
/kernel/linux/linux-5.10/Documentation/gpu/

i915.rst:
    19  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    22  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    25  .. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
    31  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    34  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    37  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    40  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    46  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    49  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    55  .. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
    [all …]
|
/kernel/linux/linux-6.6/Documentation/gpu/

i915.rst:
    19  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    22  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    25  .. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
    31  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    34  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    37  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    40  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    46  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    49  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    55  .. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
    [all …]
|