// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */

#include "a2xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

extern bool hang_debug;

static void a2xx_dump(struct msm_gpu *gpu);
static bool a2xx_idle(struct msm_gpu *gpu);

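/*
 * Emit a submit on the ring: one CP_INDIRECT_BUFFER_PFD per command buffer,
 * followed by the fence seqno write and interrupt. CTX_RESTORE_BUF entries
 * are skipped when the submitting context is already current, since there
 * is nothing to restore in that case.
 */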
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

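	/*
	 * Also write the seqno to CP_SCRATCH_REG2, presumably for hang
	 * diagnostics: a2xx_recover() dumps the scratch registers, which
	 * helps correlate a hang with the last submit seen by the CP.
	 */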
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	/* wait for idle before cache flush/interrupt */
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}

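/*
 * CP_ME_INIT configures the micro engine after the PM4/PFP ucode has been
 * loaded. It is emitted on ring 0 and this helper blocks until the ring
 * drains and the GPU goes idle, so a failure here surfaces as -EINVAL from
 * a2xx_hw_init().
 */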
static bool a2xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT3(ring, CP_ME_INIT, 18);

	/* All fields present (bits 9:0) */
	OUT_RING(ring, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	OUT_RING(ring, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	OUT_RING(ring, 0x00000000);

	OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
	OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
	OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
	OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	OUT_RING(ring, 0x80000180);
	/* Maximum Contexts */
	OUT_RING(ring, 0x00000001);
	/* Write Confirm Interval; the CP waits wait_interval * 16 clocks
	 * between polls */
	OUT_RING(ring, 0x00000000);
	/* NQ and External Memory Swap */
	OUT_RING(ring, 0x00000000);
	/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
	OUT_RING(ring, 0x200001f2);
	/* Disable header dumping; header dump address */
	OUT_RING(ring, 0x00000000);
	/* Header dump size */
	OUT_RING(ring, 0x00000000);

	/* enable protected mode */
	OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
	return a2xx_idle(gpu);
}

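/*
 * Bring the GPU up from scratch: halt the micro engine, soft-reset the
 * RBBM, program the MH MMU and arbiter, unmask the interrupts we handle,
 * upload the PM4 and PFP ucode, then restart the micro engine and run
 * CP_ME_INIT.
 */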
static int a2xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	dma_addr_t pt_base, tran_error;
	uint32_t *ptr, len;
	int i, ret;

	msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);

	DBG("%s", gpu->name);

	/* halt ME to avoid ucode upload issues on a20x */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);

	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);

	/* note: kgsl uses 0x00000001 after first reset on a22x */
	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
	msleep(30);
	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);

	if (adreno_is_a225(adreno_gpu))
		gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);

	/* note: kgsl uses 0x0000ffff for a20x */
	gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);

	/* MPU: physical range */
	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);

	gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
		A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));

	/* same as parameters in adreno_gpu */
	gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
		A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));

	gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
	gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);

	gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);

	gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
		A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
		A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
		A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
		A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
		A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
		A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
		A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
	if (!adreno_is_a20x(adreno_gpu))
		gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);

	gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
	gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);

	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */

	/* note: gsl doesn't set this */
	gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);

	gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
		A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
	gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
		AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
		AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
		AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
		AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
		AXXX_CP_INT_CNTL_IB_ERROR_MASK |
		AXXX_CP_INT_CNTL_IB1_INT_MASK |
		AXXX_CP_INT_CNTL_RB_INT_MASK);
	gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
	gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
		A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
		A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
		A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);

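	/*
	 * Encode the GMEM size for RB_EDRAM_INFO as a power-of-two index:
	 * i = 3/4/5 correspond to SZ_16K << i = 128K/256K/512K of GMEM.
	 */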
	for (i = 3; i <= 5; i++)
		if ((SZ_16K << i) == adreno_gpu->gmem)
			break;
	gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));

	/* NOTE: PM4/micro-engine firmware registers look to be the same
	 * for a2xx and a3xx.. we could possibly push that part down to
	 * adreno_gpu base class. Or push both PM4 and PFP but
	 * parameterize the pfp ucode addr/data registers..
	 */

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
	DBG("loading PM4 ucode version: %x", ptr[1]);

	gpu_write(gpu, REG_AXXX_CP_DEBUG,
		AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
	DBG("loading PFP ucode version: %x", ptr[5]);

	gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);

	gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);

	return a2xx_me_init(gpu) ? 0 : -EINVAL;
}

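/*
 * Hang recovery: dump state (including the CP scratch registers, which
 * hold recent fence seqnos written by a2xx_submit()), pulse
 * RBBM_SOFT_RESET, then let the common adreno_recover() restart the GPU.
 * The read back in between is presumably there to make sure the reset
 * write has landed before the bit is cleared again.
 */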
static void a2xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a2xx_dump(gpu);

	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
	gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
	adreno_recover(gpu);
}

static void a2xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

	kfree(a2xx_gpu);
}

static bool a2xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	if (!adreno_idle(gpu, gpu->rb[0]))
		return false;

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
			A2XX_RBBM_STATUS_GUI_ACTIVE))) {
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);

		/* TODO maybe we need to reset GPU here to recover from hang? */
		return false;
	}

	return true;
}

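/*
 * MASTER_INT_SIGNAL multiplexes the MH, CP and RBBM interrupt sources;
 * each has its own status register and must be acked separately. Only CP
 * RB_INT (the fence interrupt emitted in a2xx_submit()) is expected in
 * normal operation; everything else is logged as a warning.
 */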
static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
{
	uint32_t mstatus, status;

	mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);

	if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
		status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);

		dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
		dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
			gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));

		gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
	}

	if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
		status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);

		/* only RB_INT is expected */
		if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
			dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);

		gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
	}

	if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
		status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);

		dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);

		gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
	}

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

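/*
 * Per-core register dump lists, as pairs of inclusive (start, end) offsets
 * terminated by a ~0 sentinel; consumed through adreno_gpu->registers by
 * the common adreno dump/show code.
 */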
static const unsigned int a200_registers[] = {
	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
	0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
	0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
	0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
	0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
	0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
	0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
	0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
	0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
	0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
	0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
	0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
	0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
	0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
	0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
	0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
	0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
	0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
	0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
	0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
	0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
	0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
	0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
	0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
	0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
	~0 /* sentinel */
};

static const unsigned int a220_registers[] = {
	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
	0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
	0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
	0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
	0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
	0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
	0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
	0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
	0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
	0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
	0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
	0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
	0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
	0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
	0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
	0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
	0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
	0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
	0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
	0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
	0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
	0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
	0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
	0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
	0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
	0x4900, 0x4900, 0x4908, 0x4908,
	~0 /* sentinel */
};

static const unsigned int a225_registers[] = {
	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
	0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
	0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
	0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
	0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
	0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
	0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
	0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
	0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
	0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
	0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
	0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
	0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
	0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
	0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
	0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
	0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
	0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
	0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
	0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
	0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
	0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
	0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
	0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
	0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
	0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
	0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
	0x4908, 0x4908,
	~0 /* sentinel */
};

/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a2xx_dump(struct msm_gpu *gpu)
{
	printk("status: %08x\n",
		gpu_read(gpu, REG_A2XX_RBBM_STATUS));
	adreno_dump(gpu);
}

static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return ERR_PTR(-ENOMEM);

	adreno_gpu_state_get(gpu, state);

	state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);

	return state;
}

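/*
 * a2xx has no IOMMU; it uses the GPU's own MMU (msm_gpummu). The VA range
 * created here (base SZ_16M, 0xfff 64K regions) matches what
 * a2xx_hw_init() programs into REG_A2XX_MH_MMU_VA_RANGE.
 */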
static struct msm_gem_address_space *
a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
	struct msm_gem_address_space *aspace;

	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
		0xfff * SZ_64K);

	if (IS_ERR(aspace) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return aspace;
}

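/*
 * The read pointer is read straight from CP_RB_RPTR and mirrored into the
 * shared memptrs so common code can consume it.
 */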
static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
	return ring->memptrs->rptr;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.set_param = adreno_set_param,
		.hw_init = a2xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = a2xx_recover,
		.submit = a2xx_submit,
		.active_ring = adreno_active_ring,
		.irq = a2xx_irq,
		.destroy = a2xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
		.show = adreno_show,
#endif
		.gpu_state_get = a2xx_gpu_state_get,
		.gpu_state_put = adreno_gpu_state_put,
		.create_address_space = a2xx_create_address_space,
		.get_rptr = a2xx_get_rptr,
	},
};

static const struct msm_gpu_perfcntr perfcntrs[] = {
	/* TODO */
};

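/*
 * Probe-time entry point: allocate the a2xx wrapper, run common adreno
 * init with a single ringbuffer, pick the register dump list for the
 * detected core (a20x/a220/a225), and refuse to run without an MMU unless
 * the allow_vram_carveout module option is set.
 */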
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
{
	struct a2xx_gpu *a2xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "no a2xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
	if (!a2xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a2xx_gpu->base;
	gpu = &adreno_gpu->base;

	gpu->perfcntrs = perfcntrs;
	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret)
		goto fail;

	if (adreno_is_a20x(adreno_gpu))
		adreno_gpu->registers = a200_registers;
	else if (adreno_is_a225(adreno_gpu))
		adreno_gpu->registers = a225_registers;
	else
		adreno_gpu->registers = a220_registers;

	if (!gpu->aspace) {
		dev_err(dev->dev, "No memory protection without MMU\n");
		if (!allow_vram_carveout) {
			ret = -ENXIO;
			goto fail;
		}
	}

	return gpu;

fail:
	if (a2xx_gpu)
		a2xx_destroy(&a2xx_gpu->base.base);

	return ERR_PTR(ret);
}