#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"

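/* Builder for scripts executed by the MEMX process on the PMU.  A minimal
 * usage sketch (pmu, addr, mask, data and nsec are placeholders; real
 * callers live elsewhere in the driver):
 *
 *	struct nvkm_memx *memx;
 *	if (nvkm_memx_init(pmu, &memx) == 0) {
 *		nvkm_memx_block(memx);                 // block host access
 *		nvkm_memx_wr32(memx, addr, data);      // queue a register write
 *		nvkm_memx_wait(memx, addr, mask, data, nsec);
 *		nvkm_memx_unblock(memx);
 *		nvkm_memx_fini(&memx, true);           // upload and execute
 *	}
 */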
struct nvkm_memx {
	struct nvkm_pmu *pmu;
	u32 base; /* offset of the MEMX data segment in PMU memory */
	u32 size; /* size of the MEMX data segment */
	struct {
		u32 mthd;
		u32 size;
		u32 data[64];
	} c; /* command packet being assembled, flushed by memx_out() */
};

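/* Flush the buffered command packet to the PMU: a header word encoding
 * (payload size << 16 | method), followed by the payload words, all
 * written through the data port at 0x10a1c4.
 */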
static void
memx_out(struct nvkm_memx *memx)
{
	struct nvkm_device *device = memx->pmu->subdev.device;
	int i;

	if (memx->c.mthd) {
		nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
		for (i = 0; i < memx->c.size; i++)
			nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
		memx->c.mthd = 0;
		memx->c.size = 0;
	}
}

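/* Append a command to the packet buffer, flushing first if the buffer
 * would overflow or if the method changes (each packet carries a single
 * method).
 */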
static void
memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
{
	if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
	    (memx->c.mthd && memx->c.mthd != mthd))
		memx_out(memx);
	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
	memx->c.size += size;
	memx->c.mthd = mthd;
}

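/* Ask the MEMX process where its data segment lives, then acquire write
 * access to it so that script commands can be uploaded.
 */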
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_memx *memx;
	u32 reply[2];
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_DATA, 0);
	if (ret)
		return ret;

	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
	if (!memx)
		return -ENOMEM;
	memx->pmu = pmu;
	memx->base = reply[0];
	memx->size = reply[1];

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000003);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
	return 0;
}

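/* Flush buffered commands and release the data segment; if requested,
 * ask the MEMX process to execute the uploaded script, then free the
 * builder.
 */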
int
nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
	struct nvkm_memx *memx = *pmemx;
	struct nvkm_pmu *pmu = memx->pmu;
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 finish, reply[2];

	/* flush the cache... */
	memx_out(memx);

	/* release data segment access */
	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* call MEMX process to execute the script, and wait for reply;
	 * the debug print stays inside this branch, as reply[] is only
	 * initialized when the script is actually executed
	 */
	if (exec) {
		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
			      memx->base, finish);
		nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
			   reply[0], reply[1]);
	}

	kfree(memx);
	return 0;
}

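/* Each helper below queues a single MEMX script opcode. */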
void
nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
}

void
nvkm_memx_wait(struct nvkm_memx *memx,
	       u32 addr, u32 mask, u32 data, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d ns\n",
		   addr, mask, data, nsec);
	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
	memx_out(memx); /* fuc can't handle multiple */
}

void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
	memx_out(memx); /* fuc can't handle multiple */
}

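/* Queue a wait-for-vblank on the active head with the largest resolution.
 * The head scan below only covers chipsets prior to 0xd0; on newer ones
 * no head is found and the wait is silently dropped.
 */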
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
	struct nvkm_subdev *subdev = &memx->pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	if (device->chipset < 0xd0) {
		heads = nvkm_rd32(device, 0x610050);
		for (i = 0; i < 2; i++) {
			/* Heuristic: sync to head with biggest resolution */
			if (heads & (2 << (i << 3))) {
				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
				y = (x & 0xffff0000) >> 16;
				x &= 0x0000ffff;
				if ((x * y) > px) {
					px = (x * y);
					head_sync = i;
				}
			}
		}
	}

	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
	memx_out(memx); /* fuc can't handle multiple */
}

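/* Queue a memory training request; results can be read back afterwards
 * with nvkm_memx_train_result().
 */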
void
nvkm_memx_train(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
}

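/* Read the training result packet back from the MEMX process: query its
 * location, point the PMU at it, then stream it out through the data
 * port at 0x10a1c4.
 */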
int
nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
{
	struct nvkm_device *device = pmu->subdev.device;
	u32 reply[2], base, size, i;
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_TRAIN, 0);
	if (ret)
		return ret;

	base = reply[0];
	size = reply[1] >> 2; /* reply[1] is in bytes, convert to words */
	if (size > rsize)
		return -ENOMEM;

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);

	for (i = 0; i < size; i++)
		res[i] = nvkm_rd32(device, 0x10a1c4);

	return 0;
}

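/* MEMX_ENTER/MEMX_LEAVE bracket a script region during which host
 * access to the GPU is blocked.
 */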
void
nvkm_memx_block(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
	memx_cmd(memx, MEMX_ENTER, 0, NULL);
}

void
nvkm_memx_unblock(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
}
#endif