/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/bar.h>

/******************************************************************************
 * instmem object base implementation
 *****************************************************************************/
#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)

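/* Wrapper around a backend-allocated nvkm_memory object.  Each instance
 * object keeps a pointer to its parent allocation, a link on the instmem
 * suspend/resume list, an optional suspend backup buffer, and a cached
 * direct CPU mapping once one has been acquired.
 */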
struct nvkm_instobj {
	struct nvkm_memory memory;
	struct nvkm_memory *parent;
	struct nvkm_instmem *imem;
	struct list_head head;
	u32 *suspend;
	void __iomem *map;
};

static enum nvkm_memory_target
nvkm_instobj_target(struct nvkm_memory *memory)
{
	memory = nvkm_instobj(memory)->parent;
	return nvkm_memory_target(memory);
}

static u64
nvkm_instobj_addr(struct nvkm_memory *memory)
{
	memory = nvkm_instobj(memory)->parent;
	return nvkm_memory_addr(memory);
}

static u64
nvkm_instobj_size(struct nvkm_memory *memory)
{
	memory = nvkm_instobj(memory)->parent;
	return nvkm_memory_size(memory);
}

static void
nvkm_instobj_release(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	nvkm_bar_flush(iobj->imem->subdev.device->bar);
}

static void __iomem *
nvkm_instobj_acquire(struct nvkm_memory *memory)
{
	return nvkm_instobj(memory)->map;
}

static u32
nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	return ioread32_native(nvkm_instobj(memory)->map + offset);
}

static void
nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
}

static void
nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	memory = nvkm_instobj(memory)->parent;
	nvkm_memory_map(memory, vma, offset);
}

static void *
nvkm_instobj_dtor(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	spin_lock(&iobj->imem->lock);
	list_del(&iobj->head);
	spin_unlock(&iobj->imem->lock);
	nvkm_memory_del(&iobj->parent);
	return iobj;
}

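/* Fast-path accessors, installed by nvkm_instobj_acquire_slow() once a
 * direct mapping of the parent object is available: reads and writes go
 * straight through the cached iomem pointer.
 */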
const struct nvkm_memory_func
nvkm_instobj_func = {
	.dtor = nvkm_instobj_dtor,
	.target = nvkm_instobj_target,
	.addr = nvkm_instobj_addr,
	.size = nvkm_instobj_size,
	.acquire = nvkm_instobj_acquire,
	.release = nvkm_instobj_release,
	.rd32 = nvkm_instobj_rd32,
	.wr32 = nvkm_instobj_wr32,
	.map = nvkm_instobj_map,
};

static void
nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
{
	memory = nvkm_instobj(memory)->parent;
	nvkm_memory_boot(memory, vm);
}

static void
nvkm_instobj_release_slow(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	nvkm_instobj_release(memory);
	nvkm_done(iobj->parent);
}

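/* Map the parent object and, if that succeeds, switch this object over to
 * the fast-path function table so subsequent accesses avoid the indirection
 * through the parent.
 */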
static void __iomem *
nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	iobj->map = nvkm_kmap(iobj->parent);
	if (iobj->map)
		memory->func = &nvkm_instobj_func;
	return iobj->map;
}

static u32
nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	return nvkm_ro32(iobj->parent, offset);
}

static void
nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nvkm_instobj *iobj = nvkm_instobj(memory);
	return nvkm_wo32(iobj->parent, offset, data);
}

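/* Default accessors installed at creation time: every access is forwarded
 * to the parent object until a direct mapping has been acquired.
 */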
const struct nvkm_memory_func
nvkm_instobj_func_slow = {
	.dtor = nvkm_instobj_dtor,
	.target = nvkm_instobj_target,
	.addr = nvkm_instobj_addr,
	.size = nvkm_instobj_size,
	.boot = nvkm_instobj_boot,
	.acquire = nvkm_instobj_acquire_slow,
	.release = nvkm_instobj_release_slow,
	.rd32 = nvkm_instobj_rd32_slow,
	.wr32 = nvkm_instobj_wr32_slow,
	.map = nvkm_instobj_map,
};

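/* Allocate an instance object: the backend provides the backing memory,
 * which is wrapped in an nvkm_instobj and added to the instmem list (used
 * for suspend/resume) unless the backend marks its allocations persistent.
 * The allocation is zero-filled here if the backend does not do so itself.
 */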
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nvkm_memory *memory = NULL;
	struct nvkm_instobj *iobj;
	u32 offset;
	int ret;

	ret = imem->func->memory_new(imem, size, align, zero, &memory);
	if (ret)
		goto done;

	if (!imem->func->persistent) {
		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
			ret = -ENOMEM;
			goto done;
		}

		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
		iobj->parent = memory;
		iobj->imem = imem;
		spin_lock(&iobj->imem->lock);
		list_add_tail(&iobj->head, &imem->list);
		spin_unlock(&iobj->imem->lock);
		memory = &iobj->memory;
	}

	if (!imem->func->zero && zero) {
		void __iomem *map = nvkm_kmap(memory);
		if (unlikely(!map)) {
			for (offset = 0; offset < size; offset += 4)
				nvkm_wo32(memory, offset, 0x00000000);
		} else {
			memset_io(map, 0x00, size);
		}
		nvkm_done(memory);
	}

done:
	if (ret)
		nvkm_memory_del(&memory);
	*pmemory = memory;
	return ret;
}

/******************************************************************************
 * instmem subdev base implementation
 *****************************************************************************/

u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
	return imem->func->rd32(imem, addr);
}

void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
	return imem->func->wr32(imem, addr, data);
}

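/* On suspend, copy the contents of every tracked instance object into a
 * system-memory buffer so it can be restored by nvkm_instmem_init() after
 * the device is powered back up.
 */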
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;
	int i;

	if (imem->func->fini)
		imem->func->fini(imem);

	if (suspend) {
		list_for_each_entry(iobj, &imem->list, head) {
			struct nvkm_memory *memory = iobj->parent;
			u64 size = nvkm_memory_size(memory);

			iobj->suspend = vmalloc(size);
			if (!iobj->suspend)
				return -ENOMEM;

			for (i = 0; i < size; i += 4)
				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
		}
	}

	return 0;
}

static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->oneinit)
		return imem->func->oneinit(imem);
	return 0;
}

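/* Restore any instance object contents saved by nvkm_instmem_fini() during
 * suspend, then free the backup buffers.
 */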
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;
	int i;

	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend) {
			struct nvkm_memory *memory = iobj->parent;
			u64 size = nvkm_memory_size(memory);
			for (i = 0; i < size; i += 4)
				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
			vfree(iobj->suspend);
			iobj->suspend = NULL;
		}
	}

	return 0;
}

static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->dtor)
		return imem->func->dtor(imem);
	return imem;
}

static const struct nvkm_subdev_func
nvkm_instmem = {
	.dtor = nvkm_instmem_dtor,
	.oneinit = nvkm_instmem_oneinit,
	.init = nvkm_instmem_init,
	.fini = nvkm_instmem_fini,
};

void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
		  struct nvkm_device *device, int index,
		  struct nvkm_instmem *imem)
{
	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
	imem->func = func;
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
}