/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bar.h>

/******************************************************************************
 * instmem object base implementation
 *****************************************************************************/
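/* Restore an instance object's contents from its CPU-side suspend buffer,
 * through a direct mapping when one is available, otherwise via the
 * 32-bit accessor slow path.
 */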
static void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			nvkm_wo32(memory, i, iobj->suspend[i / 4]);
	} else {
		memcpy_toio(map, iobj->suspend, size);
	}
	nvkm_done(memory);

	kvfree(iobj->suspend);
	iobj->suspend = NULL;
}

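/* Snapshot an instance object's contents into a CPU-side buffer so they
 * survive suspend, using the same fast/slow paths as the restore above.
 */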
static int
nvkm_instobj_save(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	iobj->suspend = kvmalloc(size, GFP_KERNEL);
	if (!iobj->suspend)
		return -ENOMEM;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			iobj->suspend[i / 4] = nvkm_ro32(memory, i);
	} else {
		memcpy_fromio(iobj->suspend, map, size);
	}
	nvkm_done(memory);
	return 0;
}

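/* Unlink an instance object from its parent instmem's tracking list. */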
void
nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	spin_lock(&imem->lock);
	list_del(&iobj->head);
	spin_unlock(&imem->lock);
}

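/* Initialise common instance object state, and track the object so its
 * contents can be saved/restored across suspend.
 */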
void
nvkm_instobj_ctor(const struct nvkm_memory_func *func,
		  struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	nvkm_memory_ctor(func, &iobj->memory);
	iobj->suspend = NULL;
	spin_lock(&imem->lock);
	list_add_tail(&iobj->head, &imem->list);
	spin_unlock(&imem->lock);
}

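/* Wrap an existing memory allocation as an instance object, where the
 * backend supports it.
 */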
int
nvkm_instobj_wrap(struct nvkm_device *device,
		  struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
	struct nvkm_instmem *imem = device->imem;

	if (!imem->func->memory_wrap)
		return -ENOSYS;

	return imem->func->memory_wrap(imem, memory, pmemory);
}

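/* Allocate a new instance object through the backend, zeroing its
 * contents by hand if the caller requested it and the backend doesn't
 * already guarantee zeroed allocations.
 */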
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nvkm_subdev *subdev = &imem->subdev;
	struct nvkm_memory *memory = NULL;
	u32 offset;
	int ret;

	ret = imem->func->memory_new(imem, size, align, zero, &memory);
	if (ret) {
		nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
		goto done;
	}

	nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
		   zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));

	if (!imem->func->zero && zero) {
		void __iomem *map = nvkm_kmap(memory);
		if (unlikely(!map)) {
			for (offset = 0; offset < size; offset += 4)
				nvkm_wo32(memory, offset, 0x00000000);
		} else {
			memset_io(map, 0x00, size);
		}
		nvkm_done(memory);
	}

done:
	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}

/******************************************************************************
 * instmem subdev base implementation
 *****************************************************************************/

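/* Peek/poke instance memory through the backend-provided accessors. */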
u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
	return imem->func->rd32(imem, addr);
}

void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
	imem->func->wr32(imem, addr, data);
}

void
nvkm_instmem_boot(struct nvkm_instmem *imem)
{
	/* Separate bootstrapped objects from the normal list, as we need
	 * to make sure they're accessed with the slow path on suspend
	 * and resume.
	 */
	struct nvkm_instobj *iobj, *itmp;

	spin_lock(&imem->lock);
	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
		list_move_tail(&iobj->head, &imem->boot);
	}
	spin_unlock(&imem->lock);
}

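/* On suspend, save the contents of all instance objects: regular objects
 * first, while BAR2 is still up for fast access, then bootstrapped objects
 * via the slow path once BAR2 has been taken down.
 */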
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	if (suspend) {
		list_for_each_entry(iobj, &imem->list, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}

		nvkm_bar_bar2_fini(subdev->device);

		list_for_each_entry(iobj, &imem->boot, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}
	}

	if (imem->func->fini)
		imem->func->fini(imem);

	return 0;
}

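/* On resume, restore contents in the reverse order: bootstrapped objects
 * first via the slow path, then bring BAR2 back up, then everything else.
 */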
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	list_for_each_entry(iobj, &imem->boot, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	nvkm_bar_bar2_init(subdev->device);

	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	return 0;
}

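/* One-time backend setup, deferred until the subdev is first initialised. */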
static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->oneinit)
		return imem->func->oneinit(imem);
	return 0;
}

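/* Backend teardown; the returned pointer is what the subdev core frees. */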
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	void *data = imem;
	if (imem->func->dtor)
		data = imem->func->dtor(imem);
	mutex_destroy(&imem->mutex);
	return data;
}

static const struct nvkm_subdev_func
nvkm_instmem = {
	.dtor = nvkm_instmem_dtor,
	.oneinit = nvkm_instmem_oneinit,
	.init = nvkm_instmem_init,
	.fini = nvkm_instmem_fini,
};

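/* Base constructor, called by backends to initialise common instmem state. */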
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
		  enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
{
	nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
	imem->func = func;
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
	INIT_LIST_HEAD(&imem->boot);
	mutex_init(&imem->mutex);
}