/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bar.h>

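/* Base implementation shared by every instmem backend: it tracks live
 * instance objects so their contents can be saved and restored across
 * suspend/resume, and provides the common subdev plumbing.
 */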
/******************************************************************************
 * instmem object base implementation
 *****************************************************************************/
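/* Capture (save) and restore (load) an instance object's contents around
 * suspend/resume.  When the object still has a CPU mapping available, the
 * whole buffer is copied with memcpy_fromio()/memcpy_toio(); otherwise it
 * is transferred one 32-bit word at a time through the slow-path
 * accessors.
 */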
static void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			nvkm_wo32(memory, i, iobj->suspend[i / 4]);
	} else {
		memcpy_toio(map, iobj->suspend, size);
	}
	nvkm_done(memory);

	kvfree(iobj->suspend);
	iobj->suspend = NULL;
}

static int
nvkm_instobj_save(struct nvkm_instobj *iobj)
{
	struct nvkm_memory *memory = &iobj->memory;
	const u64 size = nvkm_memory_size(memory);
	void __iomem *map;
	int i;

	iobj->suspend = kvmalloc(size, GFP_KERNEL);
	if (!iobj->suspend)
		return -ENOMEM;

	if (!(map = nvkm_kmap(memory))) {
		for (i = 0; i < size; i += 4)
			iobj->suspend[i / 4] = nvkm_ro32(memory, i);
	} else {
		memcpy_fromio(iobj->suspend, map, size);
	}
	nvkm_done(memory);
	return 0;
}

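/* Instance objects add themselves to the instmem object list at
 * construction time and remove themselves again on destruction, so
 * suspend/resume can iterate over every live object.
 */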
void
nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	spin_lock(&imem->lock);
	list_del(&iobj->head);
	spin_unlock(&imem->lock);
}

void
nvkm_instobj_ctor(const struct nvkm_memory_func *func,
		  struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
	nvkm_memory_ctor(func, &iobj->memory);
	iobj->suspend = NULL;
	spin_lock(&imem->lock);
	list_add_tail(&iobj->head, &imem->list);
	spin_unlock(&imem->lock);
}

int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nvkm_subdev *subdev = &imem->subdev;
	struct nvkm_memory *memory = NULL;
	u32 offset;
	int ret;

	ret = imem->func->memory_new(imem, size, align, zero, &memory);
	if (ret) {
		nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
		goto done;
	}

	nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
		   zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));

	if (!imem->func->zero && zero) {
		void __iomem *map = nvkm_kmap(memory);
		if (unlikely(!map)) {
			for (offset = 0; offset < size; offset += 4)
				nvkm_wo32(memory, offset, 0x00000000);
		} else {
			memset_io(map, 0x00, size);
		}
		nvkm_done(memory);
	}

done:
	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}

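/* Illustrative usage sketch, not taken from a real caller: allocate a
 * zeroed, 4KiB-aligned instance object and release it again.  Sizes,
 * alignment and error handling are the caller's choice.
 *
 *	struct nvkm_memory *inst = NULL;
 *	int ret = nvkm_instobj_new(imem, 0x1000, 0x1000, true, &inst);
 *	if (ret == 0)
 *		nvkm_memory_unref(&inst);
 */
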
/******************************************************************************
 * instmem subdev base implementation
 *****************************************************************************/

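/* Read/write instance memory at a given address; both accessors dispatch
 * straight to the backend implementation.
 */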
u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
	return imem->func->rd32(imem, addr);
}

void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
	imem->func->wr32(imem, addr, data);
}

void
nvkm_instmem_boot(struct nvkm_instmem *imem)
{
	/* Separate bootstrapped objects from the normal list, as we need
	 * to make sure they're accessed with the slowpath on suspend
	 * and resume.
	 */
	struct nvkm_instobj *iobj, *itmp;

	spin_lock(&imem->lock);
	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
		list_move_tail(&iobj->head, &imem->boot);
	}
	spin_unlock(&imem->lock);
}

static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	if (suspend) {
		list_for_each_entry(iobj, &imem->list, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}

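		/* Tearing BAR2 down before saving the boot objects forces
		 * them through the slow path, matching the expectation set
		 * up by nvkm_instmem_boot().
		 */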
		nvkm_bar_bar2_fini(subdev->device);

		list_for_each_entry(iobj, &imem->boot, head) {
			int ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}
	}

	if (imem->func->fini)
		imem->func->fini(imem);

	return 0;
}

static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

	list_for_each_entry(iobj, &imem->boot, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

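	/* Boot objects are restored before BAR2 comes back (slow path);
	 * everything else is restored afterwards and can use the fast
	 * path again.
	 */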
	nvkm_bar_bar2_init(subdev->device);

	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	return 0;
}

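/* One-time initialisation hook, forwarded to the backend if it
 * implements one.
 */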
static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	if (imem->func->oneinit)
		return imem->func->oneinit(imem);
	return 0;
}

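/* Destructor: give the backend a chance to release its state first,
 * then tear down the common state.
 */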
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	void *data = imem;
	if (imem->func->dtor)
		data = imem->func->dtor(imem);
	mutex_destroy(&imem->mutex);
	return data;
}

static const struct nvkm_subdev_func
nvkm_instmem = {
	.dtor = nvkm_instmem_dtor,
	.oneinit = nvkm_instmem_oneinit,
	.init = nvkm_instmem_init,
	.fini = nvkm_instmem_fini,
};

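/* Common constructor used by the instmem backends: registers the subdev
 * with the core and initialises the object-tracking state.
 */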
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
		  enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
{
	nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
	imem->func = func;
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
	INIT_LIST_HEAD(&imem->boot);
	mutex_init(&imem->mutex);
}