• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <subdev/mc.h>
25 #include <subdev/top.h>
26 
27 void
nvkm_falcon_load_imem(struct nvkm_falcon * falcon,void * data,u32 start,u32 size,u16 tag,u8 port,bool secure)28 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
29 		      u32 size, u16 tag, u8 port, bool secure)
30 {
31 	if (secure && !falcon->secret) {
32 		nvkm_warn(falcon->user,
33 			  "writing with secure tag on a non-secure falcon!\n");
34 		return;
35 	}
36 
37 	falcon->func->load_imem(falcon, data, start, size, tag, port,
38 				secure);
39 }
40 
41 void
nvkm_falcon_load_dmem(struct nvkm_falcon * falcon,void * data,u32 start,u32 size,u8 port)42 nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
43 		      u32 size, u8 port)
44 {
45 	mutex_lock(&falcon->dmem_mutex);
46 
47 	falcon->func->load_dmem(falcon, data, start, size, port);
48 
49 	mutex_unlock(&falcon->dmem_mutex);
50 }
51 
52 void
nvkm_falcon_read_dmem(struct nvkm_falcon * falcon,u32 start,u32 size,u8 port,void * data)53 nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
54 		      void *data)
55 {
56 	mutex_lock(&falcon->dmem_mutex);
57 
58 	falcon->func->read_dmem(falcon, start, size, port, data);
59 
60 	mutex_unlock(&falcon->dmem_mutex);
61 }
62 
63 void
nvkm_falcon_bind_context(struct nvkm_falcon * falcon,struct nvkm_memory * inst)64 nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
65 {
66 	if (!falcon->func->bind_context) {
67 		nvkm_error(falcon->user,
68 			   "Context binding not supported on this falcon!\n");
69 		return;
70 	}
71 
72 	falcon->func->bind_context(falcon, inst);
73 }
74 
75 void
nvkm_falcon_set_start_addr(struct nvkm_falcon * falcon,u32 start_addr)76 nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
77 {
78 	falcon->func->set_start_addr(falcon, start_addr);
79 }
80 
81 void
nvkm_falcon_start(struct nvkm_falcon * falcon)82 nvkm_falcon_start(struct nvkm_falcon *falcon)
83 {
84 	falcon->func->start(falcon);
85 }
86 
87 int
nvkm_falcon_enable(struct nvkm_falcon * falcon)88 nvkm_falcon_enable(struct nvkm_falcon *falcon)
89 {
90 	struct nvkm_device *device = falcon->owner->device;
91 	enum nvkm_devidx id = falcon->owner->index;
92 	int ret;
93 
94 	nvkm_mc_enable(device, id);
95 	ret = falcon->func->enable(falcon);
96 	if (ret) {
97 		nvkm_mc_disable(device, id);
98 		return ret;
99 	}
100 
101 	return 0;
102 }
103 
104 void
nvkm_falcon_disable(struct nvkm_falcon * falcon)105 nvkm_falcon_disable(struct nvkm_falcon *falcon)
106 {
107 	struct nvkm_device *device = falcon->owner->device;
108 	enum nvkm_devidx id = falcon->owner->index;
109 
110 	/* already disabled, return or wait_idle will timeout */
111 	if (!nvkm_mc_enabled(device, id))
112 		return;
113 
114 	falcon->func->disable(falcon);
115 
116 	nvkm_mc_disable(device, id);
117 }
118 
119 int
nvkm_falcon_reset(struct nvkm_falcon * falcon)120 nvkm_falcon_reset(struct nvkm_falcon *falcon)
121 {
122 	if (!falcon->func->reset) {
123 		nvkm_falcon_disable(falcon);
124 		return nvkm_falcon_enable(falcon);
125 	}
126 
127 	return falcon->func->reset(falcon);
128 }
129 
130 int
nvkm_falcon_wait_for_halt(struct nvkm_falcon * falcon,u32 ms)131 nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
132 {
133 	return falcon->func->wait_for_halt(falcon, ms);
134 }
135 
136 int
nvkm_falcon_clear_interrupt(struct nvkm_falcon * falcon,u32 mask)137 nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
138 {
139 	return falcon->func->clear_interrupt(falcon, mask);
140 }
141 
/* One-time probe of a falcon: resolve its MMIO base address and read back
 * static configuration from its hardware config registers.  Called lazily
 * from nvkm_falcon_get().
 *
 * Returns 0 on success, or -ENODEV if no base address could be determined.
 */
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	/* No fixed base address supplied at construction time: look it up
	 * from the device topology for this subdev. */
	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	/* Register 0x12c: falcon version, security (secret) level, and the
	 * number of IMEM/DMEM access ports. */
	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	/* Register 0x108: memory sizes.  Bits 0-8 give the code limit and
	 * bits 9-17 the data limit, both scaled to bytes in units of 256
	 * ((reg & 0x3fe00) >> 1 == ((reg >> 9) & 0x1ff) << 8). */
	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	/* Latch the engine's debug-mode bit, if this falcon exposes a
	 * debug register. */
	if (func->debug) {
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	/* NOTE(review): falcon->oneinit is checked in nvkm_falcon_get() but
	 * never set anywhere in this file -- presumably set elsewhere;
	 * verify, otherwise this probe re-runs on every acquisition. */

	return 0;
}
172 
173 void
nvkm_falcon_put(struct nvkm_falcon * falcon,const struct nvkm_subdev * user)174 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
175 {
176 	if (unlikely(!falcon))
177 		return;
178 
179 	mutex_lock(&falcon->mutex);
180 	if (falcon->user == user) {
181 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
182 		falcon->user = NULL;
183 	}
184 	mutex_unlock(&falcon->mutex);
185 }
186 
187 int
nvkm_falcon_get(struct nvkm_falcon * falcon,const struct nvkm_subdev * user)188 nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
189 {
190 	int ret = 0;
191 
192 	mutex_lock(&falcon->mutex);
193 	if (falcon->user) {
194 		nvkm_error(user, "%s falcon already acquired by %s!\n",
195 			   falcon->name, nvkm_subdev_name[falcon->user->index]);
196 		mutex_unlock(&falcon->mutex);
197 		return -EBUSY;
198 	}
199 
200 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
201 	if (!falcon->oneinit)
202 		ret = nvkm_falcon_oneinit(falcon);
203 	falcon->user = user;
204 	mutex_unlock(&falcon->mutex);
205 	return ret;
206 }
207 
/* Destructor counterpart to nvkm_falcon_ctor().  Intentionally empty:
 * the falcon holds no resources that need explicit teardown, but the
 * hook is kept so nvkm_falcon_del() has a stable destruction path. */
void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}
212 
213 int
nvkm_falcon_ctor(const struct nvkm_falcon_func * func,struct nvkm_subdev * subdev,const char * name,u32 addr,struct nvkm_falcon * falcon)214 nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
215 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
216 		 struct nvkm_falcon *falcon)
217 {
218 	falcon->func = func;
219 	falcon->owner = subdev;
220 	falcon->name = name;
221 	falcon->addr = addr;
222 	mutex_init(&falcon->mutex);
223 	mutex_init(&falcon->dmem_mutex);
224 	return 0;
225 }
226 
227 void
nvkm_falcon_del(struct nvkm_falcon ** pfalcon)228 nvkm_falcon_del(struct nvkm_falcon **pfalcon)
229 {
230 	if (*pfalcon) {
231 		nvkm_falcon_dtor(*pfalcon);
232 		kfree(*pfalcon);
233 		*pfalcon = NULL;
234 	}
235 }
236