/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

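/* Ack a received host message by setting the RCV_MSG_ACK bit in the
 * mailbox control register's RCV byte.
 */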
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

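/* Set or clear TRN_MSG_VALID in the mailbox control register's TRN byte to
 * tell the host whether the message in the TRN_DW* buffers is valid.
 */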
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg should *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg cannot be guaranteed to return
 * the correct value, since it does not check that RCV_MSG_VALID has been set
 * by the host before reading RCV_DW0.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

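/* Check whether the message currently in RCV_DW0 matches the expected event;
 * if it does, ack it to the host, otherwise return -ENOENT.
 */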
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

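/* Non-blocking check of the TRN_MSG_ACK bit; returns non-zero if the host has
 * already acked the last transmitted message.
 */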
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

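/* Poll for TRN_MSG_ACK from the host, in 5 ms steps, for up to
 * AI_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */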
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

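/* Poll the receive mailbox for the expected event, in 10 ms steps, for up to
 * AI_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */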
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

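/* Send a request to the host: wait for any stale TRN_MSG_ACK to clear, write
 * the request and its three data words into the TRN_DW* buffers, raise
 * TRN_MSG_VALID and wait for the host's ack.
 */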
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, the hardware also clears the VF's
	 * TRN_MSG_ACK.  Otherwise xgpu_ai_poll_ack() below would return
	 * immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted! waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
				data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
				data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
				data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

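/* Send an access request to the host.  For GPU init/fini/reset access
 * requests, also wait for READY_TO_ACCESS_GPU and, for init/reset, read the
 * firmware reserve checksum back from RCV_DW2.
 */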
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is a GPU access request */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
		req == IDH_REQ_GPU_FINI_ACCESS ||
		req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
				(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

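/* Worker scheduled from the mailbox RCV interrupt when the host signals a VF
 * FLR: wait for the FLR-complete notification, then trigger GPU recovery if
 * the device should be recovered and no TDR will handle it.
 */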
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (!down_read_trylock(&adev->reset_sem))
		return;

	atomic_set(&adev->in_gpu_reset, 1);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_read(&adev->reset_sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

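/* Handle the mailbox "message valid" interrupt: peek the pending message and
 * schedule the FLR worker for FLR notifications, ack alive queries, and
 * ignore everything else.
 */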
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * safely ignore it here since the polling thread will handle it;
	 * other msgs like FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

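/* Register the mailbox receive (valid) and ack interrupt sources with the
 * BIF IH client.
 */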
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

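/* Enable both mailbox interrupt sources and initialize the FLR work item. */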
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};