/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

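/* Acknowledge the message currently in the RCV mailbox by setting the
 * ack bit (bit 1) of the RCV byte in the mailbox control register.
 */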
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

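/* Set or clear TRN_MSG_VALID (bit 0 of the TRN byte in the mailbox control
 * register) to tell the host whether the TRN data words hold a valid message.
 */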
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID
 * is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

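/* Check whether the message in the RCV mailbox matches the expected event;
 * if so, acknowledge it and return 0, otherwise return -ENOENT.
 */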
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

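/* Non-blocking check of TRN_MSG_ACK (bit 1 of the TRN byte); non-zero means
 * the host has acknowledged the message we sent.
 */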
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

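/* Busy-wait in 5 ms steps for the host to assert TRN_MSG_ACK, giving up
 * after NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */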
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

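/* Poll in 10 ms steps until the host posts the expected event in the RCV
 * mailbox, giving up after NV_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */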
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	return -ETIME;
}

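/* Send a request plus up to three data words to the host through the TRN
 * mailbox and wait for the host's acknowledgement.
 */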
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; with the host's
	 * RCV_MSG_ACK cleared, the hardware automatically clears the VF's
	 * TRN_MSG_ACK. Otherwise the xgpu_nv_poll_ack() below would return
	 * immediately on a stale ACK.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

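/* Send an access request to the host. For requests that expect a reply,
 * poll for the matching ready event and pick up any extra data the host
 * returns (init-data version in RCV_DW1, firmware checksum in RCV_DW2).
 */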
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;
	enum idh_event event = -1;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else {
				/* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
			}
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

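/* Worker scheduled from the mailbox RCV interrupt when the host signals a
 * function-level reset (FLR): wait for the FLR COMPLETE message, then
 * trigger GPU recovery if the driver should handle it.
 */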
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* Block amdgpu_gpu_recover until the FLR COMPLETE message is received;
	 * otherwise the mailbox message would be ruined/reset by the VF FLR.
	 */
	if (!down_read_trylock(&adev->reset_sem))
		return;

	atomic_set(&adev->in_gpu_reset, 1);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_read(&adev->reset_sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

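/* Mailbox RCV interrupt handler: peek at the pending message and schedule
 * the FLR worker when the host announces a function-level reset.
 */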
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by the kernel's polling path, so the
	 * IRQ handler can safely ignore it here; the polling thread will handle
	 * it. Other messages, such as FLR complete, are likewise not handled here.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

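/* Register the two BIF mailbox interrupt sources: source id 135 feeds the
 * message-received (rcv) interrupt and source id 138 the acknowledge (ack)
 * interrupt.
 */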
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};