1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4 #include <linux/dynamic_debug.h>
5
6 #include "core.h"
7
pdsc_process_notifyq(struct pdsc_qcq * qcq)8 static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
9 {
10 union pds_core_notifyq_comp *comp;
11 struct pdsc *pdsc = qcq->pdsc;
12 struct pdsc_cq *cq = &qcq->cq;
13 struct pdsc_cq_info *cq_info;
14 int nq_work = 0;
15 u64 eid;
16
17 cq_info = &cq->info[cq->tail_idx];
18 comp = cq_info->comp;
19 eid = le64_to_cpu(comp->event.eid);
20 while (eid > pdsc->last_eid) {
21 u16 ecode = le16_to_cpu(comp->event.ecode);
22
23 switch (ecode) {
24 case PDS_EVENT_LINK_CHANGE:
25 dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
26 ecode, eid);
27 pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
28 break;
29
30 case PDS_EVENT_RESET:
31 dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
32 ecode, eid);
33 pdsc_notify(PDS_EVENT_RESET, comp);
34 break;
35
36 case PDS_EVENT_XCVR:
37 dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
38 ecode, eid);
39 break;
40
41 default:
42 dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
43 ecode, eid);
44 break;
45 }
46
47 pdsc->last_eid = eid;
48 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
49 cq_info = &cq->info[cq->tail_idx];
50 comp = cq_info->comp;
51 eid = le64_to_cpu(comp->event.eid);
52
53 nq_work++;
54 }
55
56 qcq->accum_work += nq_work;
57
58 return nq_work;
59 }
60
pdsc_adminq_inc_if_up(struct pdsc * pdsc)61 static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
62 {
63 if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
64 pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
65 return false;
66
67 return refcount_inc_not_zero(&pdsc->adminq_refcnt);
68 }
69
/* Service the NotifyQ and AdminQ completion rings for this qcq.
 *
 * Runs in process context from the driver workqueue (see
 * pdsc_work_thread()).  First drains pending NotifyQ events, then
 * walks the AdminQ completion ring under adminq_lock: each completion
 * is copied to the poster's result buffer and the poster is woken.
 * Finally, one interrupt credit is returned per completion handled
 * and the interrupt is re-armed.
 */
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;
	int credits;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Deliver the result only if the poster is still waiting;
		 * a poster that timed out has already marked its completion
		 * done (see pdsc_adminq_post()), and q_info->dest may no
		 * longer be safe to write.
		 */
		if (!completion_done(&q_info->completion)) {
			memcpy(q_info->dest, comp, sizeof(*comp));
			complete(&q_info->completion);
		}

		/* The expected color flips each time the CQ ring wraps */
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion */
	credits = nq_work + aq_work;
	if (credits)
		pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
				      credits,
				      PDS_CORE_INTR_CRED_REARM);
	refcount_dec(&pdsc->adminq_refcnt);
}
133
pdsc_work_thread(struct work_struct * work)134 void pdsc_work_thread(struct work_struct *work)
135 {
136 struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
137
138 pdsc_process_adminq(qcq);
139 }
140
pdsc_adminq_isr(int irq,void * data)141 irqreturn_t pdsc_adminq_isr(int irq, void *data)
142 {
143 struct pdsc *pdsc = data;
144 struct pdsc_qcq *qcq;
145
146 /* Don't process AdminQ when it's not up */
147 if (!pdsc_adminq_inc_if_up(pdsc)) {
148 dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
149 __func__);
150 return IRQ_HANDLED;
151 }
152
153 qcq = &pdsc->adminqcq;
154 queue_work(pdsc->wq, &qcq->work);
155 pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
156 refcount_dec(&pdsc->adminq_refcnt);
157
158 return IRQ_HANDLED;
159 }
160
/* Post one command descriptor to the AdminQ, under adminq_lock.
 *
 * Verifies there is ring space and that FW is alive, copies the
 * command into the next descriptor slot, records where the eventual
 * completion should be delivered (q_info->dest), and rings the
 * doorbell.
 *
 * Return: the descriptor index used (>= 0) on success so the caller
 * can wait on that slot's completion, -ENOSPC if the ring is full,
 * or -ENXIO if the FW is not running.
 */
static int __pdsc_adminq_post(struct pdsc *pdsc,
			      struct pdsc_qcq *qcq,
			      union pds_core_adminq_cmd *cmd,
			      union pds_core_adminq_comp *comp)
{
	struct pdsc_queue *q = &qcq->q;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	unsigned int avail;
	int index;
	int ret;

	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);

	/* Check for space in the queue: count free slots between head and
	 * tail, keeping one slot unused so a full ring (head == tail)
	 * stays distinguishable from an empty one.
	 */
	avail = q->tail_idx;
	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	if (!avail) {
		ret = -ENOSPC;
		goto err_out_unlock;
	}

	/* Check that the FW is running */
	if (!pdsc_is_fw_running(pdsc)) {
		if (pdsc->info_regs) {
			u8 fw_status =
				ioread8(&pdsc->info_regs->fw_status);

			dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
				 __func__, fw_status);
		} else {
			dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
				 __func__);
		}
		ret = -ENXIO;

		goto err_out_unlock;
	}

	/* Post the request: stash the caller's result buffer, copy in the
	 * command, and reset the slot's completion so the service thread
	 * can signal it (see pdsc_process_adminq()).
	 */
	index = q->head_idx;
	q_info = &q->info[index];
	q_info->dest = comp;
	memcpy(q_info->desc, cmd, sizeof(*cmd));
	reinit_completion(&q_info->completion);

	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
		q->head_idx, q->tail_idx);
	dev_dbg(pdsc->dev, "post admin queue command:\n");
	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
			 cmd, sizeof(*cmd), true);

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	/* Ring the doorbell with the new head to hand the descriptor to FW */
	pds_core_dbell_ring(pdsc->kern_dbpage,
			    q->hw_type, q->dbval | q->head_idx);
	ret = index;

err_out_unlock:
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
	return ret;
}
226
/* Post an AdminQ command and wait for its completion.
 *
 * @pdsc:      device context
 * @cmd:       command to post
 * @comp:      buffer that receives the completion descriptor
 * @fast_poll: when true, keep the 1 ms poll interval instead of
 *             backing off exponentially between wait timeslices
 *
 * The wait is timesliced so FW death can be noticed early rather than
 * only after the full devcmd_timeout.  On -ENXIO or -ETIMEDOUT the
 * health work is scheduled to start recovery.
 *
 * Return: 0 on success, or a negative errno (-ENXIO if the adminq or
 * FW is unavailable, -ETIMEDOUT on timeout, -ENOSPC if the ring is
 * full, or the errno mapped from a bad completion status).
 */
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	unsigned long poll_interval = 1;
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	struct completion *wc;
	int err = 0;
	int index;

	/* Hold an adminq reference for the duration of the post+wait */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
			__func__, cmd->opcode);
		return -ENXIO;
	}

	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	/* Wait on the completion of the descriptor slot we were assigned */
	wc = &pdsc->adminqcq.q.info[index].completion;
	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = msecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(wc, poll_jiffies);
		if (remaining)
			break;

		/* Timeslice expired - bail out early if FW died meanwhile */
		if (!pdsc_is_fw_running(pdsc)) {
			if (pdsc->info_regs) {
				u8 fw_status =
					ioread8(&pdsc->info_regs->fw_status);

				dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
					__func__, fw_status);
			} else {
				dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
					__func__);
			}
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results and clear an un-completed timeout; marking the
	 * completion done tells pdsc_process_adminq() not to write into
	 * our (about to be stale) comp buffer if the answer arrives late.
	 */
	if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
		err = -ETIMEDOUT;
		complete(wc);
	}

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	/* Only trust comp->status if we actually got a completion */
	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		/* Device looks unhealthy - kick off recovery */
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	refcount_dec(&pdsc->adminq_refcnt);

	return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
315