// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/dynamic_debug.h>

#include "core.h"

struct pdsc_wait_context {
	struct pdsc_qcq *qcq;
	struct completion wait_completion;
};

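/* Drain new events from the NotifyQ completion ring, calling pdsc_notify()
 * for link-change and reset events, and return the number of events handled.
 */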
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
	union pds_core_notifyq_comp *comp;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_cq_info *cq_info;
	int nq_work = 0;
	u64 eid;

	cq_info = &cq->info[cq->tail_idx];
	comp = cq_info->comp;
	eid = le64_to_cpu(comp->event.eid);
	while (eid > pdsc->last_eid) {
		u16 ecode = le16_to_cpu(comp->event.ecode);

		switch (ecode) {
		case PDS_EVENT_LINK_CHANGE:
			dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
				 ecode, eid);
			pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
			break;

		case PDS_EVENT_RESET:
			dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
				 ecode, eid);
			pdsc_notify(PDS_EVENT_RESET, comp);
			break;

		case PDS_EVENT_XCVR:
			dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
				 ecode, eid);
			break;

		default:
			dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
				 ecode, eid);
			break;
		}

		pdsc->last_eid = eid;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		cq_info = &cq->info[cq->tail_idx];
		comp = cq_info->comp;
		eid = le64_to_cpu(comp->event.eid);

		nq_work++;
	}

	qcq->accum_work += nq_work;

	return nq_work;
}

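/* Take a reference on the adminq while it is usable: returns false if the
 * driver is stopping, the FW has been marked dead, or the refcount has
 * already dropped to zero.
 */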
static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
{
	if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
	    pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
		return false;

	return refcount_inc_not_zero(&pdsc->adminq_refcnt);
}

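/* Service the AdminQ: first handle any pending NotifyQ events, then walk the
 * AdminQ completion ring, copying each completion to its requester and waking
 * the waiter, and finally return the accumulated interrupt credits to the
 * device.
 */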
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;
	int credits;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Copy out the completion data */
		memcpy(q_info->dest, comp, sizeof(*comp));

		complete_all(&q_info->wc->wait_completion);

		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion */
	credits = nq_work + aq_work;
	if (credits)
		pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
				      credits,
				      PDS_CORE_INTR_CRED_REARM);
	refcount_dec(&pdsc->adminq_refcnt);
}

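/* Deferred-work entry point scheduled from the AdminQ interrupt handler */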
void pdsc_work_thread(struct work_struct *work)
{
	struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);

	pdsc_process_adminq(qcq);
}

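/* AdminQ interrupt handler: defers queue processing to the driver workqueue
 * and clears the interrupt mask.
 */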
irqreturn_t pdsc_adminq_isr(int irq, void *data)
{
	struct pdsc *pdsc = data;
	struct pdsc_qcq *qcq;

	/* Don't process AdminQ when it's not up */
	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
			__func__);
		return IRQ_HANDLED;
	}

	qcq = &pdsc->adminqcq;
	queue_work(pdsc->wq, &qcq->work);
	pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
	refcount_dec(&pdsc->adminq_refcnt);

	return IRQ_HANDLED;
}

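/* Post one command descriptor to the AdminQ under the adminq lock: check for
 * ring space and that the FW is still running, copy in the command, ring the
 * doorbell, and return the descriptor index used (or a negative errno).
 */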
static int __pdsc_adminq_post(struct pdsc *pdsc,
			      struct pdsc_qcq *qcq,
			      union pds_core_adminq_cmd *cmd,
			      union pds_core_adminq_comp *comp,
			      struct pdsc_wait_context *wc)
{
	struct pdsc_queue *q = &qcq->q;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	unsigned int avail;
	int index;
	int ret;

	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);

	/* Check for space in the queue */
	avail = q->tail_idx;
	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	if (!avail) {
		ret = -ENOSPC;
		goto err_out_unlock;
	}

	/* Check that the FW is running */
	if (!pdsc_is_fw_running(pdsc)) {
		if (pdsc->info_regs) {
			u8 fw_status =
				ioread8(&pdsc->info_regs->fw_status);

			dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
				 __func__, fw_status);
		} else {
			dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
				 __func__);
		}
		ret = -ENXIO;

		goto err_out_unlock;
	}

	/* Post the request */
	index = q->head_idx;
	q_info = &q->info[index];
	q_info->wc = wc;
	q_info->dest = comp;
	memcpy(q_info->desc, cmd, sizeof(*cmd));

	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
		q->head_idx, q->tail_idx);
	dev_dbg(pdsc->dev, "post admin queue command:\n");
	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
			 cmd, sizeof(*cmd), true);

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	pds_core_dbell_ring(pdsc->kern_dbpage,
			    q->hw_type, q->dbval | q->head_idx);
	ret = index;

err_out_unlock:
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
	return ret;
}

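/* Post an AdminQ command and wait for its completion, polling in timeslices
 * (with exponential back-off unless fast_poll is set) so FW failures are
 * caught early; on -ENXIO or -ETIMEDOUT the health check work is scheduled.
 */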
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	struct pdsc_wait_context wc = {
		.wait_completion =
			COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
	};
	unsigned long poll_interval = 1;
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	int err = 0;
	int index;

	if (!pdsc_adminq_inc_if_up(pdsc)) {
		dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
			__func__, cmd->opcode);
		return -ENXIO;
	}

	wc.qcq = &pdsc->adminqcq;
	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = msecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(&wc.wait_completion,
							poll_jiffies);
		if (remaining)
			break;

		if (!pdsc_is_fw_running(pdsc)) {
			if (pdsc->info_regs) {
				u8 fw_status =
					ioread8(&pdsc->info_regs->fw_status);

				dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
					__func__, fw_status);
			} else {
				dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
					__func__);
			}
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit))
		err = -ETIMEDOUT;

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	refcount_dec(&pdsc->adminq_refcnt);

	return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);