// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"
#include "rvu_trace.h"

static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

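/* Unlocked helper: clear the TX/RX mailbox headers and the buffered
 * message/response sizes for the given device.
 */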
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);

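/* Reset the mailbox of the given device while holding mdev->mbox_lock. */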
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];

	spin_lock(&mdev->mbox_lock);
	__otx2_mbox_reset(mbox, devid);
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);

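/* Free the per-device array and clear the base pointers set up by
 * otx2_mbox_init().
 */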
void otx2_mbox_destroy(struct otx2_mbox *mbox)
{
	mbox->reg_base = NULL;
	mbox->hwbase = NULL;

	kfree(mbox->dev);
	mbox->dev = NULL;
}
EXPORT_SYMBOL(otx2_mbox_destroy);

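/* Program the TX/RX region offsets and sizes, select the interrupt trigger
 * register for the given direction, and allocate per-device state.
 * Returns 0 on success or a negative errno.
 */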
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid;

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size  = MBOX_UP_TX_SIZE;
		mbox->rx_size  = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size  = MBOX_UP_RX_SIZE;
		mbox->rx_size  = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}

	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);

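/* Sleep-poll until all sent messages have been acknowledged or
 * MBOX_RSP_TIMEOUT milliseconds elapse. Returns 0 on success, -EIO on timeout.
 */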
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct device *sender = &mbox->pdev->dev;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		usleep_range(800, 1000);
	}
	dev_dbg(sender, "timed out while waiting for rsp\n");
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);

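/* Busy-poll for up to one second until all sent messages are acknowledged.
 * Returns 0 on success, -EIO on timeout.
 */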
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	unsigned long timeout = jiffies + 1 * HZ;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		cpu_relax();
	}
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);

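/* Publish queued messages to the peer: copy them from the bounce buffer (if
 * one is in use) into the shared mailbox region, update the TX header and
 * write the trigger register to notify the peer.
 */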
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages.  So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);

	spin_unlock(&mdev->mbox_lock);

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(1, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}
EXPORT_SYMBOL(otx2_mbox_msg_send);

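/* Reserve room for a request of 'size' bytes plus a 'size_rsp'-byte response
 * in the mailbox of the given device and return the request's header, or NULL
 * if either region is full.
 */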
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);

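/* Find the response that pairs with request 'msg'. Returns the response
 * header, or ERR_PTR(-ENODEV) if responses are still outstanding or no
 * matching response is found.
 */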
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);

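/* Walk all request/response pairs for the given device and return the first
 * non-zero response code, 0 if every request got a matching successful
 * response, or -ENODEV on a mismatch.
 */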
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);

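/* Queue a MBOX_MSG_INVALID response for a request that could not be handled. */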
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)
	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;
	return 0;
}
EXPORT_SYMBOL(otx2_reply_invalid_msg);

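/* Return true if any messages are currently queued for the given device. */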
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	bool ret;

	spin_lock(&mdev->mbox_lock);
	ret = mdev->num_msgs != 0;
	spin_unlock(&mdev->mbox_lock);

	return ret;
}
EXPORT_SYMBOL(otx2_mbox_nonempty);

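/* Translate a mailbox message ID to its name via the MBOX_MESSAGES table. */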
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");