• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Mellanox BlueField SoC TmFifo driver
4  *
5  * Copyright (C) 2019 Mellanox Technologies
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/bitfield.h>
10 #include <linux/circ_buf.h>
11 #include <linux/efi.h>
12 #include <linux/irq.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/platform_device.h>
16 #include <linux/types.h>
17 
18 #include <linux/virtio_config.h>
19 #include <linux/virtio_console.h>
20 #include <linux/virtio_ids.h>
21 #include <linux/virtio_net.h>
22 #include <linux/virtio_ring.h>
23 
24 #include "mlxbf-tmfifo-regs.h"
25 
26 /* Vring size. */
27 #define MLXBF_TMFIFO_VRING_SIZE			SZ_1K
28 
29 /* Console Tx buffer size. */
30 #define MLXBF_TMFIFO_CON_TX_BUF_SIZE		SZ_32K
31 
32 /* Console Tx buffer reserved space. */
33 #define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE	8
34 
35 /* House-keeping timer interval. */
36 #define MLXBF_TMFIFO_TIMER_INTERVAL		(HZ / 10)
37 
38 /* Virtual devices sharing the TM FIFO. */
39 #define MLXBF_TMFIFO_VDEV_MAX		(VIRTIO_ID_CONSOLE + 1)
40 
41 /*
42  * Reserve 1/16 of TmFifo space, so console messages are not starved by
43  * the networking traffic.
44  */
45 #define MLXBF_TMFIFO_RESERVE_RATIO		16
46 
47 /* Message with data needs at least two words (for header & data). */
48 #define MLXBF_TMFIFO_DATA_MIN_WORDS		2
49 
50 struct mlxbf_tmfifo;
51 
52 /**
53  * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
54  * @va: virtual address of the ring
55  * @dma: dma address of the ring
56  * @vq: pointer to the virtio virtqueue
57  * @desc: current descriptor of the pending packet
58  * @desc_head: head descriptor of the pending packet
59  * @drop_desc: dummy desc for packet dropping
60  * @cur_len: processed length of the current descriptor
61  * @rem_len: remaining length of the pending packet
62  * @pkt_len: total length of the pending packet
63  * @next_avail: next avail descriptor id
64  * @num: vring size (number of descriptors)
65  * @align: vring alignment size
66  * @index: vring index
67  * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
68  * @fifo: pointer to the tmfifo structure
69  */
70 struct mlxbf_tmfifo_vring {
71 	void *va;
72 	dma_addr_t dma;
73 	struct virtqueue *vq;
74 	struct vring_desc *desc;
75 	struct vring_desc *desc_head;
76 	struct vring_desc drop_desc;
77 	int cur_len;
78 	int rem_len;
79 	u32 pkt_len;
80 	u16 next_avail;
81 	int num;
82 	int align;
83 	int index;
84 	int vdev_id;
85 	struct mlxbf_tmfifo *fifo;
86 };
87 
88 /* Check whether vring is in drop mode. */
89 #define IS_VRING_DROP(_r) ({ \
90 	typeof(_r) (r) = (_r); \
91 	(r->desc_head == &r->drop_desc ? true : false); })
92 
93 /* A stub length to drop maximum length packet. */
94 #define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)
95 
96 /* Interrupt types. */
97 enum {
98 	MLXBF_TM_RX_LWM_IRQ,
99 	MLXBF_TM_RX_HWM_IRQ,
100 	MLXBF_TM_TX_LWM_IRQ,
101 	MLXBF_TM_TX_HWM_IRQ,
102 	MLXBF_TM_MAX_IRQ
103 };
104 
105 /* Ring types (Rx & Tx). */
106 enum {
107 	MLXBF_TMFIFO_VRING_RX,
108 	MLXBF_TMFIFO_VRING_TX,
109 	MLXBF_TMFIFO_VRING_MAX
110 };
111 
112 /**
113  * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
114  * @vdev: virtio device, in which the vdev.id.device field has the
115  *        VIRTIO_ID_xxx id to distinguish the virtual device.
116  * @status: status of the device
117  * @features: supported features of the device
118  * @vrings: array of tmfifo vrings of this device
119  * @config.cons: virtual console config -
120  *               select if vdev.id.device is VIRTIO_ID_CONSOLE
121  * @config.net: virtual network config -
122  *              select if vdev.id.device is VIRTIO_ID_NET
123  * @tx_buf: tx buffer used to buffer data before writing into the FIFO
124  */
125 struct mlxbf_tmfifo_vdev {
126 	struct virtio_device vdev;
127 	u8 status;
128 	u64 features;
129 	struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
130 	union {
131 		struct virtio_console_config cons;
132 		struct virtio_net_config net;
133 	} config;
134 	struct circ_buf tx_buf;
135 };
136 
137 /**
138  * mlxbf_tmfifo_irq_info - Structure of the interrupt information
139  * @fifo: pointer to the tmfifo structure
140  * @irq: interrupt number
141  * @index: index into the interrupt array
142  */
143 struct mlxbf_tmfifo_irq_info {
144 	struct mlxbf_tmfifo *fifo;
145 	int irq;
146 	int index;
147 };
148 
149 /**
150  * mlxbf_tmfifo - Structure of the TmFifo
151  * @vdev: array of the virtual devices running over the TmFifo
152  * @lock: lock to protect the TmFifo access
153  * @rx_base: mapped register base address for the Rx FIFO
154  * @tx_base: mapped register base address for the Tx FIFO
155  * @rx_fifo_size: number of entries of the Rx FIFO
156  * @tx_fifo_size: number of entries of the Tx FIFO
157  * @pend_events: pending bits for deferred events
158  * @irq_info: interrupt information
159  * @work: work struct for deferred process
160  * @timer: background timer
161  * @vring: Tx/Rx ring
162  * @spin_lock: Tx/Rx spin lock
163  * @is_ready: ready flag
164  */
165 struct mlxbf_tmfifo {
166 	struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
167 	struct mutex lock;		/* TmFifo lock */
168 	void __iomem *rx_base;
169 	void __iomem *tx_base;
170 	int rx_fifo_size;
171 	int tx_fifo_size;
172 	unsigned long pend_events;
173 	struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
174 	struct work_struct work;
175 	struct timer_list timer;
176 	struct mlxbf_tmfifo_vring *vring[2];
177 	spinlock_t spin_lock[2];	/* spin lock */
178 	bool is_ready;
179 };
180 
181 /**
182  * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
183  * @type: message type
184  * @len: payload length in network byte order. Messages sent into the FIFO
185  *       will be read by the other side as data stream in the same byte order.
186  *       The length needs to be encoded into network order so both sides
187  *       could understand it.
188  */
189 struct mlxbf_tmfifo_msg_hdr {
190 	u8 type;
191 	__be16 len;
192 	u8 unused[5];
193 } __packed __aligned(sizeof(u64));
194 
195 /*
196  * Default MAC.
197  * This MAC address will be read from EFI persistent variable if configured.
198  * It can also be reconfigured with standard Linux tools.
199  */
200 static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
201 	0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
202 };
203 
204 /* EFI variable name of the MAC address. */
205 static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
206 
207 /* Maximum L2 header length. */
208 #define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)
209 
210 /* Supported virtio-net features. */
211 #define MLXBF_TMFIFO_NET_FEATURES \
212 	(BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
213 	 BIT_ULL(VIRTIO_NET_F_MAC))
214 
215 #define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
216 
217 /* Free vrings of the FIFO device. */
mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo * fifo,struct mlxbf_tmfifo_vdev * tm_vdev)218 static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
219 				     struct mlxbf_tmfifo_vdev *tm_vdev)
220 {
221 	struct mlxbf_tmfifo_vring *vring;
222 	int i, size;
223 
224 	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
225 		vring = &tm_vdev->vrings[i];
226 		if (vring->va) {
227 			size = vring_size(vring->num, vring->align);
228 			dma_free_coherent(tm_vdev->vdev.dev.parent, size,
229 					  vring->va, vring->dma);
230 			vring->va = NULL;
231 			if (vring->vq) {
232 				vring_del_virtqueue(vring->vq);
233 				vring->vq = NULL;
234 			}
235 		}
236 	}
237 }
238 
239 /* Allocate vrings for the FIFO. */
mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo * fifo,struct mlxbf_tmfifo_vdev * tm_vdev)240 static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
241 				     struct mlxbf_tmfifo_vdev *tm_vdev)
242 {
243 	struct mlxbf_tmfifo_vring *vring;
244 	struct device *dev;
245 	dma_addr_t dma;
246 	int i, size;
247 	void *va;
248 
249 	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
250 		vring = &tm_vdev->vrings[i];
251 		vring->fifo = fifo;
252 		vring->num = MLXBF_TMFIFO_VRING_SIZE;
253 		vring->align = SMP_CACHE_BYTES;
254 		vring->index = i;
255 		vring->vdev_id = tm_vdev->vdev.id.device;
256 		vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
257 		dev = &tm_vdev->vdev.dev;
258 
259 		size = vring_size(vring->num, vring->align);
260 		va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
261 		if (!va) {
262 			mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
263 			dev_err(dev->parent, "dma_alloc_coherent failed\n");
264 			return -ENOMEM;
265 		}
266 
267 		vring->va = va;
268 		vring->dma = dma;
269 	}
270 
271 	return 0;
272 }
273 
274 /* Disable interrupts of the FIFO device. */
mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo * fifo)275 static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
276 {
277 	int i, irq;
278 
279 	for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
280 		irq = fifo->irq_info[i].irq;
281 		fifo->irq_info[i].irq = 0;
282 		disable_irq(irq);
283 	}
284 }
285 
286 /* Interrupt handler. */
mlxbf_tmfifo_irq_handler(int irq,void * arg)287 static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
288 {
289 	struct mlxbf_tmfifo_irq_info *irq_info = arg;
290 
291 	if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
292 		schedule_work(&irq_info->fifo->work);
293 
294 	return IRQ_HANDLED;
295 }
296 
/*
 * Get the next packet descriptor from the vring.
 *
 * Returns the head descriptor of the next available chain, or NULL if
 * the driver has not posted any new buffers (or the ring is corrupted).
 * 'vring->next_avail' is this side's private cursor into the avail ring.
 */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	unsigned int idx, head;

	/* Cursor caught up with the driver-written avail index: ring empty. */
	if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
		return NULL;

	/* Make sure 'avail->idx' is visible already. */
	virtio_rmb(false);

	idx = vring->next_avail % vr->num;
	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
	/* A head index beyond the ring size indicates a corrupted ring. */
	if (WARN_ON(head >= vr->num))
		return NULL;

	/* Consume this avail entry. */
	vring->next_avail++;

	return &vr->desc[head];
}
320 
/*
 * Release virtio descriptor.
 *
 * Puts the completed chain (identified by its head 'desc') into the used
 * ring with the number of bytes consumed/produced ('len'), then publishes
 * it by advancing 'used->idx'.
 */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
				      struct vring_desc *desc, u32 len)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	u16 idx, vr_idx;

	vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
	idx = vr_idx % vr->num;
	/* 'id' is the index of the chain's head descriptor in the table. */
	vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
	vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);

	/*
	 * Virtio could poll and check the 'idx' to decide whether the desc is
	 * done or not. Add a memory barrier here to make sure the update above
	 * completes before updating the idx.
	 */
	virtio_mb(false);
	vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
342 
343 /* Get the total length of the descriptor chain. */
mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring * vring,struct vring_desc * desc)344 static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
345 				    struct vring_desc *desc)
346 {
347 	const struct vring *vr = virtqueue_get_vring(vring->vq);
348 	struct virtio_device *vdev = vring->vq->vdev;
349 	u32 len = 0, idx;
350 
351 	while (desc) {
352 		len += virtio32_to_cpu(vdev, desc->len);
353 		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
354 			break;
355 		idx = virtio16_to_cpu(vdev, desc->next);
356 		desc = &vr->desc[idx];
357 	}
358 
359 	return len;
360 }
361 
mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring * vring)362 static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
363 {
364 	struct vring_desc *desc_head;
365 	u32 len = 0;
366 
367 	if (vring->desc_head) {
368 		desc_head = vring->desc_head;
369 		len = vring->pkt_len;
370 	} else {
371 		desc_head = mlxbf_tmfifo_get_next_desc(vring);
372 		len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
373 	}
374 
375 	if (desc_head)
376 		mlxbf_tmfifo_release_desc(vring, desc_head, len);
377 
378 	vring->pkt_len = 0;
379 	vring->desc = NULL;
380 	vring->desc_head = NULL;
381 }
382 
/* Zero-fill the virtio_net header at the start of an Rx buffer. */
static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
				       struct vring_desc *desc, bool is_rx)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct virtio_net_hdr *hdr;

	hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
	memset(hdr, 0, sizeof(*hdr));
}
392 
393 /* Get and initialize the next packet. */
394 static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring * vring,bool is_rx)395 mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
396 {
397 	struct vring_desc *desc;
398 
399 	desc = mlxbf_tmfifo_get_next_desc(vring);
400 	if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
401 		mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
402 
403 	vring->desc_head = desc;
404 	vring->desc = desc;
405 
406 	return desc;
407 }
408 
409 /* House-keeping timer. */
mlxbf_tmfifo_timer(struct timer_list * t)410 static void mlxbf_tmfifo_timer(struct timer_list *t)
411 {
412 	struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
413 	int rx, tx;
414 
415 	rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
416 	tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
417 
418 	if (rx || tx)
419 		schedule_work(&fifo->work);
420 
421 	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
422 }
423 
424 /* Copy one console packet into the output buffer. */
mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev * cons,struct mlxbf_tmfifo_vring * vring,struct vring_desc * desc)425 static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
426 					    struct mlxbf_tmfifo_vring *vring,
427 					    struct vring_desc *desc)
428 {
429 	const struct vring *vr = virtqueue_get_vring(vring->vq);
430 	struct virtio_device *vdev = &cons->vdev;
431 	u32 len, idx, seg;
432 	void *addr;
433 
434 	while (desc) {
435 		addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
436 		len = virtio32_to_cpu(vdev, desc->len);
437 
438 		seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
439 					MLXBF_TMFIFO_CON_TX_BUF_SIZE);
440 		if (len <= seg) {
441 			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
442 		} else {
443 			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
444 			addr += seg;
445 			memcpy(cons->tx_buf.buf, addr, len - seg);
446 		}
447 		cons->tx_buf.head = (cons->tx_buf.head + len) %
448 			MLXBF_TMFIFO_CON_TX_BUF_SIZE;
449 
450 		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
451 			break;
452 		idx = virtio16_to_cpu(vdev, desc->next);
453 		desc = &vr->desc[idx];
454 	}
455 }
456 
457 /* Copy console data into the output buffer. */
mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev * cons,struct mlxbf_tmfifo_vring * vring)458 static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
459 					struct mlxbf_tmfifo_vring *vring)
460 {
461 	struct vring_desc *desc;
462 	u32 len, avail;
463 
464 	desc = mlxbf_tmfifo_get_next_desc(vring);
465 	while (desc) {
466 		/* Release the packet if not enough space. */
467 		len = mlxbf_tmfifo_get_pkt_len(vring, desc);
468 		avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
469 				   MLXBF_TMFIFO_CON_TX_BUF_SIZE);
470 		if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
471 			mlxbf_tmfifo_release_desc(vring, desc, len);
472 			break;
473 		}
474 
475 		mlxbf_tmfifo_console_output_one(cons, vring, desc);
476 		mlxbf_tmfifo_release_desc(vring, desc, len);
477 		desc = mlxbf_tmfifo_get_next_desc(vring);
478 	}
479 }
480 
481 /* Get the number of available words in Rx FIFO for receiving. */
mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo * fifo)482 static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
483 {
484 	u64 sts;
485 
486 	sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
487 	return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
488 }
489 
490 /* Get the number of available words in the TmFifo for sending. */
mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo * fifo,int vdev_id)491 static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
492 {
493 	int tx_reserve;
494 	u32 count;
495 	u64 sts;
496 
497 	/* Reserve some room in FIFO for console messages. */
498 	if (vdev_id == VIRTIO_ID_NET)
499 		tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
500 	else
501 		tx_reserve = 1;
502 
503 	sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
504 	count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
505 	return fifo->tx_fifo_size - tx_reserve - count;
506 }
507 
/*
 * Console Tx (move data from the output buffer into the TmFifo).
 *
 * 'avail' is the number of free 8-byte words in the Tx FIFO. A message
 * header word is written first, then the payload is streamed out one
 * word at a time from the console circular buffer.
 */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
	struct mlxbf_tmfifo_msg_hdr hdr;
	struct mlxbf_tmfifo_vdev *cons;
	unsigned long flags;
	int size, seg;
	void *addr;
	u64 data;

	/* Return if not enough space available. */
	if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
		return;

	cons = fifo->vdev[VIRTIO_ID_CONSOLE];
	if (!cons || !cons->tx_buf.buf)
		return;

	/*
	 * Return if no data to send.
	 *
	 * NOTE(review): 'size' is sampled before taking the spin lock.
	 * Producers (console_output) only advance 'head', and the tail is
	 * only advanced here, so 'size' appears to be a safe lower bound —
	 * confirm against the locking model if this is ever refactored.
	 */
	size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
			MLXBF_TMFIFO_CON_TX_BUF_SIZE);
	if (size == 0)
		return;

	/* Adjust the size to available space (header word included). */
	if (size + sizeof(hdr) > avail * sizeof(u64))
		size = avail * sizeof(u64) - sizeof(hdr);

	/* Write header. Length is in network byte order (see msg_hdr). */
	hdr.type = VIRTIO_ID_CONSOLE;
	hdr.len = htons(size);
	writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);

	/* Use spin-lock to protect the 'cons->tx_buf'. */
	spin_lock_irqsave(&fifo->spin_lock[0], flags);

	while (size > 0) {
		addr = cons->tx_buf.buf + cons->tx_buf.tail;

		/* Contiguous bytes available before the buffer wraps. */
		seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
				      MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (seg >= sizeof(u64)) {
			memcpy(&data, addr, sizeof(u64));
		} else {
			/* Word straddles the wrap point: stitch two copies. */
			memcpy(&data, addr, seg);
			memcpy((u8 *)&data + seg, cons->tx_buf.buf,
			       sizeof(u64) - seg);
		}
		writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);

		if (size >= sizeof(u64)) {
			cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size -= sizeof(u64);
		} else {
			/* Final partial word: only consume 'size' bytes. */
			cons->tx_buf.tail = (cons->tx_buf.tail + size) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size = 0;
		}
	}

	spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
571 
/*
 * Rx/Tx one word in the descriptor buffer.
 *
 * Moves a single 8-byte FIFO word between the hardware FIFO and the
 * descriptor's buffer at offset 'vring->cur_len'. 'len' caps how far
 * into the buffer this packet may go; the final word of a packet may
 * be partial.
 */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
				   struct vring_desc *desc,
				   bool is_rx, int len)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct mlxbf_tmfifo *fifo = vring->fifo;
	void *addr;
	u64 data;

	/* Get the buffer address of this desc. */
	addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));

	/*
	 * Read a word from FIFO for Rx. This must happen even in drop
	 * mode so the FIFO stays in sync; the data is then discarded.
	 */
	if (is_rx)
		data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);

	if (vring->cur_len + sizeof(u64) <= len) {
		/* The whole word. */
		if (is_rx) {
			if (!IS_VRING_DROP(vring))
				memcpy(addr + vring->cur_len, &data,
				       sizeof(u64));
		} else {
			memcpy(&data, addr + vring->cur_len,
			       sizeof(u64));
		}
		vring->cur_len += sizeof(u64);
	} else {
		/* Leftover bytes. */
		if (is_rx) {
			if (!IS_VRING_DROP(vring))
				memcpy(addr + vring->cur_len, &data,
				       len - vring->cur_len);
		} else {
			/* Zero-pad the last partial word sent to the FIFO. */
			data = 0;
			memcpy(&data, addr + vring->cur_len,
			       len - vring->cur_len);
		}
		vring->cur_len = len;
	}

	/* Write the word into FIFO for Tx. */
	if (!is_rx)
		writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
}
618 
619 /*
620  * Rx/Tx packet header.
621  *
622  * In Rx case, the packet might be found to belong to a different vring since
623  * the TmFifo is shared by different services. In such case, the 'vring_change'
624  * flag is set.
625  */
mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring * vring,struct vring_desc ** desc,bool is_rx,bool * vring_change)626 static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
627 				     struct vring_desc **desc,
628 				     bool is_rx, bool *vring_change)
629 {
630 	struct mlxbf_tmfifo *fifo = vring->fifo;
631 	struct virtio_net_config *config;
632 	struct mlxbf_tmfifo_msg_hdr hdr;
633 	int vdev_id, hdr_len;
634 	bool drop_rx = false;
635 
636 	/* Read/Write packet header. */
637 	if (is_rx) {
638 		/* Drain one word from the FIFO. */
639 		*(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
640 
641 		/* Skip the length 0 packets (keepalive). */
642 		if (hdr.len == 0)
643 			return;
644 
645 		/* Check packet type. */
646 		if (hdr.type == VIRTIO_ID_NET) {
647 			vdev_id = VIRTIO_ID_NET;
648 			hdr_len = sizeof(struct virtio_net_hdr);
649 			config = &fifo->vdev[vdev_id]->config.net;
650 			/* A legacy-only interface for now. */
651 			if (ntohs(hdr.len) >
652 			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
653 					      config->mtu) +
654 					      MLXBF_TMFIFO_NET_L2_OVERHEAD)
655 				drop_rx = true;
656 		} else {
657 			vdev_id = VIRTIO_ID_CONSOLE;
658 			hdr_len = 0;
659 		}
660 
661 		/*
662 		 * Check whether the new packet still belongs to this vring.
663 		 * If not, update the pkt_len of the new vring.
664 		 */
665 		if (vdev_id != vring->vdev_id) {
666 			struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
667 
668 			if (!tm_dev2)
669 				return;
670 			vring->desc = *desc;
671 			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
672 			*vring_change = true;
673 		}
674 
675 		if (drop_rx && !IS_VRING_DROP(vring)) {
676 			if (vring->desc_head)
677 				mlxbf_tmfifo_release_pkt(vring);
678 			*desc = &vring->drop_desc;
679 			vring->desc_head = *desc;
680 			vring->desc = *desc;
681 		}
682 
683 		vring->pkt_len = ntohs(hdr.len) + hdr_len;
684 	} else {
685 		/* Network virtio has an extra header. */
686 		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
687 			   sizeof(struct virtio_net_hdr) : 0;
688 		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
689 		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
690 			    VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
691 		hdr.len = htons(vring->pkt_len - hdr_len);
692 		writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
693 	}
694 
695 	vring->cur_len = hdr_len;
696 	vring->rem_len = vring->pkt_len;
697 	fifo->vring[is_rx] = vring;
698 }
699 
/*
 * Rx/Tx one descriptor.
 *
 * Return true to indicate more data available.
 *
 * This is the per-word state machine driving a packet through the FIFO:
 * it starts a new packet (header), then moves one 8-byte word per call,
 * and finally releases the descriptor chain back to virtio.
 */
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
				       bool is_rx, int *avail)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_device *vdev;
	bool vring_change = false;
	struct vring_desc *desc;
	unsigned long flags;
	u32 len, idx;

	vdev = &fifo->vdev[vring->vdev_id]->vdev;

	/* Get the descriptor of the next packet. */
	if (!vring->desc) {
		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
		if (!desc) {
			/* Drop next Rx packet to avoid stuck. */
			if (is_rx) {
				/* No Rx buffer posted: drain into drop_desc. */
				desc = &vring->drop_desc;
				vring->desc_head = desc;
				vring->desc = desc;
			} else {
				return false;
			}
		}
	} else {
		desc = vring->desc;
	}

	/* Beginning of a packet. Start to Rx/Tx packet header. */
	if (vring->pkt_len == 0) {
		mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
		(*avail)--;

		/* Return if new packet is for another ring. */
		if (vring_change)
			return false;
		goto mlxbf_tmfifo_desc_done;
	}

	/* Get the length of this desc, capped by the remaining packet bytes. */
	len = virtio32_to_cpu(vdev, desc->len);
	if (len > vring->rem_len)
		len = vring->rem_len;

	/* Rx/Tx one word (8 bytes) if not done. */
	if (vring->cur_len < len) {
		mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
		(*avail)--;
	}

	/* Check again whether it's done. */
	if (vring->cur_len == len) {
		vring->cur_len = 0;
		vring->rem_len -= len;

		/* Get the next desc on the chain. */
		if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
			idx = virtio16_to_cpu(vdev, desc->next);
			desc = &vr->desc[idx];
			goto mlxbf_tmfifo_desc_done;
		}

		/* Done and release the packet. */
		desc = NULL;
		fifo->vring[is_rx] = NULL;
		if (!IS_VRING_DROP(vring)) {
			mlxbf_tmfifo_release_pkt(vring);
		} else {
			/* Dropped packet: nothing to hand back to virtio. */
			vring->pkt_len = 0;
			vring->desc_head = NULL;
			vring->desc = NULL;
			return false;
		}

		/*
		 * Make sure the load/store are in order before
		 * returning back to virtio.
		 */
		virtio_mb(false);

		/* Notify upper layer that packet is done. */
		spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
		vring_interrupt(0, vring->vq);
		spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
	}

mlxbf_tmfifo_desc_done:
	/* Save the current desc. */
	vring->desc = desc;

	return true;
}
800 
801 /* Rx & Tx processing of a queue. */
mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring * vring,bool is_rx)802 static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
803 {
804 	int avail = 0, devid = vring->vdev_id;
805 	struct mlxbf_tmfifo *fifo;
806 	bool more;
807 
808 	fifo = vring->fifo;
809 
810 	/* Return if vdev is not ready. */
811 	if (!fifo->vdev[devid])
812 		return;
813 
814 	/* Return if another vring is running. */
815 	if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
816 		return;
817 
818 	/* Only handle console and network for now. */
819 	if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
820 		return;
821 
822 	do {
823 		/* Get available FIFO space. */
824 		if (avail == 0) {
825 			if (is_rx)
826 				avail = mlxbf_tmfifo_get_rx_avail(fifo);
827 			else
828 				avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
829 			if (avail <= 0)
830 				break;
831 		}
832 
833 		/* Console output always comes from the Tx buffer. */
834 		if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
835 			mlxbf_tmfifo_console_tx(fifo, avail);
836 			break;
837 		}
838 
839 		/* Handle one descriptor. */
840 		more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
841 	} while (more);
842 }
843 
844 /* Handle Rx or Tx queues. */
mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo * fifo,int queue_id,int irq_id,bool is_rx)845 static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
846 				   int irq_id, bool is_rx)
847 {
848 	struct mlxbf_tmfifo_vdev *tm_vdev;
849 	struct mlxbf_tmfifo_vring *vring;
850 	int i;
851 
852 	if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
853 	    !fifo->irq_info[irq_id].irq)
854 		return;
855 
856 	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
857 		tm_vdev = fifo->vdev[i];
858 		if (tm_vdev) {
859 			vring = &tm_vdev->vrings[queue_id];
860 			if (vring->vq)
861 				mlxbf_tmfifo_rxtx(vring, is_rx);
862 		}
863 	}
864 }
865 
866 /* Work handler for Rx and Tx case. */
mlxbf_tmfifo_work_handler(struct work_struct * work)867 static void mlxbf_tmfifo_work_handler(struct work_struct *work)
868 {
869 	struct mlxbf_tmfifo *fifo;
870 
871 	fifo = container_of(work, struct mlxbf_tmfifo, work);
872 	if (!fifo->is_ready)
873 		return;
874 
875 	mutex_lock(&fifo->lock);
876 
877 	/* Tx (Send data to the TmFifo). */
878 	mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
879 			       MLXBF_TM_TX_LWM_IRQ, false);
880 
881 	/* Rx (Receive data from the TmFifo). */
882 	mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
883 			       MLXBF_TM_RX_HWM_IRQ, true);
884 
885 	mutex_unlock(&fifo->lock);
886 }
887 
/*
 * The notify function is called when new buffers are posted.
 *
 * Returns true (the virtqueue remains usable). Tx work is either done
 * inline (console) or deferred to the work handler via pend_events.
 */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
	struct mlxbf_tmfifo_vring *vring = vq->priv;
	struct mlxbf_tmfifo_vdev *tm_vdev;
	struct mlxbf_tmfifo *fifo;
	unsigned long flags;

	fifo = vring->fifo;

	/*
	 * Virtio maintains vrings in pairs, even number ring for Rx
	 * and odd number ring for Tx.
	 */
	if (vring->index & BIT(0)) {
		/*
		 * Console could make blocking call with interrupts disabled.
		 * In such case, the vring needs to be served right away. For
		 * other cases, just set the TX LWM bit to start Tx in the
		 * worker handler.
		 */
		if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
			/* spin_lock[0] protects the console tx_buf. */
			spin_lock_irqsave(&fifo->spin_lock[0], flags);
			tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
			mlxbf_tmfifo_console_output(tm_vdev, vring);
			spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
			set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
		} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
					    &fifo->pend_events)) {
			/* Tx work already pending: no need to reschedule. */
			return true;
		}
	} else {
		/* Rx work already pending: no need to reschedule. */
		if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
			return true;
	}

	schedule_work(&fifo->work);

	return true;
}
928 
929 /* Get the array of feature bits for this device. */
mlxbf_tmfifo_virtio_get_features(struct virtio_device * vdev)930 static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
931 {
932 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
933 
934 	return tm_vdev->features;
935 }
936 
937 /* Confirm device features to use. */
mlxbf_tmfifo_virtio_finalize_features(struct virtio_device * vdev)938 static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
939 {
940 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
941 
942 	tm_vdev->features = vdev->features;
943 
944 	return 0;
945 }
946 
947 /* Free virtqueues found by find_vqs(). */
mlxbf_tmfifo_virtio_del_vqs(struct virtio_device * vdev)948 static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
949 {
950 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
951 	struct mlxbf_tmfifo_vring *vring;
952 	struct virtqueue *vq;
953 	int i;
954 
955 	for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
956 		vring = &tm_vdev->vrings[i];
957 
958 		/* Release the pending packet. */
959 		if (vring->desc)
960 			mlxbf_tmfifo_release_pkt(vring);
961 		vq = vring->vq;
962 		if (vq) {
963 			vring->vq = NULL;
964 			vring_del_virtqueue(vq);
965 		}
966 	}
967 }
968 
969 /* Create and initialize the virtual queues. */
mlxbf_tmfifo_virtio_find_vqs(struct virtio_device * vdev,unsigned int nvqs,struct virtqueue * vqs[],vq_callback_t * callbacks[],const char * const names[],const bool * ctx,struct irq_affinity * desc)970 static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
971 					unsigned int nvqs,
972 					struct virtqueue *vqs[],
973 					vq_callback_t *callbacks[],
974 					const char * const names[],
975 					const bool *ctx,
976 					struct irq_affinity *desc)
977 {
978 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
979 	struct mlxbf_tmfifo_vring *vring;
980 	struct virtqueue *vq;
981 	int i, ret, size;
982 
983 	if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
984 		return -EINVAL;
985 
986 	for (i = 0; i < nvqs; ++i) {
987 		if (!names[i]) {
988 			ret = -EINVAL;
989 			goto error;
990 		}
991 		vring = &tm_vdev->vrings[i];
992 
993 		/* zero vring */
994 		size = vring_size(vring->num, vring->align);
995 		memset(vring->va, 0, size);
996 		vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
997 					 false, false, vring->va,
998 					 mlxbf_tmfifo_virtio_notify,
999 					 callbacks[i], names[i]);
1000 		if (!vq) {
1001 			dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
1002 			ret = -ENOMEM;
1003 			goto error;
1004 		}
1005 
1006 		vqs[i] = vq;
1007 		vring->vq = vq;
1008 		vq->priv = vring;
1009 	}
1010 
1011 	return 0;
1012 
1013 error:
1014 	mlxbf_tmfifo_virtio_del_vqs(vdev);
1015 	return ret;
1016 }
1017 
1018 /* Read the status byte. */
mlxbf_tmfifo_virtio_get_status(struct virtio_device * vdev)1019 static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
1020 {
1021 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1022 
1023 	return tm_vdev->status;
1024 }
1025 
1026 /* Write the status byte. */
mlxbf_tmfifo_virtio_set_status(struct virtio_device * vdev,u8 status)1027 static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
1028 					   u8 status)
1029 {
1030 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1031 
1032 	tm_vdev->status = status;
1033 }
1034 
1035 /* Reset the device. Not much here for now. */
mlxbf_tmfifo_virtio_reset(struct virtio_device * vdev)1036 static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
1037 {
1038 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1039 
1040 	tm_vdev->status = 0;
1041 }
1042 
1043 /* Read the value of a configuration field. */
mlxbf_tmfifo_virtio_get(struct virtio_device * vdev,unsigned int offset,void * buf,unsigned int len)1044 static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
1045 				    unsigned int offset,
1046 				    void *buf,
1047 				    unsigned int len)
1048 {
1049 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1050 
1051 	if ((u64)offset + len > sizeof(tm_vdev->config))
1052 		return;
1053 
1054 	memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
1055 }
1056 
1057 /* Write the value of a configuration field. */
mlxbf_tmfifo_virtio_set(struct virtio_device * vdev,unsigned int offset,const void * buf,unsigned int len)1058 static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
1059 				    unsigned int offset,
1060 				    const void *buf,
1061 				    unsigned int len)
1062 {
1063 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1064 
1065 	if ((u64)offset + len > sizeof(tm_vdev->config))
1066 		return;
1067 
1068 	memcpy((u8 *)&tm_vdev->config + offset, buf, len);
1069 }
1070 
tmfifo_virtio_dev_release(struct device * device)1071 static void tmfifo_virtio_dev_release(struct device *device)
1072 {
1073 	struct virtio_device *vdev =
1074 			container_of(device, struct virtio_device, dev);
1075 	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1076 
1077 	kfree(tm_vdev);
1078 }
1079 
1080 /* Virtio config operations. */
1081 static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
1082 	.get_features = mlxbf_tmfifo_virtio_get_features,
1083 	.finalize_features = mlxbf_tmfifo_virtio_finalize_features,
1084 	.find_vqs = mlxbf_tmfifo_virtio_find_vqs,
1085 	.del_vqs = mlxbf_tmfifo_virtio_del_vqs,
1086 	.reset = mlxbf_tmfifo_virtio_reset,
1087 	.set_status = mlxbf_tmfifo_virtio_set_status,
1088 	.get_status = mlxbf_tmfifo_virtio_get_status,
1089 	.get = mlxbf_tmfifo_virtio_get,
1090 	.set = mlxbf_tmfifo_virtio_set,
1091 };
1092 
1093 /* Create vdev for the FIFO. */
mlxbf_tmfifo_create_vdev(struct device * dev,struct mlxbf_tmfifo * fifo,int vdev_id,u64 features,void * config,u32 size)1094 static int mlxbf_tmfifo_create_vdev(struct device *dev,
1095 				    struct mlxbf_tmfifo *fifo,
1096 				    int vdev_id, u64 features,
1097 				    void *config, u32 size)
1098 {
1099 	struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
1100 	int ret;
1101 
1102 	mutex_lock(&fifo->lock);
1103 
1104 	tm_vdev = fifo->vdev[vdev_id];
1105 	if (tm_vdev) {
1106 		dev_err(dev, "vdev %d already exists\n", vdev_id);
1107 		ret = -EEXIST;
1108 		goto fail;
1109 	}
1110 
1111 	tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
1112 	if (!tm_vdev) {
1113 		ret = -ENOMEM;
1114 		goto fail;
1115 	}
1116 
1117 	tm_vdev->vdev.id.device = vdev_id;
1118 	tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
1119 	tm_vdev->vdev.dev.parent = dev;
1120 	tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
1121 	tm_vdev->features = features;
1122 	if (config)
1123 		memcpy(&tm_vdev->config, config, size);
1124 
1125 	if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
1126 		dev_err(dev, "unable to allocate vring\n");
1127 		ret = -ENOMEM;
1128 		goto vdev_fail;
1129 	}
1130 
1131 	/* Allocate an output buffer for the console device. */
1132 	if (vdev_id == VIRTIO_ID_CONSOLE)
1133 		tm_vdev->tx_buf.buf = devm_kmalloc(dev,
1134 						   MLXBF_TMFIFO_CON_TX_BUF_SIZE,
1135 						   GFP_KERNEL);
1136 	fifo->vdev[vdev_id] = tm_vdev;
1137 
1138 	/* Register the virtio device. */
1139 	ret = register_virtio_device(&tm_vdev->vdev);
1140 	reg_dev = tm_vdev;
1141 	if (ret) {
1142 		dev_err(dev, "register_virtio_device failed\n");
1143 		goto vdev_fail;
1144 	}
1145 
1146 	mutex_unlock(&fifo->lock);
1147 	return 0;
1148 
1149 vdev_fail:
1150 	mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
1151 	fifo->vdev[vdev_id] = NULL;
1152 	if (reg_dev)
1153 		put_device(&tm_vdev->vdev.dev);
1154 	else
1155 		kfree(tm_vdev);
1156 fail:
1157 	mutex_unlock(&fifo->lock);
1158 	return ret;
1159 }
1160 
1161 /* Delete vdev for the FIFO. */
mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo * fifo,int vdev_id)1162 static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
1163 {
1164 	struct mlxbf_tmfifo_vdev *tm_vdev;
1165 
1166 	mutex_lock(&fifo->lock);
1167 
1168 	/* Unregister vdev. */
1169 	tm_vdev = fifo->vdev[vdev_id];
1170 	if (tm_vdev) {
1171 		unregister_virtio_device(&tm_vdev->vdev);
1172 		mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
1173 		fifo->vdev[vdev_id] = NULL;
1174 	}
1175 
1176 	mutex_unlock(&fifo->lock);
1177 
1178 	return 0;
1179 }
1180 
1181 /* Read the configured network MAC address from efi variable. */
mlxbf_tmfifo_get_cfg_mac(u8 * mac)1182 static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
1183 {
1184 	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
1185 	unsigned long size = ETH_ALEN;
1186 	u8 buf[ETH_ALEN];
1187 	efi_status_t rc;
1188 
1189 	rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
1190 	if (rc == EFI_SUCCESS && size == ETH_ALEN)
1191 		ether_addr_copy(mac, buf);
1192 	else
1193 		ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
1194 }
1195 
1196 /* Set TmFifo thresolds which is used to trigger interrupts. */
mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo * fifo)1197 static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
1198 {
1199 	u64 ctl;
1200 
1201 	/* Get Tx FIFO size and set the low/high watermark. */
1202 	ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
1203 	fifo->tx_fifo_size =
1204 		FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
1205 	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
1206 		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
1207 			   fifo->tx_fifo_size / 2);
1208 	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
1209 		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
1210 			   fifo->tx_fifo_size - 1);
1211 	writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
1212 
1213 	/* Get Rx FIFO size and set the low/high watermark. */
1214 	ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
1215 	fifo->rx_fifo_size =
1216 		FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
1217 	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
1218 		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
1219 	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
1220 		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
1221 	writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
1222 }
1223 
/*
 * Tear down the FIFO: quiesce all asynchronous activity, then remove the
 * virtio devices. The ordering below is deliberate and must be preserved.
 */
static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
	int i;

	/* Clear is_ready first so timer/work handlers become no-ops. */
	fifo->is_ready = false;
	/* Stop the housekeeping timer before disabling its interrupts. */
	del_timer_sync(&fifo->timer);
	mlxbf_tmfifo_disable_irqs(fifo);
	/* Flush any work the timer or IRQs may already have scheduled. */
	cancel_work_sync(&fifo->work);
	/* With nothing running, it is safe to remove every vdev slot. */
	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
		mlxbf_tmfifo_delete_vdev(fifo, i);
}
1235 
1236 /* Probe the TMFIFO. */
mlxbf_tmfifo_probe(struct platform_device * pdev)1237 static int mlxbf_tmfifo_probe(struct platform_device *pdev)
1238 {
1239 	struct virtio_net_config net_config;
1240 	struct device *dev = &pdev->dev;
1241 	struct mlxbf_tmfifo *fifo;
1242 	int i, rc;
1243 
1244 	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
1245 	if (!fifo)
1246 		return -ENOMEM;
1247 
1248 	spin_lock_init(&fifo->spin_lock[0]);
1249 	spin_lock_init(&fifo->spin_lock[1]);
1250 	INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
1251 	mutex_init(&fifo->lock);
1252 
1253 	/* Get the resource of the Rx FIFO. */
1254 	fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
1255 	if (IS_ERR(fifo->rx_base))
1256 		return PTR_ERR(fifo->rx_base);
1257 
1258 	/* Get the resource of the Tx FIFO. */
1259 	fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
1260 	if (IS_ERR(fifo->tx_base))
1261 		return PTR_ERR(fifo->tx_base);
1262 
1263 	platform_set_drvdata(pdev, fifo);
1264 
1265 	timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
1266 
1267 	for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
1268 		fifo->irq_info[i].index = i;
1269 		fifo->irq_info[i].fifo = fifo;
1270 		fifo->irq_info[i].irq = platform_get_irq(pdev, i);
1271 		rc = devm_request_irq(dev, fifo->irq_info[i].irq,
1272 				      mlxbf_tmfifo_irq_handler, 0,
1273 				      "tmfifo", &fifo->irq_info[i]);
1274 		if (rc) {
1275 			dev_err(dev, "devm_request_irq failed\n");
1276 			fifo->irq_info[i].irq = 0;
1277 			return rc;
1278 		}
1279 	}
1280 
1281 	mlxbf_tmfifo_set_threshold(fifo);
1282 
1283 	/* Create the console vdev. */
1284 	rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
1285 	if (rc)
1286 		goto fail;
1287 
1288 	/* Create the network vdev. */
1289 	memset(&net_config, 0, sizeof(net_config));
1290 
1291 	/* A legacy-only interface for now. */
1292 	net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1293 					   ETH_DATA_LEN);
1294 	net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1295 					      VIRTIO_NET_S_LINK_UP);
1296 	mlxbf_tmfifo_get_cfg_mac(net_config.mac);
1297 	rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
1298 				      MLXBF_TMFIFO_NET_FEATURES, &net_config,
1299 				      sizeof(net_config));
1300 	if (rc)
1301 		goto fail;
1302 
1303 	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
1304 
1305 	fifo->is_ready = true;
1306 	return 0;
1307 
1308 fail:
1309 	mlxbf_tmfifo_cleanup(fifo);
1310 	return rc;
1311 }
1312 
/* Device remove function. */
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
	/* Full teardown of the FIFO state stored at probe time. */
	mlxbf_tmfifo_cleanup(platform_get_drvdata(pdev));

	return 0;
}
1322 
/* ACPI HID published by BlueField firmware for the TmFifo device. */
static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
	{ "MLNXBF01", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
1328 
1329 static struct platform_driver mlxbf_tmfifo_driver = {
1330 	.probe = mlxbf_tmfifo_probe,
1331 	.remove = mlxbf_tmfifo_remove,
1332 	.driver = {
1333 		.name = "bf-tmfifo",
1334 		.acpi_match_table = mlxbf_tmfifo_acpi_match,
1335 	},
1336 };
1337 
1338 module_platform_driver(mlxbf_tmfifo_driver);
1339 
1340 MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
1341 MODULE_LICENSE("GPL v2");
1342 MODULE_AUTHOR("Mellanox Technologies");
1343