1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2017 Linaro Ltd.
5 */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14
15 #include "core.h"
16 #include "hfi_cmds.h"
17 #include "hfi_msgs.h"
18 #include "hfi_venus.h"
19 #include "hfi_venus_io.h"
20 #include "firmware.h"
21
/* Masks for the byte-wide fields packed into a queue header's type word. */
#define HFI_MASK_QHDR_TX_TYPE		0xff000000
#define HFI_MASK_QHDR_RX_TYPE		0x00ff0000
#define HFI_MASK_QHDR_PRI_TYPE		0x0000ff00
#define HFI_MASK_QHDR_ID_TYPE		0x000000ff

/* Queue-type identifiers OR'ed into the default queue header type. */
#define HFI_HOST_TO_CTRL_CMD_Q		0
#define HFI_CTRL_TO_HOST_MSG_Q		1
#define HFI_CTRL_TO_HOST_DBG_Q		2
#define HFI_MASK_QHDR_STATUS		0x000000ff

/* Three shared queues: command (host->fw), message and debug (fw->host). */
#define IFACEQ_NUM			3
#define IFACEQ_CMD_IDX			0
#define IFACEQ_MSG_IDX			1
#define IFACEQ_DBG_IDX			2
#define IFACEQ_MAX_BUF_COUNT		50
#define IFACEQ_MAX_PARALLEL_CLNTS	16
#define IFACEQ_DFLT_QHDR		0x01010000

/* Polling period (us) used with readl_poll_timeout() in venus_halt_axi(). */
#define POLL_INTERVAL_US		50

/* Packet-size bounds for the interface queues, in bytes. */
#define IFACEQ_MAX_PKT_SIZE		1024
#define IFACEQ_MED_PKT_SIZE		768
#define IFACEQ_MIN_PKT_SIZE		8
#define IFACEQ_VAR_SMALL_PKT_SIZE	100
#define IFACEQ_VAR_LARGE_PKT_SIZE	512
#define IFACEQ_VAR_HUGE_PKT_SIZE	(1024 * 12)
48
/*
 * Header placed at the start of the shared interface-queue memory.
 * Describes where the per-queue headers live and how many queues exist.
 * Layout is shared with the Venus firmware — do not reorder fields.
 */
struct hfi_queue_table_header {
	u32 version;		/* table layout version, written as 0 */
	u32 size;		/* total size of table header + queue headers */
	u32 qhdr0_offset;	/* offset of the first struct hfi_queue_header */
	u32 qhdr_size;		/* sizeof(struct hfi_queue_header) */
	u32 num_q;		/* number of queue headers following */
	u32 num_active_q;	/* number of queues actually in use */
};
57
/*
 * Per-queue header in shared memory, read and written by both the host
 * and the Venus firmware. read_idx/write_idx are in dwords (see the
 * << 2 conversions in venus_read_queue()/venus_write_queue()).
 * Layout is shared with firmware — do not reorder fields.
 */
struct hfi_queue_header {
	u32 status;		/* 1 = queue active (set in venus_set_qhdr_defaults) */
	u32 start_addr;		/* device address of the queue's data buffer */
	u32 type;		/* IFACEQ_DFLT_QHDR | queue-type identifier */
	u32 q_size;		/* queue capacity in dwords */
	u32 pkt_size;		/* 0 = variable packet size */
	u32 pkt_drop_cnt;
	u32 rx_wm;		/* receive watermark */
	u32 tx_wm;		/* transmit watermark */
	u32 rx_req;		/* host requests an interrupt when data arrives */
	u32 tx_req;		/* peer requests an interrupt when space frees up */
	u32 rx_irq_status;
	u32 tx_irq_status;
	u32 read_idx;		/* consumer index, dwords */
	u32 write_idx;		/* producer index, dwords */
};
74
/* Size of the queue table header plus all per-queue headers. */
#define IFACEQ_TABLE_SIZE \
	(sizeof(struct hfi_queue_table_header) + \
	 sizeof(struct hfi_queue_header) * IFACEQ_NUM)

/* Data-buffer size of a single interface queue, in bytes. */
#define IFACEQ_QUEUE_SIZE	(IFACEQ_MAX_PKT_SIZE * \
	IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)

/* Address of the i-th queue header inside the queue table. */
#define IFACEQ_GET_QHDR_START_ADDR(ptr, i)	\
	(void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \
		((i) * sizeof(struct hfi_queue_header)))

#define QDSS_SIZE		SZ_4K
#define SFR_SIZE		SZ_4K
/* Table plus the three queue data buffers. */
#define QUEUE_SIZE		\
	(IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))

/* All shared regions are page-aligned; the total is 1 MiB-aligned. */
#define ALIGNED_QDSS_SIZE	ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE	ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE	ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE		ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
				      ALIGNED_QDSS_SIZE, SZ_1M)
96
/* Descriptor for one DMA-coherent allocation shared with the firmware. */
struct mem_desc {
	dma_addr_t da;		/* device address */
	void *kva;		/* kernel virtual address */
	u32 size;		/* allocation size, 4K-aligned (see venus_alloc) */
	unsigned long attrs;	/* DMA attributes used at alloc/free time */
};
103
/* One host<->firmware interface queue: its header and its data buffer. */
struct iface_queue {
	struct hfi_queue_header *qhdr;	/* points into the shared queue table */
	struct mem_desc qmem;		/* the queue's circular data buffer */
};
108
/* Coarse firmware lifecycle state tracked by the host. */
enum venus_state {
	VENUS_STATE_DEINIT = 1,
	VENUS_STATE_INIT,
};
113
/* Per-device HFI state: shared-memory queues, power state and buffers. */
struct venus_hfi_device {
	struct venus_core *core;
	u32 irq_status;			/* last status latched in venus_isr() */
	u32 last_packet_type;		/* type of the last command written */
	bool power_enabled;
	bool suspended;
	enum venus_state state;
	/* serialize read / write to the shared memory */
	struct mutex lock;
	struct completion pwr_collapse_prep;	/* completed on HFI_MSG_SYS_PC_PREP */
	struct completion release_resource;	/* completed on SYS_RELEASE_RESOURCE */
	struct mem_desc ifaceq_table;	/* queue table + queue data buffers */
	struct mem_desc sfr;		/* subsystem failure reason buffer */
	struct iface_queue queues[IFACEQ_NUM];
	u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];	/* message-queue read buffer */
	u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];	/* debug-queue read buffer */
};
131
/* Debug/tuning knobs; only venus_fw_debug is visible outside this file. */
static bool venus_pkt_debug;			/* hex-dump every queue packet */
int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
static bool venus_sys_idle_indicator;		/* forced on for v4 cores */
static bool venus_fw_low_power_mode = true;
static int venus_hw_rsp_timeout = 1000;		/* fw response timeout, ms */
static bool venus_fw_coverage;
138
venus_set_state(struct venus_hfi_device * hdev,enum venus_state state)139 static void venus_set_state(struct venus_hfi_device *hdev,
140 enum venus_state state)
141 {
142 mutex_lock(&hdev->lock);
143 hdev->state = state;
144 mutex_unlock(&hdev->lock);
145 }
146
venus_is_valid_state(struct venus_hfi_device * hdev)147 static bool venus_is_valid_state(struct venus_hfi_device *hdev)
148 {
149 return hdev->state != VENUS_STATE_DEINIT;
150 }
151
venus_dump_packet(struct venus_hfi_device * hdev,const void * packet)152 static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
153 {
154 size_t pkt_size = *(u32 *)packet;
155
156 if (!venus_pkt_debug)
157 return;
158
159 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
160 pkt_size, true);
161 }
162
/*
 * Copy one HFI packet into a shared circular queue.
 *
 * @queue:  target interface queue (normally the command queue)
 * @packet: packet to write; its first dword holds the size in bytes
 * @rx_req: set to 1 if the firmware asked to be interrupted on new data
 *
 * Returns 0 on success, -EINVAL on a malformed queue/packet, -ENOSPC
 * when the queue is full (tx_req is raised so the firmware signals the
 * host once space frees up). Caller must hold hdev->lock.
 */
static int venus_write_queue(struct venus_hfi_device *hdev,
			     struct iface_queue *queue,
			     void *packet, u32 *rx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_wr_idx;
	u32 empty_space, rd_idx, wr_idx, qsize;
	u32 *wr_ptr;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	venus_dump_packet(hdev, packet);

	/* packet size (bytes) -> dwords; all indices below are in dwords */
	dwords = (*(u32 *)packet) >> 2;
	if (!dwords)
		return -EINVAL;

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;
	/* ensure rd/wr indices's are read from memory */
	rmb();

	if (wr_idx >= rd_idx)
		empty_space = qsize - (wr_idx - rd_idx);
	else
		empty_space = rd_idx - wr_idx;

	if (empty_space <= dwords) {
		/* no room: ask firmware to interrupt us when it frees space */
		qhdr->tx_req = 1;
		/* ensure tx_req is updated in memory */
		wmb();
		return -ENOSPC;
	}

	qhdr->tx_req = 0;
	/* ensure tx_req is updated in memory */
	wmb();

	new_wr_idx = wr_idx + dwords;
	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));

	/* reject a write index pointing outside the queue buffer */
	if (wr_ptr < (u32 *)queue->qmem.kva ||
	    wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
		return -EINVAL;

	if (new_wr_idx < qsize) {
		memcpy(wr_ptr, packet, dwords << 2);
	} else {
		/* packet wraps: split the copy at the end of the buffer */
		size_t len;

		new_wr_idx -= qsize;
		len = (dwords - new_wr_idx) << 2;
		memcpy(wr_ptr, packet, len);
		memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
	}

	/* make sure packet is written before updating the write index */
	wmb();

	qhdr->write_idx = new_wr_idx;
	*rx_req = qhdr->rx_req ? 1 : 0;

	/* make sure write index is updated before an interrupt is raised */
	mb();

	return 0;
}
236
/*
 * Copy the next HFI packet out of a shared circular queue.
 *
 * @queue:  source interface queue (message or debug queue)
 * @pkt:    destination buffer, at least IFACEQ_VAR_HUGE_PKT_SIZE bytes
 * @tx_req: set to 1 if the firmware asked to be interrupted on free space
 *
 * Returns 0 on success, -EINVAL on a malformed queue/packet, -ENODATA
 * when the queue is empty, -EBADMSG when a corrupt packet was dropped
 * (the queue is drained by snapping read_idx to write_idx).
 * Caller must hold hdev->lock.
 */
static int venus_read_queue(struct venus_hfi_device *hdev,
			    struct iface_queue *queue, void *pkt, u32 *tx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_rd_idx;
	u32 rd_idx, wr_idx, type, qsize;
	u32 *rd_ptr;
	u32 recv_request = 0;
	int ret = 0;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	type = qhdr->type;
	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;

	/* make sure data is valid before using it */
	rmb();

	/*
	 * Do not set receive request for debug queue, if set, Venus generates
	 * interrupt for debug messages even when there is no response message
	 * available. In general debug queue will not become full as it is being
	 * emptied out for every interrupt from Venus. Venus will anyway
	 * generates interrupt if it is full.
	 */
	if (type & HFI_CTRL_TO_HOST_MSG_Q)
		recv_request = 1;

	if (rd_idx == wr_idx) {
		/* queue empty: re-arm the receive request and bail out */
		qhdr->rx_req = recv_request;
		*tx_req = 0;
		/* update rx_req field in memory */
		wmb();
		return -ENODATA;
	}

	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));

	/* reject a read index pointing outside the queue buffer */
	if (rd_ptr < (u32 *)queue->qmem.kva ||
	    rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
		return -EINVAL;

	/* first dword of the packet is its size in bytes */
	dwords = *rd_ptr >> 2;
	if (!dwords)
		return -EINVAL;

	new_rd_idx = rd_idx + dwords;
	/* only copy packets that fit the destination buffer and the queue */
	if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
		if (new_rd_idx < qsize) {
			memcpy(pkt, rd_ptr, dwords << 2);
		} else {
			/* packet wraps: split the copy at end of buffer */
			size_t len;

			new_rd_idx -= qsize;
			len = (dwords - new_rd_idx) << 2;
			memcpy(pkt, rd_ptr, len);
			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
		}
	} else {
		/* bad packet received, dropping */
		new_rd_idx = qhdr->write_idx;
		ret = -EBADMSG;
	}

	/* ensure the packet is read before updating read index */
	rmb();

	qhdr->read_idx = new_rd_idx;
	/* ensure updating read index */
	wmb();

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	/* ensure rd/wr indices are read from memory */
	rmb();

	/* re-arm rx_req only when we have drained the queue */
	if (rd_idx != wr_idx)
		qhdr->rx_req = 0;
	else
		qhdr->rx_req = recv_request;

	*tx_req = qhdr->tx_req ? 1 : 0;

	/* ensure rx_req is stored to memory and tx_req is loaded from memory */
	mb();

	venus_dump_packet(hdev, pkt);

	return ret;
}
334
/*
 * Allocate a page-aligned, write-combined DMA buffer shared with the
 * firmware and fill in @desc. Returns 0 or -ENOMEM.
 */
static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
		       u32 size)
{
	struct device *dev = hdev->core->dev;
	void *kva;

	desc->attrs = DMA_ATTR_WRITE_COMBINE;
	desc->size = ALIGN(size, SZ_4K);

	kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
			      desc->attrs);
	if (!kva)
		return -ENOMEM;

	desc->kva = kva;

	return 0;
}
350
venus_free(struct venus_hfi_device * hdev,struct mem_desc * mem)351 static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
352 {
353 struct device *dev = hdev->core->dev;
354
355 dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
356 }
357
venus_set_registers(struct venus_hfi_device * hdev)358 static void venus_set_registers(struct venus_hfi_device *hdev)
359 {
360 const struct venus_resources *res = hdev->core->res;
361 const struct reg_val *tbl = res->reg_tbl;
362 unsigned int count = res->reg_tbl_size;
363 unsigned int i;
364
365 for (i = 0; i < count; i++)
366 writel(tbl[i].value, hdev->core->base + tbl[i].reg);
367 }
368
venus_soft_int(struct venus_hfi_device * hdev)369 static void venus_soft_int(struct venus_hfi_device *hdev)
370 {
371 void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
372
373 writel(BIT(CPU_IC_SOFTINT_H2A_SHIFT), cpu_ic_base + CPU_IC_SOFTINT);
374 }
375
venus_iface_cmdq_write_nolock(struct venus_hfi_device * hdev,void * pkt)376 static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
377 void *pkt)
378 {
379 struct device *dev = hdev->core->dev;
380 struct hfi_pkt_hdr *cmd_packet;
381 struct iface_queue *queue;
382 u32 rx_req;
383 int ret;
384
385 if (!venus_is_valid_state(hdev))
386 return -EINVAL;
387
388 cmd_packet = (struct hfi_pkt_hdr *)pkt;
389 hdev->last_packet_type = cmd_packet->pkt_type;
390
391 queue = &hdev->queues[IFACEQ_CMD_IDX];
392
393 ret = venus_write_queue(hdev, queue, pkt, &rx_req);
394 if (ret) {
395 dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
396 return ret;
397 }
398
399 if (rx_req)
400 venus_soft_int(hdev);
401
402 return 0;
403 }
404
venus_iface_cmdq_write(struct venus_hfi_device * hdev,void * pkt)405 static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
406 {
407 int ret;
408
409 mutex_lock(&hdev->lock);
410 ret = venus_iface_cmdq_write_nolock(hdev, pkt);
411 mutex_unlock(&hdev->lock);
412
413 return ret;
414 }
415
/*
 * Tell the firmware about a platform resource (e.g. video memory).
 * A VIDC_RESOURCE_NONE id is a no-op. Returns 0 or a negative errno.
 */
static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
				       u32 size, u32 addr, void *cookie)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_sys_set_resource_pkt *pkt =
		(struct hfi_sys_set_resource_pkt *)packet;
	int ret;

	if (id == VIDC_RESOURCE_NONE)
		return 0;

	ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, pkt);
}
439
/*
 * Bring the firmware core out of reset and poll until it reports a
 * non-zero control status, or fail after max_tries polls (-ETIMEDOUT)
 * or on an invalid UC_REGION setting (-EINVAL).
 */
static int venus_boot_core(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	static const unsigned int max_tries = 100;
	u32 ctrl_status = 0, mask_val;
	unsigned int count = 0;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	void __iomem *wrapper_base = hdev->core->wrapper_base;
	int ret = 0;

	/* unmask the wrapper interrupts the host handles on this IP version */
	if (IS_V6(hdev->core)) {
		mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
		mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
			      WRAPPER_INTR_MASK_A2HCPU_MASK);
	} else {
		mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
	}
	writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
	writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);

	/* release the core from reset and wait for it to come up */
	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
	while (!ctrl_status && count < max_tries) {
		ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
		/* error status 4 means firmware rejected the UC region */
		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
			dev_err(dev, "invalid setting for UC_REGION\n");
			ret = -EINVAL;
			break;
		}

		usleep_range(500, 1000);
		count++;
	}

	if (count >= max_tries)
		ret = -ETIMEDOUT;

	if (IS_V6(hdev->core))
		writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);

	return ret;
}
481
venus_hwversion(struct venus_hfi_device * hdev)482 static u32 venus_hwversion(struct venus_hfi_device *hdev)
483 {
484 struct device *dev = hdev->core->dev;
485 void __iomem *wrapper_base = hdev->core->wrapper_base;
486 u32 ver;
487 u32 major, minor, step;
488
489 ver = readl(wrapper_base + WRAPPER_HW_VERSION);
490 major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
491 major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
492 minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
493 minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
494 step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
495
496 dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
497
498 return major;
499 }
500
/*
 * Program the shared-memory addresses into the core and boot the
 * firmware. Used both at initial boot and when resuming after a power
 * collapse. Returns 0 or the error from venus_boot_core().
 */
static int venus_run(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	int ret;

	/*
	 * Re-program all of the registers that get reset as a result of
	 * regulator_disable() and _enable()
	 */
	venus_set_registers(hdev);

	/* tell the firmware where the interface queues and SFR live */
	writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
	writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
	writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
	writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
	if (hdev->sfr.da)
		writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);

	ret = venus_boot_core(hdev);
	if (ret) {
		dev_err(dev, "failed to reset venus core\n");
		return ret;
	}

	venus_hwversion(hdev);

	return 0;
}
530
/*
 * Halt the core's AXI bus traffic before powering down. V4 cores use
 * the wrapper halt registers; older cores go through the VBIF. Returns
 * 0 on idle/ack, or -ETIMEDOUT from readl_poll_timeout().
 */
static int venus_halt_axi(struct venus_hfi_device *hdev)
{
	void __iomem *wrapper_base = hdev->core->wrapper_base;
	void __iomem *vbif_base = hdev->core->vbif_base;
	struct device *dev = hdev->core->dev;
	u32 val;
	int ret;

	if (IS_V4(hdev->core)) {
		/* request the CPU AXI halt and wait for the idle status */
		val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
		val |= WRAPPER_CPU_AXI_HALT_HALT;
		writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);

		ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
					 val,
					 val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
					 POLL_INTERVAL_US,
					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
		if (ret) {
			dev_err(dev, "AXI bus port halt timeout\n");
			return ret;
		}

		return 0;
	}

	/* Halt AXI and AXI IMEM VBIF Access */
	val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
	val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
	writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);

	/* Request for AXI bus port halt */
	ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
				 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
				 POLL_INTERVAL_US,
				 VBIF_AXI_HALT_ACK_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "AXI bus port halt timeout\n");
		return ret;
	}

	return 0;
}
574
venus_power_off(struct venus_hfi_device * hdev)575 static int venus_power_off(struct venus_hfi_device *hdev)
576 {
577 int ret;
578
579 if (!hdev->power_enabled)
580 return 0;
581
582 ret = venus_set_hw_state_suspend(hdev->core);
583 if (ret)
584 return ret;
585
586 ret = venus_halt_axi(hdev);
587 if (ret)
588 return ret;
589
590 hdev->power_enabled = false;
591
592 return 0;
593 }
594
venus_power_on(struct venus_hfi_device * hdev)595 static int venus_power_on(struct venus_hfi_device *hdev)
596 {
597 int ret;
598
599 if (hdev->power_enabled)
600 return 0;
601
602 ret = venus_set_hw_state_resume(hdev->core);
603 if (ret)
604 goto err;
605
606 ret = venus_run(hdev);
607 if (ret)
608 goto err_suspend;
609
610 hdev->power_enabled = true;
611
612 return 0;
613
614 err_suspend:
615 venus_set_hw_state_suspend(hdev->core);
616 err:
617 hdev->power_enabled = false;
618 return ret;
619 }
620
venus_iface_msgq_read_nolock(struct venus_hfi_device * hdev,void * pkt)621 static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
622 void *pkt)
623 {
624 struct iface_queue *queue;
625 u32 tx_req;
626 int ret;
627
628 if (!venus_is_valid_state(hdev))
629 return -EINVAL;
630
631 queue = &hdev->queues[IFACEQ_MSG_IDX];
632
633 ret = venus_read_queue(hdev, queue, pkt, &tx_req);
634 if (ret)
635 return ret;
636
637 if (tx_req)
638 venus_soft_int(hdev);
639
640 return 0;
641 }
642
venus_iface_msgq_read(struct venus_hfi_device * hdev,void * pkt)643 static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
644 {
645 int ret;
646
647 mutex_lock(&hdev->lock);
648 ret = venus_iface_msgq_read_nolock(hdev, pkt);
649 mutex_unlock(&hdev->lock);
650
651 return ret;
652 }
653
venus_iface_dbgq_read_nolock(struct venus_hfi_device * hdev,void * pkt)654 static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
655 void *pkt)
656 {
657 struct iface_queue *queue;
658 u32 tx_req;
659 int ret;
660
661 ret = venus_is_valid_state(hdev);
662 if (!ret)
663 return -EINVAL;
664
665 queue = &hdev->queues[IFACEQ_DBG_IDX];
666
667 ret = venus_read_queue(hdev, queue, pkt, &tx_req);
668 if (ret)
669 return ret;
670
671 if (tx_req)
672 venus_soft_int(hdev);
673
674 return 0;
675 }
676
venus_iface_dbgq_read(struct venus_hfi_device * hdev,void * pkt)677 static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
678 {
679 int ret;
680
681 if (!pkt)
682 return -EINVAL;
683
684 mutex_lock(&hdev->lock);
685 ret = venus_iface_dbgq_read_nolock(hdev, pkt);
686 mutex_unlock(&hdev->lock);
687
688 return ret;
689 }
690
venus_set_qhdr_defaults(struct hfi_queue_header * qhdr)691 static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
692 {
693 qhdr->status = 1;
694 qhdr->type = IFACEQ_DFLT_QHDR;
695 qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
696 qhdr->pkt_size = 0;
697 qhdr->rx_wm = 1;
698 qhdr->tx_wm = 1;
699 qhdr->rx_req = 1;
700 qhdr->tx_req = 0;
701 qhdr->rx_irq_status = 0;
702 qhdr->tx_irq_status = 0;
703 qhdr->read_idx = 0;
704 qhdr->write_idx = 0;
705 }
706
venus_interface_queues_release(struct venus_hfi_device * hdev)707 static void venus_interface_queues_release(struct venus_hfi_device *hdev)
708 {
709 mutex_lock(&hdev->lock);
710
711 venus_free(hdev, &hdev->ifaceq_table);
712 venus_free(hdev, &hdev->sfr);
713
714 memset(hdev->queues, 0, sizeof(hdev->queues));
715 memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
716 memset(&hdev->sfr, 0, sizeof(hdev->sfr));
717
718 mutex_unlock(&hdev->lock);
719 }
720
/*
 * Allocate and lay out the shared interface-queue memory: the queue
 * table header, three queue headers and their data buffers, plus the
 * optional SFR (subsystem failure reason) buffer. SFR allocation
 * failure is non-fatal — the firmware simply gets no SFR address.
 * Returns 0 or -ENOMEM from the main queue allocation.
 */
static int venus_interface_queues_init(struct venus_hfi_device *hdev)
{
	struct hfi_queue_table_header *tbl_hdr;
	struct iface_queue *queue;
	struct hfi_sfr *sfr;
	struct mem_desc desc = {0};
	unsigned int offset;
	unsigned int i;
	int ret;

	ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
	if (ret)
		return ret;

	hdev->ifaceq_table = desc;
	/* queue data buffers start right after the table + headers */
	offset = IFACEQ_TABLE_SIZE;

	for (i = 0; i < IFACEQ_NUM; i++) {
		queue = &hdev->queues[i];
		queue->qmem.da = desc.da + offset;
		queue->qmem.kva = desc.kva + offset;
		queue->qmem.size = IFACEQ_QUEUE_SIZE;
		offset += queue->qmem.size;
		queue->qhdr =
			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);

		venus_set_qhdr_defaults(queue->qhdr);

		/* firmware addresses the buffer by its device address */
		queue->qhdr->start_addr = queue->qmem.da;

		if (i == IFACEQ_CMD_IDX)
			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
		else if (i == IFACEQ_MSG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
		else if (i == IFACEQ_DBG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
	}

	tbl_hdr = hdev->ifaceq_table.kva;
	tbl_hdr->version = 0;
	tbl_hdr->size = IFACEQ_TABLE_SIZE;
	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
	tbl_hdr->num_q = IFACEQ_NUM;
	tbl_hdr->num_active_q = IFACEQ_NUM;

	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	queue = &hdev->queues[IFACEQ_DBG_IDX];
	queue->qhdr->rx_req = 0;

	ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
	if (ret) {
		/* SFR is optional; da == 0 means "not programmed" */
		hdev->sfr.da = 0;
	} else {
		hdev->sfr = desc;
		sfr = hdev->sfr.kva;
		sfr->buf_size = ALIGNED_SFR_SIZE;
	}

	/* ensure table and queue header structs are settled in memory */
	wmb();

	return 0;
}
788
/* Send the firmware a debug-level configuration (queue mode). */
static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
{
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_sys_set_property_pkt *pkt =
		(struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);

	return venus_iface_cmdq_write(hdev, pkt);
}
805
/* Send the firmware a code-coverage configuration packet. */
static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
{
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_sys_set_property_pkt *pkt =
		(struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_coverage_config(pkt, mode);

	return venus_iface_cmdq_write(hdev, pkt);
}
822
/*
 * Enable the firmware idle indicator message. Disabling is a no-op:
 * the property is only ever sent when @enable is true.
 */
static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
				      bool enable)
{
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_sys_set_property_pkt *pkt =
		(struct hfi_sys_set_property_pkt *)packet;

	if (!enable)
		return 0;

	pkt_sys_idle_indicator(pkt, enable);

	return venus_iface_cmdq_write(hdev, pkt);
}
843
/* Enable or disable firmware-side hardware power collapse. */
static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
				       bool enable)
{
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_sys_set_property_pkt *pkt =
		(struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_power_control(pkt, enable);

	return venus_iface_cmdq_write(hdev, pkt);
}
861
/*
 * Return the number of dwords pending in queue @index, or -EINVAL for a
 * bad index / uninitialized queue.
 *
 * NOTE(review): read_idx - write_idx is unsigned arithmetic passed to
 * abs(); callers only test the result for zero vs non-zero (see
 * venus_are_queues_empty), for which this is sufficient — confirm
 * before relying on the magnitude.
 */
static int venus_get_queue_size(struct venus_hfi_device *hdev,
				unsigned int index)
{
	struct hfi_queue_header *qhdr;

	if (index >= IFACEQ_NUM)
		return -EINVAL;

	qhdr = hdev->queues[index].qhdr;
	if (!qhdr)
		return -EINVAL;

	return abs(qhdr->read_idx - qhdr->write_idx);
}
876
venus_sys_set_default_properties(struct venus_hfi_device * hdev)877 static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
878 {
879 struct device *dev = hdev->core->dev;
880 int ret;
881
882 ret = venus_sys_set_debug(hdev, venus_fw_debug);
883 if (ret)
884 dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
885
886 /*
887 * Idle indicator is disabled by default on some 4xx firmware versions,
888 * enable it explicitly in order to make suspend functional by checking
889 * WFI (wait-for-interrupt) bit.
890 */
891 if (IS_V4(hdev->core))
892 venus_sys_idle_indicator = true;
893
894 ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
895 if (ret)
896 dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
897
898 ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
899 if (ret)
900 dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
901 ret);
902
903 return ret;
904 }
905
/* Build and send a simple per-session command packet of @pkt_type. */
static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type)
{
	struct hfi_session_pkt pkt;
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);

	pkt_session_cmd(&pkt, pkt_type, inst);

	return venus_iface_cmdq_write(hdev, &pkt);
}
915
venus_flush_debug_queue(struct venus_hfi_device * hdev)916 static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
917 {
918 struct device *dev = hdev->core->dev;
919 void *packet = hdev->dbg_buf;
920
921 while (!venus_iface_dbgq_read(hdev, packet)) {
922 struct hfi_msg_sys_coverage_pkt *pkt = packet;
923
924 if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
925 struct hfi_msg_sys_debug_pkt *pkt = packet;
926
927 dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
928 }
929 }
930 }
931
/*
 * Ask the firmware to prepare for power collapse. When @wait is true,
 * block until the SYS_PC_PREP response arrives (pwr_collapse_prep is
 * completed in venus_isr_thread()) or venus_hw_rsp_timeout ms elapse.
 */
static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
					bool wait)
{
	struct hfi_sys_pc_prep_pkt pkt;
	unsigned long timeout;
	int ret;

	init_completion(&hdev->pwr_collapse_prep);

	pkt_sys_pc_prep(&pkt);

	ret = venus_iface_cmdq_write(hdev, &pkt);
	if (ret)
		return ret;

	if (!wait)
		return 0;

	timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
	if (!wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout)) {
		/* surface any firmware log explaining the stall */
		venus_flush_debug_queue(hdev);
		return -ETIMEDOUT;
	}

	return 0;
}
958
venus_are_queues_empty(struct venus_hfi_device * hdev)959 static int venus_are_queues_empty(struct venus_hfi_device *hdev)
960 {
961 int ret1, ret2;
962
963 ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
964 if (ret1 < 0)
965 return ret1;
966
967 ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
968 if (ret2 < 0)
969 return ret2;
970
971 if (!ret1 && !ret2)
972 return 1;
973
974 return 0;
975 }
976
venus_sfr_print(struct venus_hfi_device * hdev)977 static void venus_sfr_print(struct venus_hfi_device *hdev)
978 {
979 struct device *dev = hdev->core->dev;
980 struct hfi_sfr *sfr = hdev->sfr.kva;
981 void *p;
982
983 if (!sfr)
984 return;
985
986 p = memchr(sfr->data, '\0', sfr->buf_size);
987 /*
988 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
989 * that Venus is in the process of crashing.
990 */
991 if (!p)
992 sfr->data[sfr->buf_size - 1] = '\0';
993
994 dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
995 }
996
venus_process_msg_sys_error(struct venus_hfi_device * hdev,void * packet)997 static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
998 void *packet)
999 {
1000 struct hfi_msg_event_notify_pkt *event_pkt = packet;
1001
1002 if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1003 return;
1004
1005 venus_set_state(hdev, VENUS_STATE_DEINIT);
1006
1007 venus_sfr_print(hdev);
1008 }
1009
venus_isr_thread(struct venus_core * core)1010 static irqreturn_t venus_isr_thread(struct venus_core *core)
1011 {
1012 struct venus_hfi_device *hdev = to_hfi_priv(core);
1013 const struct venus_resources *res;
1014 void *pkt;
1015 u32 msg_ret;
1016
1017 if (!hdev)
1018 return IRQ_NONE;
1019
1020 res = hdev->core->res;
1021 pkt = hdev->pkt_buf;
1022
1023
1024 while (!venus_iface_msgq_read(hdev, pkt)) {
1025 msg_ret = hfi_process_msg_packet(core, pkt);
1026 switch (msg_ret) {
1027 case HFI_MSG_EVENT_NOTIFY:
1028 venus_process_msg_sys_error(hdev, pkt);
1029 break;
1030 case HFI_MSG_SYS_INIT:
1031 venus_hfi_core_set_resource(core, res->vmem_id,
1032 res->vmem_size,
1033 res->vmem_addr,
1034 hdev);
1035 break;
1036 case HFI_MSG_SYS_RELEASE_RESOURCE:
1037 complete(&hdev->release_resource);
1038 break;
1039 case HFI_MSG_SYS_PC_PREP:
1040 complete(&hdev->pwr_collapse_prep);
1041 break;
1042 default:
1043 break;
1044 }
1045 }
1046
1047 venus_flush_debug_queue(hdev);
1048
1049 return IRQ_HANDLED;
1050 }
1051
venus_isr(struct venus_core * core)1052 static irqreturn_t venus_isr(struct venus_core *core)
1053 {
1054 struct venus_hfi_device *hdev = to_hfi_priv(core);
1055 u32 status;
1056 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1057 void __iomem *wrapper_base = hdev->core->wrapper_base;
1058
1059 if (!hdev)
1060 return IRQ_NONE;
1061
1062 status = readl(wrapper_base + WRAPPER_INTR_STATUS);
1063
1064 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1065 status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
1066 status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1067 hdev->irq_status = status;
1068
1069 writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
1070 writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
1071
1072 return IRQ_WAKE_THREAD;
1073 }
1074
venus_core_init(struct venus_core * core)1075 static int venus_core_init(struct venus_core *core)
1076 {
1077 struct venus_hfi_device *hdev = to_hfi_priv(core);
1078 struct device *dev = core->dev;
1079 struct hfi_sys_get_property_pkt version_pkt;
1080 struct hfi_sys_init_pkt pkt;
1081 int ret;
1082
1083 pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1084
1085 venus_set_state(hdev, VENUS_STATE_INIT);
1086
1087 ret = venus_iface_cmdq_write(hdev, &pkt);
1088 if (ret)
1089 return ret;
1090
1091 pkt_sys_image_version(&version_pkt);
1092
1093 ret = venus_iface_cmdq_write(hdev, &version_pkt);
1094 if (ret)
1095 dev_warn(dev, "failed to send image version pkt to fw\n");
1096
1097 ret = venus_sys_set_default_properties(hdev);
1098 if (ret)
1099 return ret;
1100
1101 return 0;
1102 }
1103
venus_core_deinit(struct venus_core * core)1104 static int venus_core_deinit(struct venus_core *core)
1105 {
1106 struct venus_hfi_device *hdev = to_hfi_priv(core);
1107
1108 venus_set_state(hdev, VENUS_STATE_DEINIT);
1109 hdev->suspended = true;
1110 hdev->power_enabled = false;
1111
1112 return 0;
1113 }
1114
/* Send a SYS_PING with @cookie to verify the firmware is responsive. */
static int venus_core_ping(struct venus_core *core, u32 cookie)
{
	struct hfi_sys_ping_pkt pkt;

	pkt_sys_ping(&pkt, cookie);

	return venus_iface_cmdq_write(to_hfi_priv(core), &pkt);
}
1124
/* Ask the firmware to trigger a subsystem restart of @trigger_type. */
static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
{
	struct hfi_sys_test_ssr_pkt pkt;
	int ret;

	ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(to_hfi_priv(core), &pkt);
}
1137
/*
 * Open a firmware session of @session_type for @codec. Re-applies the
 * debug level first; on any failure the debug queue is flushed so the
 * firmware's explanation reaches the log.
 */
static int venus_session_init(struct venus_inst *inst, u32 session_type,
			      u32 codec)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_init_pkt pkt;
	int ret;

	ret = venus_sys_set_debug(hdev, venus_fw_debug);
	if (ret)
		goto err;

	ret = pkt_session_init(&pkt, inst, session_type, codec);
	if (ret)
		goto err;

	ret = venus_iface_cmdq_write(hdev, &pkt);
	if (ret)
		goto err;

	return 0;

err:
	venus_flush_debug_queue(hdev);
	return ret;
}
1163
venus_session_end(struct venus_inst * inst)1164 static int venus_session_end(struct venus_inst *inst)
1165 {
1166 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1167 struct device *dev = hdev->core->dev;
1168
1169 if (venus_fw_coverage) {
1170 if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1171 dev_warn(dev, "fw coverage msg ON failed\n");
1172 }
1173
1174 return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END);
1175 }
1176
venus_session_abort(struct venus_inst * inst)1177 static int venus_session_abort(struct venus_inst *inst)
1178 {
1179 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1180
1181 venus_flush_debug_queue(hdev);
1182
1183 return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT);
1184 }
1185
/* Issue a session flush of @flush_mode (input/output/all). */
static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
{
	struct hfi_session_flush_pkt pkt;
	int ret;

	ret = pkt_session_flush(&pkt, inst, flush_mode);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(to_hfi_priv(inst->core), &pkt);
}
1198
/* Start streaming on the session (HFI_CMD_SESSION_START wrapper). */
static int venus_session_start(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_START);
}
1203
/* Stop streaming on the session (HFI_CMD_SESSION_STOP wrapper). */
static int venus_session_stop(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_STOP);
}
1208
/* Resume a paused session (HFI_CMD_SESSION_CONTINUE wrapper). */
static int venus_session_continue(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE);
}
1213
venus_session_etb(struct venus_inst * inst,struct hfi_frame_data * in_frame)1214 static int venus_session_etb(struct venus_inst *inst,
1215 struct hfi_frame_data *in_frame)
1216 {
1217 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1218 u32 session_type = inst->session_type;
1219 int ret;
1220
1221 if (session_type == VIDC_SESSION_TYPE_DEC) {
1222 struct hfi_session_empty_buffer_compressed_pkt pkt;
1223
1224 ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1225 if (ret)
1226 return ret;
1227
1228 ret = venus_iface_cmdq_write(hdev, &pkt);
1229 } else if (session_type == VIDC_SESSION_TYPE_ENC) {
1230 struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1231
1232 ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1233 if (ret)
1234 return ret;
1235
1236 ret = venus_iface_cmdq_write(hdev, &pkt);
1237 } else {
1238 ret = -EINVAL;
1239 }
1240
1241 return ret;
1242 }
1243
venus_session_ftb(struct venus_inst * inst,struct hfi_frame_data * out_frame)1244 static int venus_session_ftb(struct venus_inst *inst,
1245 struct hfi_frame_data *out_frame)
1246 {
1247 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1248 struct hfi_session_fill_buffer_pkt pkt;
1249 int ret;
1250
1251 ret = pkt_session_ftb(&pkt, inst, out_frame);
1252 if (ret)
1253 return ret;
1254
1255 return venus_iface_cmdq_write(hdev, &pkt);
1256 }
1257
venus_session_set_buffers(struct venus_inst * inst,struct hfi_buffer_desc * bd)1258 static int venus_session_set_buffers(struct venus_inst *inst,
1259 struct hfi_buffer_desc *bd)
1260 {
1261 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1262 struct hfi_session_set_buffers_pkt *pkt;
1263 u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1264 int ret;
1265
1266 if (bd->buffer_type == HFI_BUFFER_INPUT)
1267 return 0;
1268
1269 pkt = (struct hfi_session_set_buffers_pkt *)packet;
1270
1271 ret = pkt_session_set_buffers(pkt, inst, bd);
1272 if (ret)
1273 return ret;
1274
1275 return venus_iface_cmdq_write(hdev, pkt);
1276 }
1277
venus_session_unset_buffers(struct venus_inst * inst,struct hfi_buffer_desc * bd)1278 static int venus_session_unset_buffers(struct venus_inst *inst,
1279 struct hfi_buffer_desc *bd)
1280 {
1281 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1282 struct hfi_session_release_buffer_pkt *pkt;
1283 u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1284 int ret;
1285
1286 if (bd->buffer_type == HFI_BUFFER_INPUT)
1287 return 0;
1288
1289 pkt = (struct hfi_session_release_buffer_pkt *)packet;
1290
1291 ret = pkt_session_unset_buffers(pkt, inst, bd);
1292 if (ret)
1293 return ret;
1294
1295 return venus_iface_cmdq_write(hdev, pkt);
1296 }
1297
/* Ask firmware to load session resources (HFI_CMD_SESSION_LOAD_RESOURCES). */
static int venus_session_load_res(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES);
}
1302
/* Release session resources (HFI_CMD_SESSION_RELEASE_RESOURCES wrapper). */
static int venus_session_release_res(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES);
}
1307
/* Send a sequence header at @seq_hdr (length @seq_hdr_len) for parsing. */
static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
				       u32 seq_hdr_len)
{
	/* Variable-length packet built in a stack scratch buffer. */
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_session_parse_sequence_header_pkt *pkt =
		(struct hfi_session_parse_sequence_header_pkt *)packet;
	int ret;

	ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(to_hfi_priv(inst->core), pkt);
}
1328
/* Request the firmware write a sequence header into @seq_hdr. */
static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
				     u32 seq_hdr_len)
{
	/* Variable-length packet built in a stack scratch buffer. */
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	struct hfi_session_get_sequence_header_pkt *pkt =
		(struct hfi_session_get_sequence_header_pkt *)packet;
	int ret;

	ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(to_hfi_priv(inst->core), pkt);
}
1345
/*
 * Set session property @ptype with payload @pdata.
 * A property the packet builder does not support (-ENOTSUPP) is silently
 * skipped and reported as success; any other build error is propagated.
 */
static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
				      void *pdata)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	/* Variable-length packet built in a stack scratch buffer. */
	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
	struct hfi_session_set_property_pkt *pkt =
		(struct hfi_session_set_property_pkt *)packet;
	int ret;

	ret = pkt_session_set_property(pkt, inst, ptype, pdata);
	if (ret)
		return ret == -ENOTSUPP ? 0 : ret;

	return venus_iface_cmdq_write(hdev, pkt);
}
1364
/* Query session property @ptype; the value arrives via the msg queue. */
static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
{
	struct hfi_session_get_property_pkt pkt;
	int ret;

	ret = pkt_session_get_property(&pkt, inst, ptype);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(to_hfi_priv(inst->core), &pkt);
}
1377
venus_resume(struct venus_core * core)1378 static int venus_resume(struct venus_core *core)
1379 {
1380 struct venus_hfi_device *hdev = to_hfi_priv(core);
1381 int ret = 0;
1382
1383 mutex_lock(&hdev->lock);
1384
1385 if (!hdev->suspended)
1386 goto unlock;
1387
1388 ret = venus_power_on(hdev);
1389
1390 unlock:
1391 if (!ret)
1392 hdev->suspended = false;
1393
1394 mutex_unlock(&hdev->lock);
1395
1396 return ret;
1397 }
1398
/*
 * Power-collapse sequence for Venus 1xx: validate firmware state, send
 * the power-collapse-prepare command, then power off only once the
 * interface queues are drained and the firmware has raised PC_READY.
 */
static int venus_suspend_1xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	u32 ctrl_status;
	int ret;

	/* Nothing to do if already powered down or suspended. */
	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	/* Tell the firmware to prepare for power collapse (PC_PREP). */
	ret = venus_prepare_power_collapse(hdev, true);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	mutex_lock(&hdev->lock);

	/* A newer command was queued after PC_PREP — abort the collapse. */
	if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	/* Queues must be fully drained before power can be removed. */
	ret = venus_are_queues_empty(hdev);
	if (ret < 0 || !ret) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	/* Firmware must have acknowledged readiness via PC_READY. */
	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
	if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	ret = venus_power_off(hdev);
	if (ret) {
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}
1456
venus_cpu_and_video_core_idle(struct venus_hfi_device * hdev)1457 static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1458 {
1459 void __iomem *wrapper_base = hdev->core->wrapper_base;
1460 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1461 u32 ctrl_status, cpu_status;
1462
1463 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1464 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1465
1466 if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1467 ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1468 return true;
1469
1470 return false;
1471 }
1472
venus_cpu_idle_and_pc_ready(struct venus_hfi_device * hdev)1473 static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1474 {
1475 void __iomem *wrapper_base = hdev->core->wrapper_base;
1476 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1477 u32 ctrl_status, cpu_status;
1478
1479 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1480 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1481
1482 if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1483 ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1484 return true;
1485
1486 return false;
1487 }
1488
/*
 * Power-collapse sequence for Venus 3xx/4xx: poll for hardware idle,
 * send the power-collapse-prepare command, poll for PC_READY, then
 * power off under the device lock.
 */
static int venus_suspend_3xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	u32 ctrl_status;
	bool val;
	int ret;

	/* Nothing to do if already powered down or suspended. */
	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	/* Firmware already signalled PC_READY — skip straight to power-off. */
	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
	if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
		goto power_off;

	/*
	 * Power collapse sequence for Venus 3xx and 4xx versions:
	 * 1. Check for ARM9 and video core to be idle by checking WFI bit
	 *    (bit 0) in CPU status register and by checking Idle (bit 30) in
	 *    Control status register for video core.
	 * 2. Send a command to prepare for power collapse.
	 * 3. Check for WFI and PC_READY bits.
	 */
	ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
				 1500, 100 * 1500);
	if (ret)
		return ret;

	ret = venus_prepare_power_collapse(hdev, false);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
				 1500, 100 * 1500);
	if (ret)
		return ret;

power_off:
	mutex_lock(&hdev->lock);

	ret = venus_power_off(hdev);
	if (ret) {
		dev_err(dev, "venus_power_off (%d)\n", ret);
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}
1554
/* Dispatch to the IP-version-specific power-collapse sequence. */
static int venus_suspend(struct venus_core *core)
{
	if (IS_V3(core) || IS_V4(core))
		return venus_suspend_3xx(core);

	return venus_suspend_1xx(core);
}
1562
/* HFI backend operations implemented on top of the shared interface queues. */
static const struct hfi_ops venus_hfi_ops = {
	.core_init = venus_core_init,
	.core_deinit = venus_core_deinit,
	.core_ping = venus_core_ping,
	.core_trigger_ssr = venus_core_trigger_ssr,

	.session_init = venus_session_init,
	.session_end = venus_session_end,
	.session_abort = venus_session_abort,
	.session_flush = venus_session_flush,
	.session_start = venus_session_start,
	.session_stop = venus_session_stop,
	.session_continue = venus_session_continue,
	.session_etb = venus_session_etb,
	.session_ftb = venus_session_ftb,
	.session_set_buffers = venus_session_set_buffers,
	.session_unset_buffers = venus_session_unset_buffers,
	.session_load_res = venus_session_load_res,
	.session_release_res = venus_session_release_res,
	.session_parse_seq_hdr = venus_session_parse_seq_hdr,
	.session_get_seq_hdr = venus_session_get_seq_hdr,
	.session_set_property = venus_session_set_property,
	.session_get_property = venus_session_get_property,

	.resume = venus_resume,
	.suspend = venus_suspend,

	.isr = venus_isr,
	.isr_thread = venus_isr_thread,
};
1593
/* Free the HFI device created by venus_hfi_create() and detach it. */
void venus_hfi_destroy(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);

	venus_interface_queues_release(hdev);
	mutex_destroy(&hdev->lock);
	kfree(hdev);
	/* Clear the back-pointers so no further ops reach freed memory. */
	core->priv = NULL;
	core->ops = NULL;
}
1604
venus_hfi_create(struct venus_core * core)1605 int venus_hfi_create(struct venus_core *core)
1606 {
1607 struct venus_hfi_device *hdev;
1608 int ret;
1609
1610 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1611 if (!hdev)
1612 return -ENOMEM;
1613
1614 mutex_init(&hdev->lock);
1615
1616 hdev->core = core;
1617 hdev->suspended = true;
1618 core->priv = hdev;
1619 core->ops = &venus_hfi_ops;
1620 core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY |
1621 ENC_DEINTERLACE_CAPABILITY |
1622 DEC_MULTI_STREAM_CAPABILITY;
1623
1624 ret = venus_interface_queues_init(hdev);
1625 if (ret)
1626 goto err_kfree;
1627
1628 return 0;
1629
1630 err_kfree:
1631 kfree(hdev);
1632 core->priv = NULL;
1633 core->ops = NULL;
1634 return ret;
1635 }
1636
/*
 * Re-initialize the shared-memory queue headers and queue table after a
 * firmware restart, restoring the same layout venus_hfi_create() set up.
 */
void venus_hfi_queues_reinit(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct hfi_queue_table_header *tbl_hdr;
	struct iface_queue *queue;
	struct hfi_sfr *sfr;
	unsigned int i;

	mutex_lock(&hdev->lock);

	for (i = 0; i < IFACEQ_NUM; i++) {
		queue = &hdev->queues[i];
		queue->qhdr =
			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);

		venus_set_qhdr_defaults(queue->qhdr);

		/* Device address of the queue payload, as seen by firmware. */
		queue->qhdr->start_addr = queue->qmem.da;

		/* Tag each header with its direction/purpose. */
		if (i == IFACEQ_CMD_IDX)
			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
		else if (i == IFACEQ_MSG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
		else if (i == IFACEQ_DBG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
	}

	/* Rebuild the queue table header describing all three queues. */
	tbl_hdr = hdev->ifaceq_table.kva;
	tbl_hdr->version = 0;
	tbl_hdr->size = IFACEQ_TABLE_SIZE;
	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
	tbl_hdr->num_q = IFACEQ_NUM;
	tbl_hdr->num_active_q = IFACEQ_NUM;

	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	queue = &hdev->queues[IFACEQ_DBG_IDX];
	queue->qhdr->rx_req = 0;

	/* Subsystem failure reason buffer shared with the firmware. */
	sfr = hdev->sfr.kva;
	sfr->buf_size = ALIGNED_SFR_SIZE;

	/* ensure table and queue header structs are settled in memory */
	wmb();

	mutex_unlock(&hdev->lock);
}
1687