Lines matching +full:gce +full:- +full:mailbox (matched lines from the MediaTek CMDQ/GCE mailbox controller driver, drivers/mailbox/mtk-cmdq-mailbox.c)
// SPDX-License-Identifier: GPL-2.0

#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

#define CMDQ_NUM_CMD(t) ((t)->cmd_buf_size / CMDQ_INST_SIZE)
/* in struct cmdq_task */
struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
/* in cmdq_get_shift_pa() */
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

return cmdq->shift_pa;
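The shift is how the driver fits a physical address wider than 32 bits into a 32-bit thread register: it writes pa >> shift_pa and reconstructs the address with << shift_pa on read-back. A minimal sketch of that round trip (the helper names are illustrative, not part of the driver):

#include <linux/types.h>

/* Illustrative only: mirrors the writel()/readl() pairs in this file. */
static inline u32 gce_pack_pa(dma_addr_t pa, u8 shift_pa)
{
        return (u32)(pa >> shift_pa);           /* written to CMDQ_THR_CURR_ADDR */
}

static inline dma_addr_t gce_unpack_pa(u32 reg, u8 shift_pa)
{
        return (dma_addr_t)reg << shift_pa;     /* recovered on read-back */
}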
/* in cmdq_thread_suspend() */
writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

/* if the thread is already disabled, treat it as suspended */
if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
        return 0;

if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
                status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
        dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
                (u32)(thread->base - cmdq->base));
        return -EFAULT;
}
/* in cmdq_thread_resume() */
writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
/* in cmdq_init(): clear all sync tokens by writing each event ID */
WARN_ON(clk_enable(cmdq->clock) < 0);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
        writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
clk_disable(cmdq->clock);
/* in cmdq_thread_reset() */
writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
                warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
                0, 10)) {
        dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
                (u32)(thread->base - cmdq->base));
        return -EFAULT;
}
/* in cmdq_thread_disable() */
writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
/* in cmdq_thread_invalidate_fetched_data(): notify GCE to re-fetch
 * commands by setting the GCE thread PC (rewriting its current value) */
writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
       thread->base + CMDQ_THR_CURR_ADDR);
/* in cmdq_task_insert_into_thread() */
struct device *dev = task->cmdq->mbox.dev;
struct cmdq_thread *thread = task->thread;
struct cmdq_task *prev_task = list_last_entry(
                &thread->task_busy_list, typeof(*task), list_entry);
u64 *prev_task_base = prev_task->pkt->va_base;

/* let the previous task jump to this task */
dma_sync_single_for_cpu(dev, prev_task->pa_base,
                        prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
        (u64)CMDQ_JUMP_BY_PA << 32 |
        (task->pa_base >> task->cmdq->shift_pa);
dma_sync_single_for_device(dev, prev_task->pa_base,
                           prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
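Packets are chained by overwriting the last 64-bit instruction of the previous packet with a jump to the new packet's physical base, so the GCE executes them back to back. A sketch of that instruction, built the same way as the assignment above (the helper name is illustrative; CMDQ_JUMP_BY_PA is a jump opcode the driver defines, not among the matched lines):

#include <linux/types.h>

/* Illustrative only: opcode in the high word, shifted target PA in the
 * low word, matching the prev_task_base[] assignment above. */
static inline u64 gce_jump_inst(dma_addr_t next_pa, u8 shift_pa)
{
        return (u64)CMDQ_JUMP_BY_PA << 32 | (next_pa >> shift_pa);
}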
/* in cmdq_thread_is_in_wfe() */
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
/* in cmdq_task_exec_done() */
struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;

WARN_ON(!cb->cb);
data.sta = sta;
data.data = cb->data;
cb->cb(data);

list_del(&task->list_entry);
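The controller invokes the client's completion callback directly here. A hedged sketch of a matching client callback (the cmdq_cb_data / cmdq_async_flush_cb types come from mtk-cmdq-mailbox.h of this era; my_pkt_done is an illustrative name):

#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/printk.h>

/* Runs in IRQ context (see cmdq_thread_irq_handler below): keep it
 * short and never sleep here. */
static void my_pkt_done(struct cmdq_cb_data data)
{
        if (data.sta != CMDQ_CB_NORMAL)
                pr_err("cmdq packet failed: %d\n", data.sta);
}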
/* in cmdq_task_handle_error() */
struct cmdq_thread *thread = task->thread;
struct cmdq_task *next_task;
struct cmdq *cmdq = task->cmdq;

dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
next_task = list_first_entry_or_null(&thread->task_busy_list,
                                     struct cmdq_task, list_entry);
if (next_task)
        writel(next_task->pa_base >> cmdq->shift_pa,
               thread->base + CMDQ_THR_CURR_ADDR);
/* in cmdq_thread_irq_handler() */
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

/*
 * Another CPU core could release tasks right before we acquire the
 * spin lock, and thus reset / disable this GCE thread, so we need
 * to check the enable bit of this GCE thread.
 */
if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
        return;

curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;

list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                         list_entry) {
        task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
        if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
                curr_task = task;

        if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
                cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
                kfree(task);
        }
        if (curr_task)
                break;
}

if (list_empty(&thread->task_busy_list)) {
        cmdq_thread_disable(cmdq, thread);
        clk_disable(cmdq->clock);
}
/* in cmdq_irq_handler() */
irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
if (!(irq_status ^ cmdq->irq_mask))
        return IRQ_NONE;

for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
        struct cmdq_thread *thread = &cmdq->thread[bit];

        spin_lock_irqsave(&thread->chan->lock, flags);
        cmdq_thread_irq_handler(cmdq, thread);
        spin_unlock_irqrestore(&thread->chan->lock, flags);
}
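The GCE reports per-thread interrupts active-low in CMDQ_CURR_IRQ_STATUS: a cleared bit marks a pending thread, which is why the code XORs against irq_mask to detect "nothing pending" and walks clear bits rather than set ones. A self-contained restatement of that check (the helper name is illustrative):

#include <linux/types.h>

/* Illustrative only: equivalent to !(irq_status ^ cmdq->irq_mask) above.
 * If every masked bit is still set (active-low), no thread is pending. */
static inline bool gce_irq_pending(unsigned long irq_status,
                                   unsigned long irq_mask)
{
        return (irq_status & irq_mask) != irq_mask;
}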
/* in cmdq_suspend() */
cmdq->suspended = true;

for (i = 0; i < cmdq->thread_nr; i++) {
        thread = &cmdq->thread[i];
        if (!list_empty(&thread->task_busy_list)) {
                task_running = true;
                break;
        }
}

clk_unprepare(cmdq->clock);
/* in cmdq_resume() */
WARN_ON(clk_prepare(cmdq->clock) < 0);
cmdq->suspended = false;
/* in cmdq_remove() */
clk_unprepare(cmdq->clock);
/* in cmdq_mbox_send_data() */
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);

/* clients should not flush new tasks while suspended */
WARN_ON(cmdq->suspended);

task = kzalloc(sizeof(*task), GFP_ATOMIC);
if (!task)
        return -ENOMEM;

task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
task->pa_base = pkt->pa_base;
task->thread = thread;
task->pkt = pkt;

if (list_empty(&thread->task_busy_list)) {
        /* idle thread: program its window and start it */
        WARN_ON(clk_enable(cmdq->clock) < 0);

        writel(task->pa_base >> cmdq->shift_pa,
               thread->base + CMDQ_THR_CURR_ADDR);
        writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
               thread->base + CMDQ_THR_END_ADDR);

        writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
        writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
        writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
        /* busy thread: suspend it, then append or chain the packet */
        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
                cmdq->shift_pa;
        end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
                cmdq->shift_pa;
        /* check boundary */
        if (curr_pa == end_pa - CMDQ_INST_SIZE ||
            curr_pa == end_pa) {
                /* set PC to this task directly */
                writel(task->pa_base >> cmdq->shift_pa,
                       thread->base + CMDQ_THR_CURR_ADDR);
        } else {
                cmdq_task_insert_into_thread(task);
        }
        writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
               thread->base + CMDQ_THR_END_ADDR);
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
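From the client side, a packet reaches this function through mbox_send_message(). Because the controller sets neither txdone_irq nor txdone_poll (see cmdq_probe() below), the framework runs in TXDONE_BY_ACK mode and the client acks immediately after queueing. A hedged sketch, loosely modeled on the mtk-cmdq-helper flush path (my_flush_pkt is an illustrative name):

#include <linux/mailbox_client.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

static int my_flush_pkt(struct mbox_chan *chan, struct cmdq_pkt *pkt,
                        cmdq_async_flush_cb cb, void *data)
{
        int err;

        pkt->async_cb.cb = cb;          /* invoked from cmdq_task_exec_done() */
        pkt->async_cb.data = data;

        err = mbox_send_message(chan, pkt);
        if (err < 0)
                return err;
        mbox_client_txdone(chan, 0);    /* ack now; GCE executes asynchronously */

        return 0;
}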
/* in cmdq_mbox_shutdown() */
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);

spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
        goto done;

cmdq_thread_irq_handler(cmdq, thread);  /* complete already-executed tasks */
if (list_empty(&thread->task_busy_list))
        goto done;

list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                         list_entry) {
        cmdq_task_exec_done(task, CMDQ_CB_ERROR);
        kfree(task);
}

clk_disable(cmdq->clock);
done:
/*
 * An empty thread->task_busy_list means the thread is already
 * disabled, so the shutdown can complete here.
 */
spin_unlock_irqrestore(&thread->chan->lock, flags);
/* in cmdq_mbox_flush() */
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);

spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
        goto out;

/* abort still-queued tasks with an error callback */
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                         list_entry) {
        cb = &task->pkt->async_cb;
        if (cb->cb) {
                data.sta = CMDQ_CB_ERROR;
                data.data = cb->data;
                cb->cb(data);
        }
        list_del(&task->list_entry);
        kfree(task);
}

clk_disable(cmdq->clock);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
return 0;

/* busy path: drop the lock and poll until the thread disables itself */
spin_unlock_irqrestore(&thread->chan->lock, flags);
if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
                enable, enable == 0, 1, timeout)) {
        dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
                (u32)(thread->base - cmdq->base));
        return -EFAULT;
}
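Clients reach this path through the mailbox framework's mbox_flush(). A hedged usage sketch (my_force_flush is an illustrative name; channel acquisition elided):

#include <linux/mailbox_client.h>
#include <linux/printk.h>

/* Illustrative only: drive cmdq_mbox_flush() via the framework API. */
static int my_force_flush(struct mbox_chan *chan)
{
        int err = mbox_flush(chan, 2000);       /* timeout in ms */

        if (err < 0)
                pr_err("GCE flush failed: %d\n", err);

        return err;
}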
/* in cmdq_xlate() */
int ind = sp->args[0];

if (ind >= mbox->num_chans)
        return ERR_PTR(-EINVAL);

thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
thread->priority = sp->args[1];
thread->chan = &mbox->chans[ind];

return &mbox->chans[ind];
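cmdq_xlate() consumes a two-cell specifier: args[0] selects the GCE thread (channel index) and args[1] sets its priority. A hypothetical consumer fragment, written as a comment since it is device tree rather than C (the priority macro is assumed to come from dt-bindings/gce/*.h):

/*
 * Hypothetical consumer node (device tree, not C):
 *
 *      mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST>;
 *
 * args[0] = 0 picks mbox->chans[0]; args[1] lands in thread->priority.
 */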
/* in cmdq_probe() */
struct device *dev = &pdev->dev;

cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
        return -ENOMEM;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cmdq->base = devm_ioremap_resource(dev, res);
if (IS_ERR(cmdq->base)) {
        dev_err(dev, "failed to ioremap gce\n");
        return PTR_ERR(cmdq->base);
}

cmdq->irq = platform_get_irq(pdev, 0);
if (cmdq->irq < 0)
        return cmdq->irq;

plat_data = (struct gce_plat *)of_device_get_match_data(dev);
if (!plat_data) {
        dev_err(dev, "failed to get match data\n");
        return -EINVAL;
}

cmdq->thread_nr = plat_data->thread_nr;
cmdq->shift_pa = plat_data->shift;
cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
                       "mtk_cmdq", cmdq);
if (err < 0) {
        dev_err(dev, "failed to register ISR (%d)\n", err);
        return err;
}

dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
        dev, cmdq->base, cmdq->irq);

cmdq->clock = devm_clk_get(dev, "gce");
if (IS_ERR(cmdq->clock)) {
        dev_err(dev, "failed to get gce clk\n");
        return PTR_ERR(cmdq->clock);
}

cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
                                sizeof(*cmdq->mbox.chans), GFP_KERNEL);
if (!cmdq->mbox.chans)
        return -ENOMEM;

cmdq->mbox.num_chans = cmdq->thread_nr;
cmdq->mbox.ops = &cmdq_mbox_chan_ops;
cmdq->mbox.of_xlate = cmdq_xlate;

/* no tx-done interrupt or polling: the framework uses TXDONE_BY_ACK */
cmdq->mbox.txdone_irq = false;
cmdq->mbox.txdone_poll = false;

cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
                            sizeof(*cmdq->thread), GFP_KERNEL);
if (!cmdq->thread)
        return -ENOMEM;

for (i = 0; i < cmdq->thread_nr; i++) {
        cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
                               CMDQ_THR_SIZE * i;
        INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
        cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
}

err = devm_mbox_controller_register(dev, &cmdq->mbox);
if (err < 0) {
        dev_err(dev, "failed to register mailbox: %d\n", err);
        return err;
}

WARN_ON(clk_prepare(cmdq->clock) < 0);
static const struct of_device_id cmdq_of_ids[] = {
        { .compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2 },
        { .compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3 },
        { .compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4 },
        {}
};
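Each compatible entry carries per-SoC match data; cmdq_probe() above reads thread_nr and shift from it. A sketch of that structure under those assumptions (the field set follows plat_data->thread_nr / plat_data->shift; the mt6779 values reflect its use of a shifted physical address but are assumptions, not verified here):

/* Sketch of the per-SoC match data dereferenced in cmdq_probe() above. */
struct gce_plat {
        u32 thread_nr;  /* number of GCE threads / mailbox channels */
        u8 shift;       /* shift_pa for >32-bit command buffer addresses */
};

/* Assumed example values for illustration only. */
static const struct gce_plat gce_plat_v4 = { .thread_nr = 24, .shift = 3 };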