Lines Matching +full:tx +full:- +full:mailbox +full:- +full:count
21 * SoC-specific ring manager driver is implemented as a mailbox controller
29 * hardware devices for achieving high throughput.
32 * except submitting requests to the SBA hardware device via mailbox channels.
34 * mailbox channel provided by the Broadcom SoC-specific ring manager driver.
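For orientation, here is a minimal, hypothetical sketch (not part of this driver) of how a generic dmaengine client would end up exercising the code below through the standard dmaengine API. The demo_copy() helper, its parameters, and the assumption that dst/src are already DMA-mapped addresses are purely illustrative.

#include <linux/dmaengine.h>

/* Illustrative client-side helper; the framework may hand back any
 * MEMCPY-capable channel, which could (but need not) be an SBA channel.
 */
static int demo_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Prepare, submit, and kick the channel. When an SBA channel is
	 * picked, these calls reach sba_prep_dma_memcpy(), sba_tx_submit()
	 * and sba_issue_pending() shown further down in this listing.
	 */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; real clients would normally use a callback. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_release_channel(chan);
	return 0;
}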
42 #include <linux/dma-mapping.h>
46 #include <linux/mailbox/brcm-message.h>
93 #define to_sba_request(tx) \ argument
94 container_of(tx, struct sba_request, tx)
121 struct dma_async_tx_descriptor tx; member
146 /* Mailbox client and Mailbox channels */
209 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_alloc_request()
210 list_for_each_entry(req, &sba->reqs_free_list, node) { in sba_alloc_request()
211 if (async_tx_test_ack(&req->tx)) { in sba_alloc_request()
212 list_move_tail(&req->node, &sba->reqs_alloc_list); in sba_alloc_request()
217 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_alloc_request()
222 * mailbox channels hoping a few active requests in sba_alloc_request()
226 mbox_client_peek_data(sba->mchan); in sba_alloc_request()
230 req->flags = SBA_REQUEST_STATE_ALLOCED; in sba_alloc_request()
231 req->first = req; in sba_alloc_request()
232 INIT_LIST_HEAD(&req->next); in sba_alloc_request()
233 atomic_set(&req->next_pending_count, 1); in sba_alloc_request()
235 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_alloc_request()
236 async_tx_ack(&req->tx); in sba_alloc_request()
241 /* Note: Must be called with sba->reqs_lock held */
245 lockdep_assert_held(&sba->reqs_lock); in _sba_pending_request()
246 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_pending_request()
247 req->flags |= SBA_REQUEST_STATE_PENDING; in _sba_pending_request()
248 list_move_tail(&req->node, &sba->reqs_pending_list); in _sba_pending_request()
249 if (list_empty(&sba->reqs_active_list)) in _sba_pending_request()
250 sba->reqs_fence = false; in _sba_pending_request()
253 /* Note: Must be called with sba->reqs_lock held */
257 lockdep_assert_held(&sba->reqs_lock); in _sba_active_request()
258 if (list_empty(&sba->reqs_active_list)) in _sba_active_request()
259 sba->reqs_fence = false; in _sba_active_request()
260 if (sba->reqs_fence) in _sba_active_request()
262 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_active_request()
263 req->flags |= SBA_REQUEST_STATE_ACTIVE; in _sba_active_request()
264 list_move_tail(&req->node, &sba->reqs_active_list); in _sba_active_request()
265 if (req->flags & SBA_REQUEST_FENCE) in _sba_active_request()
266 sba->reqs_fence = true; in _sba_active_request()
270 /* Note: Must be called with sba->reqs_lock held */
274 lockdep_assert_held(&sba->reqs_lock); in _sba_abort_request()
275 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_abort_request()
276 req->flags |= SBA_REQUEST_STATE_ABORTED; in _sba_abort_request()
277 list_move_tail(&req->node, &sba->reqs_aborted_list); in _sba_abort_request()
278 if (list_empty(&sba->reqs_active_list)) in _sba_abort_request()
279 sba->reqs_fence = false; in _sba_abort_request()
282 /* Note: Must be called with sba->reqs_lock held */
286 lockdep_assert_held(&sba->reqs_lock); in _sba_free_request()
287 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_free_request()
288 req->flags |= SBA_REQUEST_STATE_FREE; in _sba_free_request()
289 list_move_tail(&req->node, &sba->reqs_free_list); in _sba_free_request()
290 if (list_empty(&sba->reqs_active_list)) in _sba_free_request()
291 sba->reqs_fence = false; in _sba_free_request()
298 struct sba_device *sba = req->sba; in sba_free_chained_requests()
300 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_free_chained_requests()
303 list_for_each_entry(nreq, &req->next, next) in sba_free_chained_requests()
306 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_free_chained_requests()
313 struct sba_device *sba = req->sba; in sba_chain_request()
315 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_chain_request()
317 list_add_tail(&req->next, &first->next); in sba_chain_request()
318 req->first = first; in sba_chain_request()
319 atomic_inc(&first->next_pending_count); in sba_chain_request()
321 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_chain_request()
329 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
332 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) in sba_cleanup_nonpending_requests()
336 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) in sba_cleanup_nonpending_requests()
344 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
352 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
355 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) in sba_cleanup_pending_requests()
358 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
367 req->msg.error = 0; in sba_send_mbox_request()
368 ret = mbox_send_message(sba->mchan, &req->msg); in sba_send_mbox_request()
370 dev_err(sba->dev, "send message failed with error %d", ret); in sba_send_mbox_request()
374 /* Check error returned by mailbox controller */ in sba_send_mbox_request()
375 ret = req->msg.error; in sba_send_mbox_request()
377 dev_err(sba->dev, "message error %d", ret); in sba_send_mbox_request()
380 /* Signal txdone for mailbox channel */ in sba_send_mbox_request()
381 mbox_client_txdone(sba->mchan, ret); in sba_send_mbox_request()
386 /* Note: Must be called with sba->reqs_lock held */
390 u32 count; in _sba_process_pending_requests() local
394 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL; in _sba_process_pending_requests()
395 while (!list_empty(&sba->reqs_pending_list) && count) { in _sba_process_pending_requests()
397 req = list_first_entry(&sba->reqs_pending_list, in _sba_process_pending_requests()
404 /* Send request to mailbox channel */ in _sba_process_pending_requests()
411 count--; in _sba_process_pending_requests()
419 struct dma_async_tx_descriptor *tx; in sba_process_received_request() local
420 struct sba_request *nreq, *first = req->first; in sba_process_received_request()
423 if (!atomic_dec_return(&first->next_pending_count)) { in sba_process_received_request()
424 tx = &first->tx; in sba_process_received_request()
426 WARN_ON(tx->cookie < 0); in sba_process_received_request()
427 if (tx->cookie > 0) { in sba_process_received_request()
428 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
429 dma_cookie_complete(tx); in sba_process_received_request()
430 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
431 dmaengine_desc_get_callback_invoke(tx, NULL); in sba_process_received_request()
432 dma_descriptor_unmap(tx); in sba_process_received_request()
433 tx->callback = NULL; in sba_process_received_request()
434 tx->callback_result = NULL; in sba_process_received_request()
437 dma_run_dependencies(tx); in sba_process_received_request()
439 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
442 list_for_each_entry(nreq, &first->next, next) in sba_process_received_request()
444 INIT_LIST_HEAD(&first->next); in sba_process_received_request()
452 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
464 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
466 list_for_each_entry(req, &sba->reqs_free_list, node) in sba_write_stats_in_seqfile()
467 if (async_tx_test_ack(&req->tx)) in sba_write_stats_in_seqfile()
470 list_for_each_entry(req, &sba->reqs_alloc_list, node) in sba_write_stats_in_seqfile()
473 list_for_each_entry(req, &sba->reqs_pending_list, node) in sba_write_stats_in_seqfile()
476 list_for_each_entry(req, &sba->reqs_active_list, node) in sba_write_stats_in_seqfile()
479 list_for_each_entry(req, &sba->reqs_aborted_list, node) in sba_write_stats_in_seqfile()
482 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
484 seq_printf(file, "maximum requests = %d\n", sba->max_req); in sba_write_stats_in_seqfile()
497 * Channel resources are pre-allocated so we just free up in sba_free_chan_resources()
498 * whatever we can so that we can reuse pre-allocated in sba_free_chan_resources()
518 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_issue_pending()
520 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_issue_pending()
523 static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx) in sba_tx_submit() argument
530 if (unlikely(!tx)) in sba_tx_submit()
531 return -EINVAL; in sba_tx_submit()
533 sba = to_sba_device(tx->chan); in sba_tx_submit()
534 req = to_sba_request(tx); in sba_tx_submit()
537 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_tx_submit()
538 cookie = dma_cookie_assign(tx); in sba_tx_submit()
540 list_for_each_entry(nreq, &req->next, next) in sba_tx_submit()
542 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_tx_submit()
558 mbox_client_peek_data(sba->mchan); in sba_tx_status()
569 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_interrupt_msg()
572 /* Type-B command to load dummy data into buf0 */ in sba_fillup_interrupt_msg()
575 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
582 cmdsp->cmd = cmd; in sba_fillup_interrupt_msg()
583 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_interrupt_msg()
584 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_interrupt_msg()
585 cmdsp->data = resp_dma; in sba_fillup_interrupt_msg()
586 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
589 /* Type-A command to write buf0 to dummy location */ in sba_fillup_interrupt_msg()
592 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
601 cmdsp->cmd = cmd; in sba_fillup_interrupt_msg()
602 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_interrupt_msg()
603 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_interrupt_msg()
604 if (req->sba->hw_resp_size) { in sba_fillup_interrupt_msg()
605 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_interrupt_msg()
606 cmdsp->resp = resp_dma; in sba_fillup_interrupt_msg()
607 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
609 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_interrupt_msg()
610 cmdsp->data = resp_dma; in sba_fillup_interrupt_msg()
611 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
615 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_interrupt_msg()
616 msg->sba.cmds = cmds; in sba_fillup_interrupt_msg()
617 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_interrupt_msg()
618 msg->ctx = req; in sba_fillup_interrupt_msg()
619 msg->error = 0; in sba_fillup_interrupt_msg()
637 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_interrupt()
640 sba_fillup_interrupt_msg(req, req->cmds, &req->msg); in sba_prep_dma_interrupt()
643 req->tx.flags = flags; in sba_prep_dma_interrupt()
644 req->tx.cookie = -EBUSY; in sba_prep_dma_interrupt()
646 return &req->tx; in sba_prep_dma_interrupt()
657 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_memcpy_msg()
660 /* Type-B command to load data into buf0 */ in sba_fillup_memcpy_msg()
670 cmdsp->cmd = cmd; in sba_fillup_memcpy_msg()
671 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_memcpy_msg()
672 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_memcpy_msg()
673 cmdsp->data = src + msg_offset; in sba_fillup_memcpy_msg()
674 cmdsp->data_len = msg_len; in sba_fillup_memcpy_msg()
677 /* Type-A command to write buf0 */ in sba_fillup_memcpy_msg()
689 cmdsp->cmd = cmd; in sba_fillup_memcpy_msg()
690 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_memcpy_msg()
691 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_memcpy_msg()
692 if (req->sba->hw_resp_size) { in sba_fillup_memcpy_msg()
693 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_memcpy_msg()
694 cmdsp->resp = resp_dma; in sba_fillup_memcpy_msg()
695 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_memcpy_msg()
697 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_memcpy_msg()
698 cmdsp->data = dst + msg_offset; in sba_fillup_memcpy_msg()
699 cmdsp->data_len = msg_len; in sba_fillup_memcpy_msg()
703 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_memcpy_msg()
704 msg->sba.cmds = cmds; in sba_fillup_memcpy_msg()
705 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_memcpy_msg()
706 msg->ctx = req; in sba_fillup_memcpy_msg()
707 msg->error = 0; in sba_fillup_memcpy_msg()
722 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_memcpy_req()
725 sba_fillup_memcpy_msg(req, req->cmds, &req->msg, in sba_prep_dma_memcpy_req()
729 req->tx.flags = flags; in sba_prep_dma_memcpy_req()
730 req->tx.cookie = -EBUSY; in sba_prep_dma_memcpy_req()
746 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_memcpy()
762 len -= req_len; in sba_prep_dma_memcpy()
765 return (first) ? &first->tx : NULL; in sba_prep_dma_memcpy()
777 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_xor_msg()
780 /* Type-B command to load data into buf0 */ in sba_fillup_xor_msg()
790 cmdsp->cmd = cmd; in sba_fillup_xor_msg()
791 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_xor_msg()
792 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_xor_msg()
793 cmdsp->data = src[0] + msg_offset; in sba_fillup_xor_msg()
794 cmdsp->data_len = msg_len; in sba_fillup_xor_msg()
797 /* Type-B commands to xor data with buf0 and put it back in buf0 */ in sba_fillup_xor_msg()
808 cmdsp->cmd = cmd; in sba_fillup_xor_msg()
809 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_xor_msg()
810 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_xor_msg()
811 cmdsp->data = src[i] + msg_offset; in sba_fillup_xor_msg()
812 cmdsp->data_len = msg_len; in sba_fillup_xor_msg()
816 /* Type-A command to write buf0 */ in sba_fillup_xor_msg()
828 cmdsp->cmd = cmd; in sba_fillup_xor_msg()
829 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_xor_msg()
830 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_xor_msg()
831 if (req->sba->hw_resp_size) { in sba_fillup_xor_msg()
832 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_xor_msg()
833 cmdsp->resp = resp_dma; in sba_fillup_xor_msg()
834 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_xor_msg()
836 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_xor_msg()
837 cmdsp->data = dst + msg_offset; in sba_fillup_xor_msg()
838 cmdsp->data_len = msg_len; in sba_fillup_xor_msg()
842 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_xor_msg()
843 msg->sba.cmds = cmds; in sba_fillup_xor_msg()
844 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_xor_msg()
845 msg->ctx = req; in sba_fillup_xor_msg()
846 msg->error = 0; in sba_fillup_xor_msg()
861 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_xor_req()
864 sba_fillup_xor_msg(req, req->cmds, &req->msg, in sba_prep_dma_xor_req()
868 req->tx.flags = flags; in sba_prep_dma_xor_req()
869 req->tx.cookie = -EBUSY; in sba_prep_dma_xor_req()
884 if (unlikely(src_cnt > sba->max_xor_srcs)) in sba_prep_dma_xor()
889 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_xor()
905 len -= req_len; in sba_prep_dma_xor()
908 return (first) ? &first->tx : NULL; in sba_prep_dma_xor()
922 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_pq_msg()
926 /* Type-B command to load old P into buf0 */ in sba_fillup_pq_msg()
937 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
938 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
939 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_msg()
940 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_msg()
941 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
945 /* Type-B command to load old Q into buf1 */ in sba_fillup_pq_msg()
956 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
957 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
958 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_msg()
959 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_msg()
960 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
964 /* Type-A command to zero all buffers */ in sba_fillup_pq_msg()
971 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
972 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
973 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_msg()
977 /* Type-B commands to generate P onto buf0 and Q onto buf1 */ in sba_fillup_pq_msg()
990 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
991 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
992 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_msg()
993 cmdsp->data = src[i] + msg_offset; in sba_fillup_pq_msg()
994 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
998 /* Type-A command to write buf0 */ in sba_fillup_pq_msg()
1011 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
1012 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
1013 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_msg()
1014 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1015 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_msg()
1016 cmdsp->resp = resp_dma; in sba_fillup_pq_msg()
1017 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1019 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_msg()
1020 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_msg()
1021 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
1025 /* Type-A command to write buf1 */ in sba_fillup_pq_msg()
1038 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
1039 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
1040 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_msg()
1041 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1042 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_msg()
1043 cmdsp->resp = resp_dma; in sba_fillup_pq_msg()
1044 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1046 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_msg()
1047 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_msg()
1048 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
1053 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_pq_msg()
1054 msg->sba.cmds = cmds; in sba_fillup_pq_msg()
1055 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_msg()
1056 msg->ctx = req; in sba_fillup_pq_msg()
1057 msg->error = 0; in sba_fillup_pq_msg()
1072 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_pq_req()
1076 req->cmds, &req->msg, in sba_prep_dma_pq_req()
1080 req->tx.flags = flags; in sba_prep_dma_pq_req()
1081 req->tx.cookie = -EBUSY; in sba_prep_dma_pq_req()
1097 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_pq_single_msg()
1104 /* Type-B command to load old P into buf0 */ in sba_fillup_pq_single_msg()
1114 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1115 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1116 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1117 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_single_msg()
1118 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1122 * Type-B commands to xor data with buf0 and put it in sba_fillup_pq_single_msg()
1134 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1135 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1136 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1137 cmdsp->data = src + msg_offset; in sba_fillup_pq_single_msg()
1138 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1141 /* Type-B command to load old P into buf0 */ in sba_fillup_pq_single_msg()
1151 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1152 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1153 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1154 cmdsp->data = src + msg_offset; in sba_fillup_pq_single_msg()
1155 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1159 /* Type-A command to write buf0 */ in sba_fillup_pq_single_msg()
1171 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1172 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1173 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1174 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1175 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_single_msg()
1176 cmdsp->resp = resp_dma; in sba_fillup_pq_single_msg()
1177 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1179 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_single_msg()
1180 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_single_msg()
1181 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1188 /* Type-A command to zero all buffers */ in sba_fillup_pq_single_msg()
1195 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1196 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1197 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1202 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1203 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1206 * Type-B command to generate initial Q from data in sba_fillup_pq_single_msg()
1220 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1221 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1222 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1223 cmdsp->data = src + msg_offset; in sba_fillup_pq_single_msg()
1224 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1227 dpos -= pos; in sba_fillup_pq_single_msg()
1229 /* Multiple Type-A commands to generate final Q */ in sba_fillup_pq_single_msg()
1231 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1232 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1235 * Type-A command to generate Q with buf0 and in sba_fillup_pq_single_msg()
1249 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1250 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1251 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1254 dpos -= pos; in sba_fillup_pq_single_msg()
1260 * Type-B command to XOR previous output with in sba_fillup_pq_single_msg()
1272 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1273 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1274 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1275 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_single_msg()
1276 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1280 /* Type-A command to write buf0 */ in sba_fillup_pq_single_msg()
1292 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1293 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1294 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1295 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1296 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_single_msg()
1297 cmdsp->resp = resp_dma; in sba_fillup_pq_single_msg()
1298 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1300 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_single_msg()
1301 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_single_msg()
1302 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1307 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_pq_single_msg()
1308 msg->sba.cmds = cmds; in sba_fillup_pq_single_msg()
1309 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_single_msg()
1310 msg->ctx = req; in sba_fillup_pq_single_msg()
1311 msg->error = 0; in sba_fillup_pq_single_msg()
1327 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_pq_single_req()
1331 req->cmds, &req->msg, off, len, in sba_prep_dma_pq_single_req()
1335 req->tx.flags = flags; in sba_prep_dma_pq_single_req()
1336 req->tx.cookie = -EBUSY; in sba_prep_dma_pq_single_req()
1354 if (unlikely(src_cnt > sba->max_pq_srcs)) in sba_prep_dma_pq()
1357 if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) in sba_prep_dma_pq()
1360 /* Figure out P and Q destination addresses */ in sba_prep_dma_pq()
1368 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_pq()
1429 len -= req_len; in sba_prep_dma_pq()
1432 return (first) ? &first->tx : NULL; in sba_prep_dma_pq()
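Background note (standard RAID-6 arithmetic, not quoted from this file): P is the plain XOR of the sources, while Q = scf[0]*D0 ^ scf[1]*D1 ^ ... ^ scf[n-1]*Dn-1 with the multiplications done in GF(2^8); for ordinary syndrome generation scf[i] = g^i, so raid6_gflog[scf[i]] is the coefficient's exponent. As I read the code above, the check against sba->max_pq_coefs in sba_prep_dma_pq() diverts large exponents to the single-source path (sba_prep_dma_pq_single_req()), and sba_fillup_pq_single_msg() then applies the remaining exponent in pieces no larger than max_pq_coefs - 1 per command.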
1440 /* ====== Mailbox callbacks ===== */
1445 struct sba_request *req = m->ctx; in sba_receive_message()
1446 struct sba_device *sba = req->sba; in sba_receive_message()
1448 /* Log an error if the received message carries one */ in sba_receive_message()
1449 if (m->error < 0) in sba_receive_message()
1450 dev_err(sba->dev, "%s got message with error %d", in sba_receive_message()
1451 dma_chan_name(&sba->dma_chan), m->error); in sba_receive_message()
1461 struct sba_device *sba = dev_get_drvdata(file->private); in sba_debugfs_stats_show()
1476 sba->resp_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1477 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1478 &sba->resp_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1479 if (!sba->resp_base) in sba_prealloc_channel_resources()
1480 return -ENOMEM; in sba_prealloc_channel_resources()
1482 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1483 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1484 &sba->cmds_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1485 if (!sba->cmds_base) { in sba_prealloc_channel_resources()
1486 ret = -ENOMEM; in sba_prealloc_channel_resources()
1490 spin_lock_init(&sba->reqs_lock); in sba_prealloc_channel_resources()
1491 sba->reqs_fence = false; in sba_prealloc_channel_resources()
1492 INIT_LIST_HEAD(&sba->reqs_alloc_list); in sba_prealloc_channel_resources()
1493 INIT_LIST_HEAD(&sba->reqs_pending_list); in sba_prealloc_channel_resources()
1494 INIT_LIST_HEAD(&sba->reqs_active_list); in sba_prealloc_channel_resources()
1495 INIT_LIST_HEAD(&sba->reqs_aborted_list); in sba_prealloc_channel_resources()
1496 INIT_LIST_HEAD(&sba->reqs_free_list); in sba_prealloc_channel_resources()
1498 for (i = 0; i < sba->max_req; i++) { in sba_prealloc_channel_resources()
1499 req = devm_kzalloc(sba->dev, in sba_prealloc_channel_resources()
1500 struct_size(req, cmds, sba->max_cmd_per_req), in sba_prealloc_channel_resources()
1503 ret = -ENOMEM; in sba_prealloc_channel_resources()
1506 INIT_LIST_HEAD(&req->node); in sba_prealloc_channel_resources()
1507 req->sba = sba; in sba_prealloc_channel_resources()
1508 req->flags = SBA_REQUEST_STATE_FREE; in sba_prealloc_channel_resources()
1509 INIT_LIST_HEAD(&req->next); in sba_prealloc_channel_resources()
1510 atomic_set(&req->next_pending_count, 0); in sba_prealloc_channel_resources()
1511 for (j = 0; j < sba->max_cmd_per_req; j++) { in sba_prealloc_channel_resources()
1512 req->cmds[j].cmd = 0; in sba_prealloc_channel_resources()
1513 req->cmds[j].cmd_dma = sba->cmds_base + in sba_prealloc_channel_resources()
1514 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1515 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + in sba_prealloc_channel_resources()
1516 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1517 req->cmds[j].flags = 0; in sba_prealloc_channel_resources()
1519 memset(&req->msg, 0, sizeof(req->msg)); in sba_prealloc_channel_resources()
1520 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_prealloc_channel_resources()
1521 async_tx_ack(&req->tx); in sba_prealloc_channel_resources()
1522 req->tx.tx_submit = sba_tx_submit; in sba_prealloc_channel_resources()
1523 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; in sba_prealloc_channel_resources()
1524 list_add_tail(&req->node, &sba->reqs_free_list); in sba_prealloc_channel_resources()
1530 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1531 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1532 sba->cmds_base, sba->cmds_dma_base); in sba_prealloc_channel_resources()
1534 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1535 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1536 sba->resp_base, sba->resp_dma_base); in sba_prealloc_channel_resources()
1542 dmaengine_terminate_all(&sba->dma_chan); in sba_freeup_channel_resources()
1543 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, in sba_freeup_channel_resources()
1544 sba->cmds_base, sba->cmds_dma_base); in sba_freeup_channel_resources()
1545 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, in sba_freeup_channel_resources()
1546 sba->resp_base, sba->resp_dma_base); in sba_freeup_channel_resources()
1547 sba->resp_base = NULL; in sba_freeup_channel_resources()
1548 sba->resp_dma_base = 0; in sba_freeup_channel_resources()
1554 struct dma_device *dma_dev = &sba->dma_dev; in sba_async_register()
1557 sba->dma_chan.device = dma_dev; in sba_async_register()
1558 dma_cookie_init(&sba->dma_chan); in sba_async_register()
1561 dma_cap_zero(dma_dev->cap_mask); in sba_async_register()
1562 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); in sba_async_register()
1563 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); in sba_async_register()
1564 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in sba_async_register()
1565 dma_cap_set(DMA_PQ, dma_dev->cap_mask); in sba_async_register()
1568 * Set mailbox channel device as the base device of in sba_async_register()
1570 * will be done by mailbox controller in sba_async_register()
1572 dma_dev->dev = sba->mbox_dev; in sba_async_register()
1575 dma_dev->device_free_chan_resources = sba_free_chan_resources; in sba_async_register()
1576 dma_dev->device_terminate_all = sba_device_terminate_all; in sba_async_register()
1577 dma_dev->device_issue_pending = sba_issue_pending; in sba_async_register()
1578 dma_dev->device_tx_status = sba_tx_status; in sba_async_register()
1581 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) in sba_async_register()
1582 dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt; in sba_async_register()
1585 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) in sba_async_register()
1586 dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy; in sba_async_register()
1589 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { in sba_async_register()
1590 dma_dev->device_prep_dma_xor = sba_prep_dma_xor; in sba_async_register()
1591 dma_dev->max_xor = sba->max_xor_srcs; in sba_async_register()
1595 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { in sba_async_register()
1596 dma_dev->device_prep_dma_pq = sba_prep_dma_pq; in sba_async_register()
1597 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); in sba_async_register()
1601 INIT_LIST_HEAD(&dma_dev->channels); in sba_async_register()
1602 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); in sba_async_register()
1607 dev_err(sba->dev, "async device register error %d", ret); in sba_async_register()
1611 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", in sba_async_register()
1612 dma_chan_name(&sba->dma_chan), in sba_async_register()
1613 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "", in sba_async_register()
1614 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "", in sba_async_register()
1615 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", in sba_async_register()
1616 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : ""); in sba_async_register()
1629 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); in sba_probe()
1631 return -ENOMEM; in sba_probe()
1633 sba->dev = &pdev->dev; in sba_probe()
1636 /* Number of mailbox channels should be at least 1 */ in sba_probe()
1637 ret = of_count_phandle_with_args(pdev->dev.of_node, in sba_probe()
1638 "mboxes", "#mbox-cells"); in sba_probe()
1640 return -ENODEV; in sba_probe()
1643 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) in sba_probe()
1644 sba->ver = SBA_VER_1; in sba_probe()
1645 else if (of_device_is_compatible(sba->dev->of_node, in sba_probe()
1646 "brcm,iproc-sba-v2")) in sba_probe()
1647 sba->ver = SBA_VER_2; in sba_probe()
1649 return -ENODEV; in sba_probe()
1652 switch (sba->ver) { in sba_probe()
1654 sba->hw_buf_size = 4096; in sba_probe()
1655 sba->hw_resp_size = 8; in sba_probe()
1656 sba->max_pq_coefs = 6; in sba_probe()
1657 sba->max_pq_srcs = 6; in sba_probe()
1660 sba->hw_buf_size = 4096; in sba_probe()
1661 sba->hw_resp_size = 8; in sba_probe()
1662 sba->max_pq_coefs = 30; in sba_probe()
1668 sba->max_pq_srcs = 12; in sba_probe()
1671 return -EINVAL; in sba_probe()
1673 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; in sba_probe()
1674 sba->max_cmd_per_req = sba->max_pq_srcs + 3; in sba_probe()
1675 sba->max_xor_srcs = sba->max_cmd_per_req - 1; in sba_probe()
1676 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; in sba_probe()
1677 sba->max_cmds_pool_size = sba->max_req * in sba_probe()
1678 sba->max_cmd_per_req * sizeof(u64); in sba_probe()
1680 /* Setup mailbox client */ in sba_probe()
1681 sba->client.dev = &pdev->dev; in sba_probe()
1682 sba->client.rx_callback = sba_receive_message; in sba_probe()
1683 sba->client.tx_block = false; in sba_probe()
1684 sba->client.knows_txdone = true; in sba_probe()
1685 sba->client.tx_tout = 0; in sba_probe()
1687 /* Request mailbox channel */ in sba_probe()
1688 sba->mchan = mbox_request_channel(&sba->client, 0); in sba_probe()
1689 if (IS_ERR(sba->mchan)) { in sba_probe()
1690 ret = PTR_ERR(sba->mchan); in sba_probe()
1694 /* Find out the underlying mailbox device */ in sba_probe()
1695 ret = of_parse_phandle_with_args(pdev->dev.of_node, in sba_probe()
1696 "mboxes", "#mbox-cells", 0, &args); in sba_probe()
1702 ret = -ENODEV; in sba_probe()
1705 sba->mbox_dev = &mbox_pdev->dev; in sba_probe()
1717 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); in sba_probe()
1720 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, in sba_probe()
1731 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", in sba_probe()
1732 dma_chan_name(&sba->dma_chan), sba->ver+1, in sba_probe()
1733 dev_name(sba->mbox_dev)); in sba_probe()
1738 debugfs_remove_recursive(sba->root); in sba_probe()
1741 mbox_free_channel(sba->mchan); in sba_probe()
1749 dma_async_device_unregister(&sba->dma_dev); in sba_remove()
1751 debugfs_remove_recursive(sba->root); in sba_remove()
1755 mbox_free_channel(sba->mchan); in sba_remove()
1761 { .compatible = "brcm,iproc-sba", },
1762 { .compatible = "brcm,iproc-sba-v2", },
1771 .name = "bcm-sba-raid",