
Lines matching refs: sba (drivers/dma/bcm-sba-raid.c, the Broadcom SBA RAID dmaengine driver)

113 	struct sba_device *sba;  member
203 static struct sba_request *sba_alloc_request(struct sba_device *sba) in sba_alloc_request() argument
209 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_alloc_request()
210 list_for_each_entry(req, &sba->reqs_free_list, node) { in sba_alloc_request()
212 list_move_tail(&req->node, &sba->reqs_alloc_list); in sba_alloc_request()
217 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_alloc_request()
226 mbox_client_peek_data(sba->mchan); in sba_alloc_request()
235 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_alloc_request()
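The matched lines 203-235 above give the allocator's shape: under reqs_lock, the first recyclable request moves from reqs_free_list to reqs_alloc_list; if none is found, mbox_client_peek_data() nudges the mailbox to deliver completions so in-flight requests can be recycled. A minimal reconstruction follows; the async_tx_test_ack() gate and the descriptor re-ack are assumptions filled in from context, not shown in this listing.

    static struct sba_request *sba_alloc_request(struct sba_device *sba)
    {
            bool found = false;
            unsigned long flags;
            struct sba_request *req = NULL;

            spin_lock_irqsave(&sba->reqs_lock, flags);
            list_for_each_entry(req, &sba->reqs_free_list, node) {
                    if (async_tx_test_ack(&req->tx)) {      /* assumed gate */
                            list_move_tail(&req->node, &sba->reqs_alloc_list);
                            found = true;
                            break;
                    }
            }
            spin_unlock_irqrestore(&sba->reqs_lock, flags);

            if (!found) {
                    /* Starved: kick the mailbox so completed messages are
                     * processed and their requests returned to the free list. */
                    mbox_client_peek_data(sba->mchan);
                    return NULL;
            }

            /* Reset the descriptor for reuse on this channel. */
            dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
            async_tx_ack(&req->tx);                         /* assumed */

            return req;
    }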
242 static void _sba_pending_request(struct sba_device *sba, in _sba_pending_request() argument
245 lockdep_assert_held(&sba->reqs_lock); in _sba_pending_request()
248 list_move_tail(&req->node, &sba->reqs_pending_list); in _sba_pending_request()
249 if (list_empty(&sba->reqs_active_list)) in _sba_pending_request()
250 sba->reqs_fence = false; in _sba_pending_request()
254 static bool _sba_active_request(struct sba_device *sba, in _sba_active_request() argument
257 lockdep_assert_held(&sba->reqs_lock); in _sba_active_request()
258 if (list_empty(&sba->reqs_active_list)) in _sba_active_request()
259 sba->reqs_fence = false; in _sba_active_request()
260 if (sba->reqs_fence) in _sba_active_request()
264 list_move_tail(&req->node, &sba->reqs_active_list); in _sba_active_request()
266 sba->reqs_fence = true; in _sba_active_request()
271 static void _sba_abort_request(struct sba_device *sba, in _sba_abort_request() argument
274 lockdep_assert_held(&sba->reqs_lock); in _sba_abort_request()
277 list_move_tail(&req->node, &sba->reqs_aborted_list); in _sba_abort_request()
278 if (list_empty(&sba->reqs_active_list)) in _sba_abort_request()
279 sba->reqs_fence = false; in _sba_abort_request()
283 static void _sba_free_request(struct sba_device *sba, in _sba_free_request() argument
286 lockdep_assert_held(&sba->reqs_lock); in _sba_free_request()
289 list_move_tail(&req->node, &sba->reqs_free_list); in _sba_free_request()
290 if (list_empty(&sba->reqs_active_list)) in _sba_free_request()
291 sba->reqs_fence = false; in _sba_free_request()
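Lines 242-291 show four single-purpose list movers that implement the request state machine (pending, active, aborted, free). Each asserts that reqs_lock is held, and each clears reqs_fence once reqs_active_list drains, so a fence only blocks activation while fenced work is still in flight. A condensed sketch of the activation step; the SBA_REQUEST_FENCE flag name is an assumption.

    static bool _sba_active_request(struct sba_device *sba,
                                    struct sba_request *req)
    {
            lockdep_assert_held(&sba->reqs_lock);
            if (list_empty(&sba->reqs_active_list))
                    sba->reqs_fence = false;
            if (sba->reqs_fence)
                    return false;           /* hold back until fence clears */
            list_move_tail(&req->node, &sba->reqs_active_list);
            if (req->flags & SBA_REQUEST_FENCE)     /* flag name assumed */
                    sba->reqs_fence = true;
            return true;
    }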
298 struct sba_device *sba = req->sba; in sba_free_chained_requests() local
300 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_free_chained_requests()
302 _sba_free_request(sba, req); in sba_free_chained_requests()
304 _sba_free_request(sba, nreq); in sba_free_chained_requests()
306 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_free_chained_requests()
313 struct sba_device *sba = req->sba; in sba_chain_request() local
315 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_chain_request()
321 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_chain_request()
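Lines 296-321 pair a chain builder with its teardown: sba_chain_request() links a request behind a first request under reqs_lock, and sba_free_chained_requests() walks that chain to return every link to the free list. A sketch of the builder; the next list, the first back-pointer, and the next_pending_count counter are assumed fields inferred from the free and completion paths.

    static void sba_chain_request(struct sba_request *first,
                                  struct sba_request *req)
    {
            unsigned long flags;
            struct sba_device *sba = req->sba;

            spin_lock_irqsave(&sba->reqs_lock, flags);

            list_add_tail(&req->next, &first->next);        /* assumed field */
            req->first = first;                             /* assumed field */
            atomic_inc(&first->next_pending_count);         /* assumed field */

            spin_unlock_irqrestore(&sba->reqs_lock, flags);
    }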
324 static void sba_cleanup_nonpending_requests(struct sba_device *sba) in sba_cleanup_nonpending_requests() argument
329 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
332 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) in sba_cleanup_nonpending_requests()
333 _sba_free_request(sba, req); in sba_cleanup_nonpending_requests()
336 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) in sba_cleanup_nonpending_requests()
337 _sba_abort_request(sba, req); in sba_cleanup_nonpending_requests()
344 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
347 static void sba_cleanup_pending_requests(struct sba_device *sba) in sba_cleanup_pending_requests() argument
352 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
355 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) in sba_cleanup_pending_requests()
356 _sba_free_request(sba, req); in sba_cleanup_pending_requests()
358 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
361 static int sba_send_mbox_request(struct sba_device *sba, in sba_send_mbox_request() argument
368 ret = mbox_send_message(sba->mchan, &req->msg); in sba_send_mbox_request()
370 dev_err(sba->dev, "send message failed with error %d", ret); in sba_send_mbox_request()
377 dev_err(sba->dev, "message error %d", ret); in sba_send_mbox_request()
381 mbox_client_txdone(sba->mchan, ret); in sba_send_mbox_request()
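Lines 361-381 give the send path: queue the message, check both the immediate return value and the error the mailbox controller wrote back into the message, then acknowledge TX completion by hand (the client is configured with knows_txdone, line 1684). A sketch; clearing req->msg.error before the send is an assumption.

    static int sba_send_mbox_request(struct sba_device *sba,
                                     struct sba_request *req)
    {
            int ret;

            req->msg.error = 0;                     /* assumed reset */
            ret = mbox_send_message(sba->mchan, &req->msg);
            if (ret < 0) {
                    dev_err(sba->dev, "send message failed with error %d", ret);
                    return ret;
            }

            /* Controller-reported error, filled in during the send. */
            ret = req->msg.error;
            if (ret < 0)
                    dev_err(sba->dev, "message error %d", ret);

            /* Client owns TX-done signalling (knows_txdone == true). */
            mbox_client_txdone(sba->mchan, ret);

            return ret;
    }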
387 static void _sba_process_pending_requests(struct sba_device *sba) in _sba_process_pending_requests() argument
395 while (!list_empty(&sba->reqs_pending_list) && count) { in _sba_process_pending_requests()
397 req = list_first_entry(&sba->reqs_pending_list, in _sba_process_pending_requests()
401 if (!_sba_active_request(sba, req)) in _sba_process_pending_requests()
405 ret = sba_send_mbox_request(sba, req); in _sba_process_pending_requests()
407 _sba_pending_request(sba, req); in _sba_process_pending_requests()
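Lines 387-407 are a budgeted drain of the pending list: pop the head, try to activate it (the fence may refuse), hand it to the mailbox, and on a send failure push it back to pending and stop. The budget constant below is named after the upstream driver; its value is not shown in this listing.

    static void _sba_process_pending_requests(struct sba_device *sba)
    {
            int ret;
            u32 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;  /* value assumed */
            struct sba_request *req;

            while (!list_empty(&sba->reqs_pending_list) && count) {
                    req = list_first_entry(&sba->reqs_pending_list,
                                           struct sba_request, node);

                    /* A raised fence keeps later requests out of hardware. */
                    if (!_sba_active_request(sba, req))
                            break;

                    ret = sba_send_mbox_request(sba, req);
                    if (ret < 0) {
                            /* Requeue and retry on the next kick. */
                            _sba_pending_request(sba, req);
                            break;
                    }

                    count--;
            }
    }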
415 static void sba_process_received_request(struct sba_device *sba, in sba_process_received_request() argument
428 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
430 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
439 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
443 _sba_free_request(sba, nreq); in sba_process_received_request()
447 _sba_free_request(sba, first); in sba_process_received_request()
450 _sba_process_pending_requests(sba); in sba_process_received_request()
452 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
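Lines 415-452 handle a completed message: the cookie is completed under reqs_lock, the lock is dropped for the client callback, then reacquired to free the whole chain and immediately refill the hardware via _sba_process_pending_requests(). A simplified sketch (the original's cookie-validity check is omitted); first, next, and next_pending_count are the assumed chain fields from the chaining sketch above.

    static void sba_process_received_request(struct sba_device *sba,
                                             struct sba_request *req)
    {
            unsigned long flags;
            struct sba_request *nreq, *first = req->first;
            struct dma_async_tx_descriptor *tx = &first->tx;

            /* Complete only when every chained request has come back. */
            if (atomic_dec_return(&first->next_pending_count))
                    return;

            spin_lock_irqsave(&sba->reqs_lock, flags);
            dma_cookie_complete(tx);
            spin_unlock_irqrestore(&sba->reqs_lock, flags);

            /* Client callback runs without the lock held. */
            dmaengine_desc_get_callback_invoke(tx, NULL);

            spin_lock_irqsave(&sba->reqs_lock, flags);

            /* Free all requests chained to the first request. */
            list_for_each_entry(nreq, &first->next, next)
                    _sba_free_request(sba, nreq);
            INIT_LIST_HEAD(&first->next);

            /* Free the first request, then refill the hardware. */
            _sba_free_request(sba, first);
            _sba_process_pending_requests(sba);

            spin_unlock_irqrestore(&sba->reqs_lock, flags);
    }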
456 static void sba_write_stats_in_seqfile(struct sba_device *sba, in sba_write_stats_in_seqfile() argument
464 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
466 list_for_each_entry(req, &sba->reqs_free_list, node) in sba_write_stats_in_seqfile()
470 list_for_each_entry(req, &sba->reqs_alloc_list, node) in sba_write_stats_in_seqfile()
473 list_for_each_entry(req, &sba->reqs_pending_list, node) in sba_write_stats_in_seqfile()
476 list_for_each_entry(req, &sba->reqs_active_list, node) in sba_write_stats_in_seqfile()
479 list_for_each_entry(req, &sba->reqs_aborted_list, node) in sba_write_stats_in_seqfile()
482 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
484 seq_printf(file, "maximum requests = %d\n", sba->max_req); in sba_write_stats_in_seqfile()
515 struct sba_device *sba = to_sba_device(dchan); in sba_issue_pending() local
518 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_issue_pending()
519 _sba_process_pending_requests(sba); in sba_issue_pending()
520 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_issue_pending()
527 struct sba_device *sba; in sba_tx_submit() local
533 sba = to_sba_device(tx->chan); in sba_tx_submit()
537 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_tx_submit()
539 _sba_pending_request(sba, req); in sba_tx_submit()
541 _sba_pending_request(sba, nreq); in sba_tx_submit()
542 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_tx_submit()
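Lines 515-542 split submission in two: sba_tx_submit() only assigns a cookie and parks the request (plus its chain) on the pending list under one lock hold, while sba_issue_pending() later drains that list into the mailbox. A sketch of the submit side; to_sba_request() is an assumed container_of-style helper.

    static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
    {
            unsigned long flags;
            dma_cookie_t cookie;
            struct sba_device *sba;
            struct sba_request *req, *nreq;

            if (unlikely(!tx))
                    return -EINVAL;

            sba = to_sba_device(tx->chan);
            req = to_sba_request(tx);               /* assumed helper */

            /* Assign cookie and mark all chained requests pending. */
            spin_lock_irqsave(&sba->reqs_lock, flags);
            cookie = dma_cookie_assign(tx);
            _sba_pending_request(sba, req);
            list_for_each_entry(nreq, &req->next, next)
                    _sba_pending_request(sba, nreq);
            spin_unlock_irqrestore(&sba->reqs_lock, flags);

            return cookie;
    }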
552 struct sba_device *sba = to_sba_device(dchan); in sba_tx_status() local
558 mbox_client_peek_data(sba->mchan); in sba_tx_status()
575 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
586 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
592 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
604 if (req->sba->hw_resp_size) { in sba_fillup_interrupt_msg()
607 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
611 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
616 msg->sba.cmds = cmds; in sba_fillup_interrupt_msg()
617 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_interrupt_msg()
626 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_interrupt() local
629 req = sba_alloc_request(sba); in sba_prep_dma_interrupt()
692 if (req->sba->hw_resp_size) { in sba_fillup_memcpy_msg()
695 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_memcpy_msg()
704 msg->sba.cmds = cmds; in sba_fillup_memcpy_msg()
705 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_memcpy_msg()
711 sba_prep_dma_memcpy_req(struct sba_device *sba, in sba_prep_dma_memcpy_req() argument
718 req = sba_alloc_request(sba); in sba_prep_dma_memcpy_req()
741 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_memcpy() local
746 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_memcpy()
748 req = sba_prep_dma_memcpy_req(sba, off, dst, src, in sba_prep_dma_memcpy()
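Lines 741-748 show how an arbitrarily long copy is split into hw_buf_size chunks, one request per chunk, chained behind the first. A reconstruction; the failure cleanup through sba_free_chained_requests() is an assumption consistent with lines 296-306.

    static struct dma_async_tx_descriptor *
    sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
                        size_t len, unsigned long flags)
    {
            size_t req_len;
            dma_addr_t off = 0;
            struct sba_device *sba = to_sba_device(dchan);
            struct sba_request *first = NULL, *req;

            /* Chain requests, each covering up to hw_buf_size bytes. */
            while (len) {
                    req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

                    req = sba_prep_dma_memcpy_req(sba, off, dst, src,
                                                  req_len, flags);
                    if (!req) {
                            if (first)
                                    sba_free_chained_requests(first);
                            return NULL;
                    }

                    if (first)
                            sba_chain_request(first, req);
                    else
                            first = req;

                    off += req_len;
                    len -= req_len;
            }

            return (first) ? &first->tx : NULL;
    }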
831 if (req->sba->hw_resp_size) { in sba_fillup_xor_msg()
834 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_xor_msg()
843 msg->sba.cmds = cmds; in sba_fillup_xor_msg()
844 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_xor_msg()
850 sba_prep_dma_xor_req(struct sba_device *sba, in sba_prep_dma_xor_req() argument
857 req = sba_alloc_request(sba); in sba_prep_dma_xor_req()
880 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_xor() local
884 if (unlikely(src_cnt > sba->max_xor_srcs)) in sba_prep_dma_xor()
889 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_xor()
891 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt, in sba_prep_dma_xor()
1014 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1017 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1041 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1044 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1054 msg->sba.cmds = cmds; in sba_fillup_pq_msg()
1055 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_msg()
1061 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, in sba_prep_dma_pq_req() argument
1068 req = sba_alloc_request(sba); in sba_prep_dma_pq_req()
1174 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1177 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1202 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1203 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1231 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1232 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1295 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1298 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1308 msg->sba.cmds = cmds; in sba_fillup_pq_single_msg()
1309 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_single_msg()
1315 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, in sba_prep_dma_pq_single_req() argument
1323 req = sba_alloc_request(sba); in sba_prep_dma_pq_single_req()
1350 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_pq() local
1354 if (unlikely(src_cnt > sba->max_pq_srcs)) in sba_prep_dma_pq()
1357 if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) in sba_prep_dma_pq()
1368 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_pq()
1384 req = sba_prep_dma_pq_single_req(sba, in sba_prep_dma_pq()
1402 req = sba_prep_dma_pq_single_req(sba, in sba_prep_dma_pq()
1416 req = sba_prep_dma_pq_req(sba, off, in sba_prep_dma_pq()
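Lines 1350-1416 show the PQ dispatch: if any coefficient's log (raid6_gflog[scf[i]]) reaches max_pq_coefs, the hardware cannot apply it in one pass, so the driver falls back to one fenced single-source request per input buffer; otherwise one multi-source request covers the chunk. A simplified sketch: the original also special-cases a Q destination that aliases one of the sources, which is omitted here.

    static struct dma_async_tx_descriptor *
    sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
                    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
    {
            u32 i;
            size_t req_len;
            bool slow = false;
            dma_addr_t off = 0;
            dma_addr_t *dst_p = NULL, *dst_q = NULL;
            struct sba_device *sba = to_sba_device(dchan);
            struct sba_request *first = NULL, *req;

            /* Sanity checks: source count and coefficient range. */
            if (unlikely(src_cnt > sba->max_pq_srcs))
                    return NULL;
            for (i = 0; i < src_cnt; i++)
                    if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
                            slow = true;

            if (!(flags & DMA_PREP_PQ_DISABLE_P))
                    dst_p = &dst[0];
            if (!(flags & DMA_PREP_PQ_DISABLE_Q))
                    dst_q = &dst[1];

            /* Chain requests, each covering up to hw_buf_size bytes. */
            while (len) {
                    req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

                    if (slow) {
                            /* One fenced request per source buffer. */
                            for (i = 0; i < src_cnt; i++) {
                                    req = sba_prep_dma_pq_single_req(sba, off,
                                                    dst_p, dst_q, src[i],
                                                    scf[i], req_len,
                                                    flags | DMA_PREP_FENCE);
                                    if (!req)
                                            goto fail;
                                    if (first)
                                            sba_chain_request(first, req);
                                    else
                                            first = req;
                            }
                    } else {
                            req = sba_prep_dma_pq_req(sba, off, dst, src,
                                                      src_cnt, scf, req_len,
                                                      flags);
                            if (!req)
                                    goto fail;
                            if (first)
                                    sba_chain_request(first, req);
                            else
                                    first = req;
                    }

                    off += req_len;
                    len -= req_len;
            }

            return (first) ? &first->tx : NULL;

    fail:
            if (first)
                    sba_free_chained_requests(first);
            return NULL;
    }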
1446 struct sba_device *sba = req->sba; in sba_receive_message() local
1450 dev_err(sba->dev, "%s got message with error %d", in sba_receive_message()
1451 dma_chan_name(&sba->dma_chan), m->error); in sba_receive_message()
1454 sba_process_received_request(sba, req); in sba_receive_message()
1461 struct sba_device *sba = dev_get_drvdata(file->private); in sba_debugfs_stats_show() local
1464 sba_write_stats_in_seqfile(sba, file); in sba_debugfs_stats_show()
1471 static int sba_prealloc_channel_resources(struct sba_device *sba) in sba_prealloc_channel_resources() argument
1476 sba->resp_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1477 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1478 &sba->resp_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1479 if (!sba->resp_base) in sba_prealloc_channel_resources()
1482 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1483 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1484 &sba->cmds_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1485 if (!sba->cmds_base) { in sba_prealloc_channel_resources()
1490 spin_lock_init(&sba->reqs_lock); in sba_prealloc_channel_resources()
1491 sba->reqs_fence = false; in sba_prealloc_channel_resources()
1492 INIT_LIST_HEAD(&sba->reqs_alloc_list); in sba_prealloc_channel_resources()
1493 INIT_LIST_HEAD(&sba->reqs_pending_list); in sba_prealloc_channel_resources()
1494 INIT_LIST_HEAD(&sba->reqs_active_list); in sba_prealloc_channel_resources()
1495 INIT_LIST_HEAD(&sba->reqs_aborted_list); in sba_prealloc_channel_resources()
1496 INIT_LIST_HEAD(&sba->reqs_free_list); in sba_prealloc_channel_resources()
1498 for (i = 0; i < sba->max_req; i++) { in sba_prealloc_channel_resources()
1499 req = devm_kzalloc(sba->dev, in sba_prealloc_channel_resources()
1500 struct_size(req, cmds, sba->max_cmd_per_req), in sba_prealloc_channel_resources()
1507 req->sba = sba; in sba_prealloc_channel_resources()
1511 for (j = 0; j < sba->max_cmd_per_req; j++) { in sba_prealloc_channel_resources()
1513 req->cmds[j].cmd_dma = sba->cmds_base + in sba_prealloc_channel_resources()
1514 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1515 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + in sba_prealloc_channel_resources()
1516 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1520 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_prealloc_channel_resources()
1523 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; in sba_prealloc_channel_resources()
1524 list_add_tail(&req->node, &sba->reqs_free_list); in sba_prealloc_channel_resources()
1530 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1531 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1532 sba->cmds_base, sba->cmds_dma_base); in sba_prealloc_channel_resources()
1534 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1535 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1536 sba->resp_base, sba->resp_dma_base); in sba_prealloc_channel_resources()
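Worked through, the carving in lines 1513-1523 places command j of request i at byte offset (i * max_cmd_per_req + j) * sizeof(u64) into the cmds pool, and request i's hardware response slot at i * hw_resp_size into the response pool. With the SBA_VER_2 limits from probe (hw_resp_size = 8 and max_cmd_per_req = max_pq_srcs + 3 = 15, per lines 1661-1674), request 2's command 3 lands at (2 * 15 + 3) * 8 = 264 bytes, and its response slot at 16 bytes.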
1540 static void sba_freeup_channel_resources(struct sba_device *sba) in sba_freeup_channel_resources() argument
1542 dmaengine_terminate_all(&sba->dma_chan); in sba_freeup_channel_resources()
1543 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, in sba_freeup_channel_resources()
1544 sba->cmds_base, sba->cmds_dma_base); in sba_freeup_channel_resources()
1545 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, in sba_freeup_channel_resources()
1546 sba->resp_base, sba->resp_dma_base); in sba_freeup_channel_resources()
1547 sba->resp_base = NULL; in sba_freeup_channel_resources()
1548 sba->resp_dma_base = 0; in sba_freeup_channel_resources()
1551 static int sba_async_register(struct sba_device *sba) in sba_async_register() argument
1554 struct dma_device *dma_dev = &sba->dma_dev; in sba_async_register()
1557 sba->dma_chan.device = dma_dev; in sba_async_register()
1558 dma_cookie_init(&sba->dma_chan); in sba_async_register()
1572 dma_dev->dev = sba->mbox_dev; in sba_async_register()
1591 dma_dev->max_xor = sba->max_xor_srcs; in sba_async_register()
1597 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); in sba_async_register()
1602 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); in sba_async_register()
1607 dev_err(sba->dev, "async device register error %d", ret); in sba_async_register()
1611 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", in sba_async_register()
1612 dma_chan_name(&sba->dma_chan), in sba_async_register()
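Registration order in lines 1551-1612: the single channel is initialised and added to dma_dev->channels before dma_async_device_register(), and the source limits derived in probe are advertised through max_xor and dma_set_maxpq(). The 0 passed to dma_set_maxpq() (line 1597) declares that the engine has no native PQ-continuation support.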
1624 struct sba_device *sba; in sba_probe() local
1629 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); in sba_probe()
1630 if (!sba) in sba_probe()
1633 sba->dev = &pdev->dev; in sba_probe()
1634 platform_set_drvdata(pdev, sba); in sba_probe()
1643 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) in sba_probe()
1644 sba->ver = SBA_VER_1; in sba_probe()
1645 else if (of_device_is_compatible(sba->dev->of_node, in sba_probe()
1647 sba->ver = SBA_VER_2; in sba_probe()
1652 switch (sba->ver) { in sba_probe()
1654 sba->hw_buf_size = 4096; in sba_probe()
1655 sba->hw_resp_size = 8; in sba_probe()
1656 sba->max_pq_coefs = 6; in sba_probe()
1657 sba->max_pq_srcs = 6; in sba_probe()
1660 sba->hw_buf_size = 4096; in sba_probe()
1661 sba->hw_resp_size = 8; in sba_probe()
1662 sba->max_pq_coefs = 30; in sba_probe()
1668 sba->max_pq_srcs = 12; in sba_probe()
1673 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; in sba_probe()
1674 sba->max_cmd_per_req = sba->max_pq_srcs + 3; in sba_probe()
1675 sba->max_xor_srcs = sba->max_cmd_per_req - 1; in sba_probe()
1676 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; in sba_probe()
1677 sba->max_cmds_pool_size = sba->max_req * in sba_probe()
1678 sba->max_cmd_per_req * sizeof(u64); in sba_probe()
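The arithmetic in lines 1673-1678 follows from the version limits chosen just above: for SBA_VER_2, max_cmd_per_req = max_pq_srcs + 3 = 15 and max_xor_srcs = 15 - 1 = 14, so the response pool is max_req * 8 bytes and the command pool is max_req * 15 * sizeof(u64) = max_req * 120 bytes. The value of SBA_MAX_REQ_PER_MBOX_CHANNEL itself is not shown in this listing.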
1681 sba->client.dev = &pdev->dev; in sba_probe()
1682 sba->client.rx_callback = sba_receive_message; in sba_probe()
1683 sba->client.tx_block = false; in sba_probe()
1684 sba->client.knows_txdone = true; in sba_probe()
1685 sba->client.tx_tout = 0; in sba_probe()
1688 sba->mchan = mbox_request_channel(&sba->client, 0); in sba_probe()
1689 if (IS_ERR(sba->mchan)) { in sba_probe()
1690 ret = PTR_ERR(sba->mchan); in sba_probe()
1705 sba->mbox_dev = &mbox_pdev->dev; in sba_probe()
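The client configuration in lines 1681-1690 explains the send path above: with tx_block = false and knows_txdone = true, mbox_send_message() merely queues the message, the driver signals TX completion itself via mbox_client_txdone() (line 381), and completions arrive asynchronously through rx_callback, i.e. sba_receive_message() (lines 1446-1454).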
1708 ret = sba_prealloc_channel_resources(sba); in sba_probe()
1717 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); in sba_probe()
1720 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, in sba_probe()
1726 ret = sba_async_register(sba); in sba_probe()
1731 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", in sba_probe()
1732 dma_chan_name(&sba->dma_chan), sba->ver+1, in sba_probe()
1733 dev_name(sba->mbox_dev)); in sba_probe()
1738 debugfs_remove_recursive(sba->root); in sba_probe()
1739 sba_freeup_channel_resources(sba); in sba_probe()
1741 mbox_free_channel(sba->mchan); in sba_probe()
1747 struct sba_device *sba = platform_get_drvdata(pdev); in sba_remove() local
1749 dma_async_device_unregister(&sba->dma_dev); in sba_remove()
1751 debugfs_remove_recursive(sba->root); in sba_remove()
1753 sba_freeup_channel_resources(sba); in sba_remove()
1755 mbox_free_channel(sba->mchan); in sba_remove()