Lines matching refs: sba
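All hits below appear to come from the Broadcom SBA RAID offload driver (drivers/dma/bcm-sba-raid.c in the Linux tree). `sba` is the per-device context, `struct sba_device`; each `struct sba_request` carries a back-pointer to it (the `member` hit above). A hedged reconstruction of just the fields these references touch — assembled from the listing, not copied from the source; types are approximated and unrelated members omitted:

    /* hedged reconstruction from the hits below -- not a verbatim copy */
    struct sba_device {
            struct device *dev;               /* platform device */
            u32 ver;                          /* SBA_VER_1 or SBA_VER_2 */
            struct device *mbox_dev;          /* device used for coherent DMA pools */
            struct mbox_client client;        /* rx_callback, knows_txdone, ... */
            struct mbox_chan *mchan;
            struct dma_device dma_dev;
            struct dma_chan dma_chan;         /* single dmaengine channel */
            /* limits derived in sba_probe() */
            u32 max_req, max_cmd_per_req, max_xor_srcs, max_pq_srcs, max_pq_coefs;
            u32 hw_buf_size, hw_resp_size;
            u32 max_resp_pool_size, max_cmds_pool_size;
            /* preallocated coherent pools */
            void *resp_base;  dma_addr_t resp_dma_base;
            void *cmds_base;  dma_addr_t cmds_dma_base;
            /* request state machine, all guarded by reqs_lock */
            spinlock_t reqs_lock;
            bool reqs_fence;                  /* block activations until active list drains */
            struct list_head reqs_alloc_list, reqs_pending_list,
                             reqs_active_list, reqs_aborted_list,
                             reqs_free_list;
            struct dentry *root;              /* debugfs dir */
    };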
105 struct sba_device *sba; member
195 static struct sba_request *sba_alloc_request(struct sba_device *sba) in sba_alloc_request() argument
201 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_alloc_request()
202 list_for_each_entry(req, &sba->reqs_free_list, node) { in sba_alloc_request()
204 list_move_tail(&req->node, &sba->reqs_alloc_list); in sba_alloc_request()
209 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_alloc_request()
218 mbox_client_peek_data(sba->mchan); in sba_alloc_request()
227 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_alloc_request()
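sba_alloc_request() pops the first entry off reqs_free_list and moves it onto reqs_alloc_list under reqs_lock. When the free list is empty it calls mbox_client_peek_data(), which nudges the mailbox controller to deliver outstanding completions and thereby recycle requests back to the free list. A hedged sketch of the function's shape, reconstructed from the matched lines (initialisation details are elided):

    static struct sba_request *sba_alloc_request(struct sba_device *sba)
    {
            bool found = false;
            unsigned long flags;
            struct sba_request *req = NULL;

            spin_lock_irqsave(&sba->reqs_lock, flags);
            list_for_each_entry(req, &sba->reqs_free_list, node) {
                    /* take the first free request */
                    list_move_tail(&req->node, &sba->reqs_alloc_list);
                    found = true;
                    break;
            }
            spin_unlock_irqrestore(&sba->reqs_lock, flags);

            if (!found) {
                    /* no free request: ask the mailbox to flush completions */
                    mbox_client_peek_data(sba->mchan);
                    return NULL;
            }

            /* re-arm the dmaengine descriptor embedded in the request */
            dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
            return req;
    }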
234 static void _sba_pending_request(struct sba_device *sba, in _sba_pending_request() argument
237 lockdep_assert_held(&sba->reqs_lock); in _sba_pending_request()
240 list_move_tail(&req->node, &sba->reqs_pending_list); in _sba_pending_request()
241 if (list_empty(&sba->reqs_active_list)) in _sba_pending_request()
242 sba->reqs_fence = false; in _sba_pending_request()
246 static bool _sba_active_request(struct sba_device *sba, in _sba_active_request() argument
249 lockdep_assert_held(&sba->reqs_lock); in _sba_active_request()
250 if (list_empty(&sba->reqs_active_list)) in _sba_active_request()
251 sba->reqs_fence = false; in _sba_active_request()
252 if (sba->reqs_fence) in _sba_active_request()
256 list_move_tail(&req->node, &sba->reqs_active_list); in _sba_active_request()
258 sba->reqs_fence = true; in _sba_active_request()
263 static void _sba_abort_request(struct sba_device *sba, in _sba_abort_request() argument
266 lockdep_assert_held(&sba->reqs_lock); in _sba_abort_request()
269 list_move_tail(&req->node, &sba->reqs_aborted_list); in _sba_abort_request()
270 if (list_empty(&sba->reqs_active_list)) in _sba_abort_request()
271 sba->reqs_fence = false; in _sba_abort_request()
275 static void _sba_free_request(struct sba_device *sba, in _sba_free_request() argument
278 lockdep_assert_held(&sba->reqs_lock); in _sba_free_request()
281 list_move_tail(&req->node, &sba->reqs_free_list); in _sba_free_request()
282 if (list_empty(&sba->reqs_active_list)) in _sba_free_request()
283 sba->reqs_fence = false; in _sba_free_request()
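The four underscore-prefixed helpers (_sba_pending_request, _sba_active_request, _sba_abort_request, _sba_free_request) are the entire request state machine: each asserts reqs_lock with lockdep_assert_held() and moves the request between lists with list_move_tail(). The only logic beyond list motion is reqs_fence: once a fenced request is activated, _sba_active_request() refuses further activations, and every helper drops the flag as soon as reqs_active_list drains. A minimal compilable user-space model of that fencing rule (a counter stands in for the kernel lists; all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* toy model: only the fence logic, not the real lists or locking */
    struct model {
            int active;       /* requests on reqs_active_list */
            bool fence;       /* sba->reqs_fence */
    };

    /* mirrors _sba_active_request(): refuse activation while fenced */
    static bool activate(struct model *m, bool req_is_fenced)
    {
            if (m->active == 0)
                    m->fence = false;         /* active list drained: drop fence */
            if (m->fence)
                    return false;             /* caller keeps the request pending */
            m->active++;
            if (req_is_fenced)
                    m->fence = true;          /* nothing else activates until drain */
            return true;
    }

    static void complete_one(struct model *m)
    {
            if (m->active > 0)
                    m->active--;
    }

    int main(void)
    {
            struct model m = { 0, false };
            printf("plain: %d\n", activate(&m, false));       /* 1: activated */
            printf("fenced: %d\n", activate(&m, true));       /* 1: sets fence */
            printf("blocked: %d\n", activate(&m, false));     /* 0: fence holds */
            complete_one(&m); complete_one(&m);               /* drain active list */
            printf("after drain: %d\n", activate(&m, false)); /* 1 again */
            return 0;
    }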
290 struct sba_device *sba = req->sba; in sba_free_chained_requests() local
292 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_free_chained_requests()
294 _sba_free_request(sba, req); in sba_free_chained_requests()
296 _sba_free_request(sba, nreq); in sba_free_chained_requests()
298 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_free_chained_requests()
305 struct sba_device *sba = req->sba; in sba_chain_request() local
307 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_chain_request()
313 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_chain_request()
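sba_chain_request() links one request after another under reqs_lock — a single dmaengine descriptor can span several sba_requests when a transfer is split — and sba_free_chained_requests() returns a first request together with its whole chain (the `nreq` iteration above) to the free list in one lock hold.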
316 static void sba_cleanup_nonpending_requests(struct sba_device *sba) in sba_cleanup_nonpending_requests() argument
321 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
324 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) in sba_cleanup_nonpending_requests()
325 _sba_free_request(sba, req); in sba_cleanup_nonpending_requests()
328 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) in sba_cleanup_nonpending_requests()
329 _sba_abort_request(sba, req); in sba_cleanup_nonpending_requests()
336 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
339 static void sba_cleanup_pending_requests(struct sba_device *sba) in sba_cleanup_pending_requests() argument
344 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
347 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) in sba_cleanup_pending_requests()
348 _sba_free_request(sba, req); in sba_cleanup_pending_requests()
350 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
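Note the asymmetry between the two cleanup paths: requests that were only allocated can be freed outright, but active ones are merely moved to reqs_aborted_list — the hardware still owns their buffers, so they are recycled only when their completions eventually arrive. Pending requests were never handed to the mailbox, so sba_cleanup_pending_requests() frees them directly.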
353 static int sba_send_mbox_request(struct sba_device *sba, in sba_send_mbox_request() argument
360 ret = mbox_send_message(sba->mchan, &req->msg); in sba_send_mbox_request()
362 dev_err(sba->dev, "send message failed with error %d", ret); in sba_send_mbox_request()
369 dev_err(sba->dev, "message error %d", ret); in sba_send_mbox_request()
373 mbox_client_txdone(sba->mchan, ret); in sba_send_mbox_request()
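sba_send_mbox_request() is the only submit-side contact with the mailbox framework, and it uses the client-managed-TX pattern: tx_block = false and knows_txdone = true are set in sba_probe() (see the client setup near the end of this listing), so the driver must report TX completion itself via mbox_client_txdone(). A hedged sketch of the pattern; mbox_send_message() and mbox_client_txdone() are real mailbox API calls, while the synchronous check of req->msg.error is my reading of the surrounding lines:

    static int sba_send_mbox_request(struct sba_device *sba,
                                     struct sba_request *req)
    {
            int ret;

            /* hand the message to the mailbox controller */
            ret = mbox_send_message(sba->mchan, &req->msg);
            if (ret < 0) {
                    dev_err(sba->dev, "send message failed with error %d", ret);
                    return ret;
            }

            /* the controller may flag an error synchronously in the message */
            ret = req->msg.error;
            if (ret < 0)
                    dev_err(sba->dev, "message error %d", ret);

            /* knows_txdone == true: the client signals TX completion itself */
            mbox_client_txdone(sba->mchan, ret);
            return ret;
    }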
379 static void _sba_process_pending_requests(struct sba_device *sba) in _sba_process_pending_requests() argument
387 while (!list_empty(&sba->reqs_pending_list) && count) { in _sba_process_pending_requests()
389 req = list_first_entry(&sba->reqs_pending_list, in _sba_process_pending_requests()
393 if (!_sba_active_request(sba, req)) in _sba_process_pending_requests()
397 ret = sba_send_mbox_request(sba, req); in _sba_process_pending_requests()
399 _sba_pending_request(sba, req); in _sba_process_pending_requests()
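_sba_process_pending_requests() drains at most a fixed budget (`count`, whose initial value isn't visible in this listing) per invocation: it peels the head of reqs_pending_list, tries to activate it (which fails while a fence is up), and sends it to the mailbox; if the send fails, the request is pushed back onto the pending list rather than dropped.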
407 static void sba_process_received_request(struct sba_device *sba, in sba_process_received_request() argument
420 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
422 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
431 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
435 _sba_free_request(sba, nreq); in sba_process_received_request()
439 _sba_free_request(sba, first); in sba_process_received_request()
442 _sba_process_pending_requests(sba); in sba_process_received_request()
444 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
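sba_process_received_request() is the completion half: when the final message of a chain arrives it takes reqs_lock, frees the chained requests (`nreq`) and then the `first` request back to the free list, and immediately runs _sba_process_pending_requests() — completions are what kick the next batch of pending work, so throughput doesn't depend on fresh submissions arriving.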
448 static void sba_write_stats_in_seqfile(struct sba_device *sba, in sba_write_stats_in_seqfile() argument
456 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
458 list_for_each_entry(req, &sba->reqs_free_list, node) in sba_write_stats_in_seqfile()
462 list_for_each_entry(req, &sba->reqs_alloc_list, node) in sba_write_stats_in_seqfile()
465 list_for_each_entry(req, &sba->reqs_pending_list, node) in sba_write_stats_in_seqfile()
468 list_for_each_entry(req, &sba->reqs_active_list, node) in sba_write_stats_in_seqfile()
471 list_for_each_entry(req, &sba->reqs_aborted_list, node) in sba_write_stats_in_seqfile()
474 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
476 seq_printf(file, "maximum requests = %d\n", sba->max_req); in sba_write_stats_in_seqfile()
507 struct sba_device *sba = to_sba_device(dchan); in sba_issue_pending() local
510 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_issue_pending()
511 _sba_process_pending_requests(sba); in sba_issue_pending()
512 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_issue_pending()
519 struct sba_device *sba; in sba_tx_submit() local
525 sba = to_sba_device(tx->chan); in sba_tx_submit()
529 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_tx_submit()
531 _sba_pending_request(sba, req); in sba_tx_submit()
533 _sba_pending_request(sba, nreq); in sba_tx_submit()
534 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_tx_submit()
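The dmaengine-facing entry points stay thin: sba_tx_submit() takes reqs_lock and moves the request plus every chained request onto reqs_pending_list (cookie assignment presumably happens here too, though those lines didn't match `sba`), while sba_issue_pending() just runs _sba_process_pending_requests() under the same lock.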
544 struct sba_device *sba = to_sba_device(dchan); in sba_tx_status() local
550 mbox_client_peek_data(sba->mchan); in sba_tx_status()
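sba_tx_status() never blocks: it pokes mbox_client_peek_data() so that polling for a cookie also drives completion processing, then presumably returns whatever the generic dma_cookie_status() helper reports.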
567 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
578 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
584 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
596 if (req->sba->hw_resp_size) { in sba_fillup_interrupt_msg()
599 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
603 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
608 msg->sba.cmds = cmds; in sba_fillup_interrupt_msg()
609 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_interrupt_msg()
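Every sba_fillup_*_msg() helper below follows the same template: build u64 commands in a local array with sba_cmd_enc(), record per-command buffer and response lengths (hw_resp_size shows up wherever the hardware writes a completion status back), and finish by pointing msg->sba.cmds at the array with msg->sba.cmds_count set from how far the cmdsp cursor advanced.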
618 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_interrupt() local
621 req = sba_alloc_request(sba); in sba_prep_dma_interrupt()
684 if (req->sba->hw_resp_size) { in sba_fillup_memcpy_msg()
687 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_memcpy_msg()
696 msg->sba.cmds = cmds; in sba_fillup_memcpy_msg()
697 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_memcpy_msg()
703 sba_prep_dma_memcpy_req(struct sba_device *sba, in sba_prep_dma_memcpy_req() argument
710 req = sba_alloc_request(sba); in sba_prep_dma_memcpy_req()
733 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_memcpy() local
738 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_memcpy()
740 req = sba_prep_dma_memcpy_req(sba, off, dst, src, in sba_prep_dma_memcpy()
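sba_prep_dma_memcpy() shows the splitting pattern every prep routine here repeats: a transfer longer than sba->hw_buf_size (4096 bytes on both hardware versions, per sba_probe() below) is cut into hw_buf_size-sized sub-requests that get chained together. A compilable sketch of just the splitting arithmetic (names illustrative):

    #include <stdio.h>
    #include <stddef.h>

    /* split a copy of `len` bytes into chunks of at most hw_buf_size,
     * the way the prep routines advance `off` and shrink `len` */
    static void plan_chunks(size_t len, size_t hw_buf_size)
    {
            size_t off = 0;

            while (len) {
                    size_t req_len = (len < hw_buf_size) ? len : hw_buf_size;

                    printf("sub-request: offset %zu, length %zu\n", off, req_len);
                    off += req_len;
                    len -= req_len;
            }
    }

    int main(void)
    {
            plan_chunks(10000, 4096);   /* -> 4096 + 4096 + 1808 */
            return 0;
    }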
823 if (req->sba->hw_resp_size) { in sba_fillup_xor_msg()
826 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_xor_msg()
835 msg->sba.cmds = cmds; in sba_fillup_xor_msg()
836 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_xor_msg()
842 sba_prep_dma_xor_req(struct sba_device *sba, in sba_prep_dma_xor_req() argument
849 req = sba_alloc_request(sba); in sba_prep_dma_xor_req()
872 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_xor() local
876 if (unlikely(src_cnt > sba->max_xor_srcs)) in sba_prep_dma_xor()
881 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_xor()
883 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt, in sba_prep_dma_xor()
1006 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1009 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1033 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1036 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1046 msg->sba.cmds = cmds; in sba_fillup_pq_msg()
1047 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_msg()
1053 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, in sba_prep_dma_pq_req() argument
1060 req = sba_alloc_request(sba); in sba_prep_dma_pq_req()
1166 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1169 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1194 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1195 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1223 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1224 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1287 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1290 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1300 msg->sba.cmds = cmds; in sba_fillup_pq_single_msg()
1301 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_single_msg()
1307 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, in sba_prep_dma_pq_single_req() argument
1315 req = sba_alloc_request(sba); in sba_prep_dma_pq_single_req()
1342 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_pq() local
1346 if (unlikely(src_cnt > sba->max_pq_srcs)) in sba_prep_dma_pq()
1349 if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) in sba_prep_dma_pq()
1360 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_pq()
1376 req = sba_prep_dma_pq_single_req(sba, in sba_prep_dma_pq()
1394 req = sba_prep_dma_pq_single_req(sba, in sba_prep_dma_pq()
1408 req = sba_prep_dma_pq_req(sba, off, in sba_prep_dma_pq()
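sba_prep_dma_pq() validates src_cnt against max_pq_srcs, then picks a strategy: if any RAID6 coefficient's GF(256) log (raid6_gflog[scf[i]]) doesn't fit within max_pq_coefs, it falls back to per-source single requests (the two sba_prep_dma_pq_single_req() call sites, which from context appear to cover the P and Q outputs separately); otherwise one sba_prep_dma_pq_req() batches all sources. Either way the length is chunked by hw_buf_size exactly as in the memcpy and xor paths.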
1438 struct sba_device *sba = req->sba; in sba_receive_message() local
1442 dev_err(sba->dev, "%s got message with error %d", in sba_receive_message()
1443 dma_chan_name(&sba->dma_chan), m->error); in sba_receive_message()
1446 sba_process_received_request(sba, req); in sba_receive_message()
1453 struct sba_device *sba = dev_get_drvdata(file->private); in sba_debugfs_stats_show() local
1456 sba_write_stats_in_seqfile(sba, file); in sba_debugfs_stats_show()
1463 static int sba_prealloc_channel_resources(struct sba_device *sba) in sba_prealloc_channel_resources() argument
1468 sba->resp_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1469 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1470 &sba->resp_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1471 if (!sba->resp_base) in sba_prealloc_channel_resources()
1474 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1475 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1476 &sba->cmds_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1477 if (!sba->cmds_base) { in sba_prealloc_channel_resources()
1482 spin_lock_init(&sba->reqs_lock); in sba_prealloc_channel_resources()
1483 sba->reqs_fence = false; in sba_prealloc_channel_resources()
1484 INIT_LIST_HEAD(&sba->reqs_alloc_list); in sba_prealloc_channel_resources()
1485 INIT_LIST_HEAD(&sba->reqs_pending_list); in sba_prealloc_channel_resources()
1486 INIT_LIST_HEAD(&sba->reqs_active_list); in sba_prealloc_channel_resources()
1487 INIT_LIST_HEAD(&sba->reqs_aborted_list); in sba_prealloc_channel_resources()
1488 INIT_LIST_HEAD(&sba->reqs_free_list); in sba_prealloc_channel_resources()
1490 for (i = 0; i < sba->max_req; i++) { in sba_prealloc_channel_resources()
1491 req = devm_kzalloc(sba->dev, in sba_prealloc_channel_resources()
1492 struct_size(req, cmds, sba->max_cmd_per_req), in sba_prealloc_channel_resources()
1499 req->sba = sba; in sba_prealloc_channel_resources()
1503 for (j = 0; j < sba->max_cmd_per_req; j++) { in sba_prealloc_channel_resources()
1505 req->cmds[j].cmd_dma = sba->cmds_base + in sba_prealloc_channel_resources()
1506 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1507 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + in sba_prealloc_channel_resources()
1508 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1512 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_prealloc_channel_resources()
1515 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; in sba_prealloc_channel_resources()
1516 list_add_tail(&req->node, &sba->reqs_free_list); in sba_prealloc_channel_resources()
1522 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1523 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1524 sba->cmds_base, sba->cmds_dma_base); in sba_prealloc_channel_resources()
1526 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1527 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1528 sba->resp_base, sba->resp_dma_base); in sba_prealloc_channel_resources()
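sba_prealloc_channel_resources() carves two coherent DMA pools up front and hands out fixed slices: request i gets response slot resp_dma_base + i * hw_resp_size, and its command j lives at cmds_base + (i * max_cmd_per_req + j) * sizeof(u64) — a flattened [max_req][max_cmd_per_req] array of u64 commands. A compilable demonstration of that addressing, with plain calloc standing in for dma_alloc_coherent and toy sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int max_req = 4, max_cmd_per_req = 3;  /* toy sizes */
            uint64_t *cmds_base = calloc(max_req * max_cmd_per_req,
                                         sizeof(uint64_t));

            if (!cmds_base)
                    return 1;

            /* same layout as the driver: command j of request i */
            for (unsigned int i = 0; i < max_req; i++)
                    for (unsigned int j = 0; j < max_cmd_per_req; j++) {
                            uint64_t *cmd = cmds_base + (i * max_cmd_per_req + j);
                            *cmd = ((uint64_t)i << 32) | j; /* stamp slot identity */
                    }

            printf("req 2, cmd 1 -> offset %zu bytes, value %#llx\n",
                   (size_t)(2 * max_cmd_per_req + 1) * sizeof(uint64_t),
                   (unsigned long long)cmds_base[2 * max_cmd_per_req + 1]);
            free(cmds_base);
            return 0;
    }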
1532 static void sba_freeup_channel_resources(struct sba_device *sba) in sba_freeup_channel_resources() argument
1534 dmaengine_terminate_all(&sba->dma_chan); in sba_freeup_channel_resources()
1535 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, in sba_freeup_channel_resources()
1536 sba->cmds_base, sba->cmds_dma_base); in sba_freeup_channel_resources()
1537 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, in sba_freeup_channel_resources()
1538 sba->resp_base, sba->resp_dma_base); in sba_freeup_channel_resources()
1539 sba->resp_base = NULL; in sba_freeup_channel_resources()
1540 sba->resp_dma_base = 0; in sba_freeup_channel_resources()
1543 static int sba_async_register(struct sba_device *sba) in sba_async_register() argument
1546 struct dma_device *dma_dev = &sba->dma_dev; in sba_async_register()
1549 sba->dma_chan.device = dma_dev; in sba_async_register()
1550 dma_cookie_init(&sba->dma_chan); in sba_async_register()
1564 dma_dev->dev = sba->mbox_dev; in sba_async_register()
1583 dma_dev->max_xor = sba->max_xor_srcs; in sba_async_register()
1589 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); in sba_async_register()
1594 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); in sba_async_register()
1599 dev_err(sba->dev, "async device register error %d", ret); in sba_async_register()
1603 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", in sba_async_register()
1604 dma_chan_name(&sba->dma_chan), in sba_async_register()
1616 struct sba_device *sba; in sba_probe() local
1621 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); in sba_probe()
1622 if (!sba) in sba_probe()
1625 sba->dev = &pdev->dev; in sba_probe()
1626 platform_set_drvdata(pdev, sba); in sba_probe()
1635 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) in sba_probe()
1636 sba->ver = SBA_VER_1; in sba_probe()
1637 else if (of_device_is_compatible(sba->dev->of_node, in sba_probe()
1639 sba->ver = SBA_VER_2; in sba_probe()
1644 switch (sba->ver) { in sba_probe()
1646 sba->hw_buf_size = 4096; in sba_probe()
1647 sba->hw_resp_size = 8; in sba_probe()
1648 sba->max_pq_coefs = 6; in sba_probe()
1649 sba->max_pq_srcs = 6; in sba_probe()
1652 sba->hw_buf_size = 4096; in sba_probe()
1653 sba->hw_resp_size = 8; in sba_probe()
1654 sba->max_pq_coefs = 30; in sba_probe()
1660 sba->max_pq_srcs = 12; in sba_probe()
1665 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; in sba_probe()
1666 sba->max_cmd_per_req = sba->max_pq_srcs + 3; in sba_probe()
1667 sba->max_xor_srcs = sba->max_cmd_per_req - 1; in sba_probe()
1668 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; in sba_probe()
1669 sba->max_cmds_pool_size = sba->max_req * in sba_probe()
1670 sba->max_cmd_per_req * sizeof(u64); in sba_probe()
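The sizing block above follows mechanically from the per-version constants: max_cmd_per_req = max_pq_srcs + 3 (the purpose of the three extra command slots isn't visible in this listing), max_xor_srcs = max_cmd_per_req - 1, and the two pool sizes multiply out from max_req. A compilable check of the SBA_VER_2 numbers; SBA_MAX_REQ_PER_MBOX_CHANNEL's value isn't shown here, so the 1024 is a placeholder, not the real constant:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* placeholder: real SBA_MAX_REQ_PER_MBOX_CHANNEL not in listing */
            uint32_t max_req = 1024;
            uint32_t hw_resp_size = 8;

            /* SBA_VER_2 values from sba_probe() */
            uint32_t max_pq_srcs = 12;
            uint32_t max_cmd_per_req = max_pq_srcs + 3;      /* 15 */
            uint32_t max_xor_srcs = max_cmd_per_req - 1;     /* 14 */

            printf("max_cmd_per_req = %u, max_xor_srcs = %u\n",
                   max_cmd_per_req, max_xor_srcs);
            printf("resp pool = %u bytes, cmds pool = %zu bytes\n",
                   max_req * hw_resp_size,
                   (size_t)max_req * max_cmd_per_req * sizeof(uint64_t));
            return 0;
    }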
1673 sba->client.dev = &pdev->dev; in sba_probe()
1674 sba->client.rx_callback = sba_receive_message; in sba_probe()
1675 sba->client.tx_block = false; in sba_probe()
1676 sba->client.knows_txdone = true; in sba_probe()
1677 sba->client.tx_tout = 0; in sba_probe()
1680 sba->mchan = mbox_request_channel(&sba->client, 0); in sba_probe()
1681 if (IS_ERR(sba->mchan)) { in sba_probe()
1682 ret = PTR_ERR(sba->mchan); in sba_probe()
1697 sba->mbox_dev = &mbox_pdev->dev; in sba_probe()
1700 ret = sba_prealloc_channel_resources(sba); in sba_probe()
1709 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); in sba_probe()
1712 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, in sba_probe()
1718 ret = sba_async_register(sba); in sba_probe()
1723 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", in sba_probe()
1724 dma_chan_name(&sba->dma_chan), sba->ver+1, in sba_probe()
1725 dev_name(sba->mbox_dev)); in sba_probe()
1730 debugfs_remove_recursive(sba->root); in sba_probe()
1731 sba_freeup_channel_resources(sba); in sba_probe()
1733 mbox_free_channel(sba->mchan); in sba_probe()
1739 struct sba_device *sba = platform_get_drvdata(pdev); in sba_remove() local
1741 dma_async_device_unregister(&sba->dma_dev); in sba_remove()
1743 debugfs_remove_recursive(sba->root); in sba_remove()
1745 sba_freeup_channel_resources(sba); in sba_remove()
1747 mbox_free_channel(sba->mchan); in sba_remove()
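sba_remove() unwinds sba_probe() in reverse: unregister the dmaengine device, remove the debugfs directory, free the coherent pools (sba_freeup_channel_resources() runs dmaengine_terminate_all() first), and finally release the mailbox channel. The error labels at the end of sba_probe() perform the same unwind for partially completed initialisation.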