Lines Matching refs:sba

103 struct sba_device *sba; member
193 static struct sba_request *sba_alloc_request(struct sba_device *sba) in sba_alloc_request() argument
199 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_alloc_request()
200 list_for_each_entry(req, &sba->reqs_free_list, node) { in sba_alloc_request()
202 list_move_tail(&req->node, &sba->reqs_alloc_list); in sba_alloc_request()
207 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_alloc_request()
216 mbox_client_peek_data(sba->mchan); in sba_alloc_request()
225 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_alloc_request()
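The fragments above (driver lines 193 to 225) outline sba_alloc_request(): pop a reusable request off reqs_free_list onto reqs_alloc_list under reqs_lock, poke the mailbox if nothing is free, and re-init the dmaengine descriptor. A minimal sketch of that pattern; the async_tx_test_ack() gate is not visible in the listing and is an assumption:

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {	/* assumed gate */
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/* Nothing free: ask the mailbox to deliver any pending
		 * completions so finished requests get recycled sooner. */
		mbox_client_peek_data(sba->mchan);
		return NULL;
	}

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	return req;
}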
232 static void _sba_pending_request(struct sba_device *sba, in _sba_pending_request() argument
235 lockdep_assert_held(&sba->reqs_lock); in _sba_pending_request()
238 list_move_tail(&req->node, &sba->reqs_pending_list); in _sba_pending_request()
239 if (list_empty(&sba->reqs_active_list)) in _sba_pending_request()
240 sba->reqs_fence = false; in _sba_pending_request()
244 static bool _sba_active_request(struct sba_device *sba, in _sba_active_request() argument
247 lockdep_assert_held(&sba->reqs_lock); in _sba_active_request()
248 if (list_empty(&sba->reqs_active_list)) in _sba_active_request()
249 sba->reqs_fence = false; in _sba_active_request()
250 if (sba->reqs_fence) in _sba_active_request()
254 list_move_tail(&req->node, &sba->reqs_active_list); in _sba_active_request()
256 sba->reqs_fence = true; in _sba_active_request()
261 static void _sba_abort_request(struct sba_device *sba, in _sba_abort_request() argument
264 lockdep_assert_held(&sba->reqs_lock); in _sba_abort_request()
267 list_move_tail(&req->node, &sba->reqs_aborted_list); in _sba_abort_request()
268 if (list_empty(&sba->reqs_active_list)) in _sba_abort_request()
269 sba->reqs_fence = false; in _sba_abort_request()
273 static void _sba_free_request(struct sba_device *sba, in _sba_free_request() argument
276 lockdep_assert_held(&sba->reqs_lock); in _sba_free_request()
279 list_move_tail(&req->node, &sba->reqs_free_list); in _sba_free_request()
280 if (list_empty(&sba->reqs_active_list)) in _sba_free_request()
281 sba->reqs_fence = false; in _sba_free_request()
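The four helpers at driver lines 232 to 281 implement the request state machine: a request moves between reqs_alloc_list, reqs_pending_list, reqs_active_list, reqs_aborted_list and reqs_free_list under reqs_lock, and every transition clears reqs_fence once the active list drains. Only _sba_active_request() can refuse a transition: while a fence is up, nothing new activates. A condensed sketch; the SBA_REQUEST_FENCE flag test is an assumption, since the listing omits the line that decides when to raise the fence:

static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;	/* nothing in flight */
	if (sba->reqs_fence)
		return false;			/* fenced: stay pending */
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)	/* assumed flag name */
		sba->reqs_fence = true;
	return true;
}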
288 struct sba_device *sba = req->sba; in sba_free_chained_requests() local
290 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_free_chained_requests()
292 _sba_free_request(sba, req); in sba_free_chained_requests()
294 _sba_free_request(sba, nreq); in sba_free_chained_requests()
296 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_free_chained_requests()
303 struct sba_device *sba = req->sba; in sba_chain_request() local
305 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_chain_request()
311 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_chain_request()
314 static void sba_cleanup_nonpending_requests(struct sba_device *sba) in sba_cleanup_nonpending_requests() argument
319 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
322 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) in sba_cleanup_nonpending_requests()
323 _sba_free_request(sba, req); in sba_cleanup_nonpending_requests()
326 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) in sba_cleanup_nonpending_requests()
327 _sba_abort_request(sba, req); in sba_cleanup_nonpending_requests()
334 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
337 static void sba_cleanup_pending_requests(struct sba_device *sba) in sba_cleanup_pending_requests() argument
342 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
345 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) in sba_cleanup_pending_requests()
346 _sba_free_request(sba, req); in sba_cleanup_pending_requests()
348 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
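Both cleanup paths (driver lines 314 to 348) walk with list_for_each_entry_safe() because each _sba_free_request()/_sba_abort_request() call unlinks the entry mid-walk. A sketch of the nonpending variant as the fragments show it:

static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Allocated but never submitted: recycle immediately. */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Already on the hardware: can only be marked aborted. */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}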
351 static int sba_send_mbox_request(struct sba_device *sba, in sba_send_mbox_request() argument
358 ret = mbox_send_message(sba->mchan, &req->msg); in sba_send_mbox_request()
360 dev_err(sba->dev, "send message failed with error %d", ret); in sba_send_mbox_request()
367 dev_err(sba->dev, "message error %d", ret); in sba_send_mbox_request()
371 mbox_client_txdone(sba->mchan, ret); in sba_send_mbox_request()
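sba_send_mbox_request() (driver lines 351 to 371) shows the knows_txdone mailbox idiom: the client sends without blocking and then calls mbox_client_txdone() itself rather than letting the framework poll or time out. Reconstructed sketch; reading a synchronous error out of req->msg.error is inferred from the "message error" print:

static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret;

	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* The controller may report a synchronous submit error. */
	ret = req->msg.error;
	if (ret < 0)
		dev_err(sba->dev, "message error %d", ret);

	/* knows_txdone == true: we tick the TX state machine ourselves. */
	mbox_client_txdone(sba->mchan, ret);
	return ret;
}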
377 static void _sba_process_pending_requests(struct sba_device *sba) in _sba_process_pending_requests() argument
385 while (!list_empty(&sba->reqs_pending_list) && count) { in _sba_process_pending_requests()
387 req = list_first_entry(&sba->reqs_pending_list, in _sba_process_pending_requests()
391 if (!_sba_active_request(sba, req)) in _sba_process_pending_requests()
395 ret = sba_send_mbox_request(sba, req); in _sba_process_pending_requests()
397 _sba_pending_request(sba, req); in _sba_process_pending_requests()
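_sba_process_pending_requests() (driver lines 377 to 397) drains the pending list with the lock held, stopping at a fence or a send failure and requeueing the failed request. Sketch; the initial value of the count budget is not in the listing, so the constant name here is hypothetical:

static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	struct sba_request *req;
	u32 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;	/* assumed budget */

	/* Caller holds sba->reqs_lock. */
	while (!list_empty(&sba->reqs_pending_list) && count) {
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* A fence can forbid activation; retry on completion. */
		if (!_sba_active_request(sba, req))
			break;

		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}
		count--;
	}
}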
405 static void sba_process_received_request(struct sba_device *sba, in sba_process_received_request() argument
418 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
420 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
429 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
433 _sba_free_request(sba, nreq); in sba_process_received_request()
437 _sba_free_request(sba, first); in sba_process_received_request()
440 _sba_process_pending_requests(sba); in sba_process_received_request()
442 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
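sba_process_received_request() (driver lines 405 to 442) takes reqs_lock twice: once to complete the cookie, and once to recycle the whole chain and refill the hardware; the client callback runs between the two, outside the lock. A sketch of that shape; the first/next chain fields and the cookie/callback calls are assumptions, since the listing only shows the lock and free calls:

static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq, *first = req->first;	/* assumed field */

	spin_lock_irqsave(&sba->reqs_lock, flags);
	dma_cookie_complete(&first->tx);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	/* Client callback runs without the lock held. */
	dmaengine_desc_get_callback_invoke(&first->tx, NULL);

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(nreq, &first->next, next)	/* assumed field */
		_sba_free_request(sba, nreq);
	_sba_free_request(sba, first);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}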
446 static void sba_write_stats_in_seqfile(struct sba_device *sba, in sba_write_stats_in_seqfile() argument
454 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
456 list_for_each_entry(req, &sba->reqs_free_list, node) in sba_write_stats_in_seqfile()
460 list_for_each_entry(req, &sba->reqs_alloc_list, node) in sba_write_stats_in_seqfile()
463 list_for_each_entry(req, &sba->reqs_pending_list, node) in sba_write_stats_in_seqfile()
466 list_for_each_entry(req, &sba->reqs_active_list, node) in sba_write_stats_in_seqfile()
469 list_for_each_entry(req, &sba->reqs_aborted_list, node) in sba_write_stats_in_seqfile()
472 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
474 seq_printf(file, "maximum requests = %d\n", sba->max_req); in sba_write_stats_in_seqfile()
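The debugfs stats writer (driver lines 446 to 474) just counts the entries on each list under the lock, then prints. Sketch with hypothetical counter names:

static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free = 0, alloced = 0, pending = 0, active = 0, aborted = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node)
		free++;
	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced++;
	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending++;
	list_for_each_entry(req, &sba->reqs_active_list, node)
		active++;
	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted++;
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests   = %d\n", sba->max_req);
	seq_printf(file, "free requests      = %u\n", free);
	seq_printf(file, "allocated requests = %u\n", alloced);
	seq_printf(file, "pending requests   = %u\n", pending);
	seq_printf(file, "active requests    = %u\n", active);
	seq_printf(file, "aborted requests   = %u\n", aborted);
}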
505 struct sba_device *sba = to_sba_device(dchan); in sba_issue_pending() local
508 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_issue_pending()
509 _sba_process_pending_requests(sba); in sba_issue_pending()
510 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_issue_pending()
517 struct sba_device *sba; in sba_tx_submit() local
523 sba = to_sba_device(tx->chan); in sba_tx_submit()
527 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_tx_submit()
529 _sba_pending_request(sba, req); in sba_tx_submit()
531 _sba_pending_request(sba, nreq); in sba_tx_submit()
532 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_tx_submit()
542 struct sba_device *sba = to_sba_device(dchan); in sba_tx_status() local
548 mbox_client_peek_data(sba->mchan); in sba_tx_status()
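Driver lines 505 to 548 cover three dmaengine entry points. tx_submit is the interesting one: it queues the head request and every chained sub-request as pending in one critical section. Sketch; to_sba_request() is a hypothetical container_of() helper, and dma_cookie_assign() is inferred from the standard dmaengine pattern:

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);	/* hypothetical container_of() */

	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)	/* chained reqs */
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

sba_issue_pending() then just takes the lock and calls _sba_process_pending_requests(), while sba_tx_status() peeks the mailbox (line 548) so a polling client pulls completions forward itself.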
565 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
576 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
582 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
594 if (req->sba->hw_resp_size) { in sba_fillup_interrupt_msg()
597 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
601 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
606 msg->sba.cmds = cmds; in sba_fillup_interrupt_msg()
607 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_interrupt_msg()
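Every fillup helper ends the same way (lines 606/607 here, and again at 694/695, 833/834, 1044/1045, 1298/1299): the encoded command array is handed to the Broadcom mailbox as one message. The hypothetical helper below shows that handoff; the type, ctx and error assignments are assumptions based on struct brcm_message from linux/mailbox/brcm-message.h:

/* Hypothetical helper; cmdsp points one past the last filled command. */
static void sba_msg_init(struct brcm_message *msg, struct sba_request *req,
			 struct brcm_sba_command *cmds,
			 struct brcm_sba_command *cmdsp)
{
	msg->type	    = BRCM_MESSAGE_SBA;	/* assumed */
	msg->sba.cmds	    = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx	    = req;	/* assumed: echoed back on rx */
	msg->error	    = 0;	/* assumed */
}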
616 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_interrupt() local
619 req = sba_alloc_request(sba); in sba_prep_dma_interrupt()
682 if (req->sba->hw_resp_size) { in sba_fillup_memcpy_msg()
685 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_memcpy_msg()
694 msg->sba.cmds = cmds; in sba_fillup_memcpy_msg()
695 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_memcpy_msg()
701 sba_prep_dma_memcpy_req(struct sba_device *sba, in sba_prep_dma_memcpy_req() argument
708 req = sba_alloc_request(sba); in sba_prep_dma_memcpy_req()
731 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_memcpy() local
736 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_memcpy()
738 req = sba_prep_dma_memcpy_req(sba, off, dst, src, in sba_prep_dma_memcpy()
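sba_prep_dma_memcpy() (driver lines 731 to 738) splits an arbitrarily large copy into hw_buf_size chunks and chains the per-chunk requests so the client still sees a single descriptor. A sketch of that loop, with the chaining calls named as in the fragments above:

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst,
		    dma_addr_t src, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	while (len) {
		/* Clamp each chunk to what the hardware can move. */
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return first ? &first->tx : NULL;
}

The XOR and PQ prep routines below follow the same chunk-and-chain shape.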
821 if (req->sba->hw_resp_size) { in sba_fillup_xor_msg()
824 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_xor_msg()
833 msg->sba.cmds = cmds; in sba_fillup_xor_msg()
834 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_xor_msg()
840 sba_prep_dma_xor_req(struct sba_device *sba, in sba_prep_dma_xor_req() argument
847 req = sba_alloc_request(sba); in sba_prep_dma_xor_req()
870 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_xor() local
874 if (unlikely(src_cnt > sba->max_xor_srcs)) in sba_prep_dma_xor()
879 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_xor()
881 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt, in sba_prep_dma_xor()
1004 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1007 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1031 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1034 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1044 msg->sba.cmds = cmds; in sba_fillup_pq_msg()
1045 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_msg()
1051 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, in sba_prep_dma_pq_req() argument
1058 req = sba_alloc_request(sba); in sba_prep_dma_pq_req()
1164 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1167 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1192 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1193 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1221 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1222 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1285 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1288 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1298 msg->sba.cmds = cmds; in sba_fillup_pq_single_msg()
1299 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_single_msg()
1305 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, in sba_prep_dma_pq_single_req() argument
1313 req = sba_alloc_request(sba); in sba_prep_dma_pq_single_req()
1340 struct sba_device *sba = to_sba_device(dchan); in sba_prep_dma_pq() local
1344 if (unlikely(src_cnt > sba->max_pq_srcs)) in sba_prep_dma_pq()
1347 if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) in sba_prep_dma_pq()
1358 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_pq()
1374 req = sba_prep_dma_pq_single_req(sba, in sba_prep_dma_pq()
1392 req = sba_prep_dma_pq_single_req(sba, in sba_prep_dma_pq()
1406 req = sba_prep_dma_pq_req(sba, off, in sba_prep_dma_pq()
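The PQ path (driver lines 1340 to 1406) picks between two request shapes: if any GF(256) coefficient's exponent reaches max_pq_coefs (6 on SBAv1, 30 on SBAv2, per the probe fragments below), the hardware cannot apply it in one pass, so per-source single requests are issued instead of one combined PQ request. The check at line 1347 could be factored as the hypothetical helper below:

#include <linux/raid/pq.h>	/* raid6_gflog[]: coefficient -> exponent */

/* Hypothetical helper: true if all coefficients fit the hardware. */
static bool sba_pq_coefs_in_range(struct sba_device *sba,
				  const u8 *scf, u32 src_cnt)
{
	u32 i;

	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			return false;
	return true;
}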
1436 struct sba_device *sba = req->sba; in sba_receive_message() local
1440 dev_err(sba->dev, "%s got message with error %d", in sba_receive_message()
1441 dma_chan_name(&sba->dma_chan), m->error); in sba_receive_message()
1444 sba_process_received_request(sba, req); in sba_receive_message()
1451 struct sba_device *sba = dev_get_drvdata(file->private); in sba_debugfs_stats_show() local
1454 sba_write_stats_in_seqfile(sba, file); in sba_debugfs_stats_show()
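The stats file uses the managed seq_file helper, so the show callback receives the struct device in file->private (lines 1451 to 1454; registration appears at line 1710 below). Sketch:

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	/* debugfs_create_devm_seqfile() stashes the device here. */
	struct sba_device *sba = dev_get_drvdata(file->private);

	sba_write_stats_in_seqfile(sba, file);

	return 0;
}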
1461 static int sba_prealloc_channel_resources(struct sba_device *sba) in sba_prealloc_channel_resources() argument
1466 sba->resp_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1467 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1468 &sba->resp_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1469 if (!sba->resp_base) in sba_prealloc_channel_resources()
1472 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1473 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1474 &sba->cmds_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1475 if (!sba->cmds_base) { in sba_prealloc_channel_resources()
1480 spin_lock_init(&sba->reqs_lock); in sba_prealloc_channel_resources()
1481 sba->reqs_fence = false; in sba_prealloc_channel_resources()
1482 INIT_LIST_HEAD(&sba->reqs_alloc_list); in sba_prealloc_channel_resources()
1483 INIT_LIST_HEAD(&sba->reqs_pending_list); in sba_prealloc_channel_resources()
1484 INIT_LIST_HEAD(&sba->reqs_active_list); in sba_prealloc_channel_resources()
1485 INIT_LIST_HEAD(&sba->reqs_aborted_list); in sba_prealloc_channel_resources()
1486 INIT_LIST_HEAD(&sba->reqs_free_list); in sba_prealloc_channel_resources()
1488 for (i = 0; i < sba->max_req; i++) { in sba_prealloc_channel_resources()
1489 req = devm_kzalloc(sba->dev, in sba_prealloc_channel_resources()
1490 struct_size(req, cmds, sba->max_cmd_per_req), in sba_prealloc_channel_resources()
1497 req->sba = sba; in sba_prealloc_channel_resources()
1501 for (j = 0; j < sba->max_cmd_per_req; j++) { in sba_prealloc_channel_resources()
1503 req->cmds[j].cmd_dma = sba->cmds_base + in sba_prealloc_channel_resources()
1504 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1505 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + in sba_prealloc_channel_resources()
1506 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1510 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_prealloc_channel_resources()
1513 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; in sba_prealloc_channel_resources()
1514 list_add_tail(&req->node, &sba->reqs_free_list); in sba_prealloc_channel_resources()
1520 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1521 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1522 sba->cmds_base, sba->cmds_dma_base); in sba_prealloc_channel_resources()
1524 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1525 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1526 sba->resp_base, sba->resp_dma_base); in sba_prealloc_channel_resources()
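sba_prealloc_channel_resources() (driver lines 1461 to 1526) allocates two coherent pools, one for hardware responses and one for command words, then carves a fixed slice of each into every preallocated request. Reconstructed sketch; the goto label names are assumptions:

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, ret;
	struct sba_request *req;

	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;	/* assumed label */
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   struct_size(req, cmds, sba->max_cmd_per_req),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;	/* assumed label */
		}
		req->sba = sba;
		/* Each command slot gets CPU and DMA views of one u64
		 * slice of the shared cmds pool. */
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
		}
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		/* tx.phys doubles as this request's response address. */
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}
	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}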
1530 static void sba_freeup_channel_resources(struct sba_device *sba) in sba_freeup_channel_resources() argument
1532 dmaengine_terminate_all(&sba->dma_chan); in sba_freeup_channel_resources()
1533 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, in sba_freeup_channel_resources()
1534 sba->cmds_base, sba->cmds_dma_base); in sba_freeup_channel_resources()
1535 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, in sba_freeup_channel_resources()
1536 sba->resp_base, sba->resp_dma_base); in sba_freeup_channel_resources()
1537 sba->resp_base = NULL; in sba_freeup_channel_resources()
1538 sba->resp_dma_base = 0; in sba_freeup_channel_resources()
1541 static int sba_async_register(struct sba_device *sba) in sba_async_register() argument
1544 struct dma_device *dma_dev = &sba->dma_dev; in sba_async_register()
1547 sba->dma_chan.device = dma_dev; in sba_async_register()
1548 dma_cookie_init(&sba->dma_chan); in sba_async_register()
1562 dma_dev->dev = sba->mbox_dev; in sba_async_register()
1581 dma_dev->max_xor = sba->max_xor_srcs; in sba_async_register()
1587 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); in sba_async_register()
1592 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); in sba_async_register()
1597 dev_err(sba->dev, "async device register error %d", ret); in sba_async_register()
1601 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", in sba_async_register()
1602 dma_chan_name(&sba->dma_chan), in sba_async_register()
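sba_async_register() (driver lines 1541 to 1602) wires the single channel into a dma_device, advertises the four capabilities the info line prints, and registers. Sketch; the dma_cap_zero()/dma_cap_set() calls are inferred from the standard dmaengine pattern rather than shown in the listing:

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* One dmaengine channel backed by one mailbox channel. */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/* DMA ops are performed on behalf of the mailbox device. */
	dma_dev->dev = sba->mbox_dev;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;
	dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
	dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
	dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
	dma_dev->device_prep_dma_pq = sba_prep_dma_pq;

	dma_dev->max_xor = sba->max_xor_srcs;
	dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);

	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		dev_err(sba->dev, "async device register error %d", ret);
	return ret;
}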
1614 struct sba_device *sba; in sba_probe() local
1619 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); in sba_probe()
1620 if (!sba) in sba_probe()
1623 sba->dev = &pdev->dev; in sba_probe()
1624 platform_set_drvdata(pdev, sba); in sba_probe()
1633 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) in sba_probe()
1634 sba->ver = SBA_VER_1; in sba_probe()
1635 else if (of_device_is_compatible(sba->dev->of_node, in sba_probe()
1637 sba->ver = SBA_VER_2; in sba_probe()
1642 switch (sba->ver) { in sba_probe()
1644 sba->hw_buf_size = 4096; in sba_probe()
1645 sba->hw_resp_size = 8; in sba_probe()
1646 sba->max_pq_coefs = 6; in sba_probe()
1647 sba->max_pq_srcs = 6; in sba_probe()
1650 sba->hw_buf_size = 4096; in sba_probe()
1651 sba->hw_resp_size = 8; in sba_probe()
1652 sba->max_pq_coefs = 30; in sba_probe()
1658 sba->max_pq_srcs = 12; in sba_probe()
1663 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; in sba_probe()
1664 sba->max_cmd_per_req = sba->max_pq_srcs + 3; in sba_probe()
1665 sba->max_xor_srcs = sba->max_cmd_per_req - 1; in sba_probe()
1666 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; in sba_probe()
1667 sba->max_cmds_pool_size = sba->max_req * in sba_probe()
1668 sba->max_cmd_per_req * sizeof(u64); in sba_probe()
1671 sba->client.dev = &pdev->dev; in sba_probe()
1672 sba->client.rx_callback = sba_receive_message; in sba_probe()
1673 sba->client.tx_block = false; in sba_probe()
1674 sba->client.knows_txdone = true; in sba_probe()
1675 sba->client.tx_tout = 0; in sba_probe()
1678 sba->mchan = mbox_request_channel(&sba->client, 0); in sba_probe()
1679 if (IS_ERR(sba->mchan)) { in sba_probe()
1680 ret = PTR_ERR(sba->mchan); in sba_probe()
1695 sba->mbox_dev = &mbox_pdev->dev; in sba_probe()
1698 ret = sba_prealloc_channel_resources(sba); in sba_probe()
1707 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); in sba_probe()
1710 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, in sba_probe()
1716 ret = sba_async_register(sba); in sba_probe()
1721 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", in sba_probe()
1722 dma_chan_name(&sba->dma_chan), sba->ver+1, in sba_probe()
1723 dev_name(sba->mbox_dev)); in sba_probe()
1728 debugfs_remove_recursive(sba->root); in sba_probe()
1729 sba_freeup_channel_resources(sba); in sba_probe()
1731 mbox_free_channel(sba->mchan); in sba_probe()
1737 struct sba_device *sba = platform_get_drvdata(pdev); in sba_remove() local
1739 dma_async_device_unregister(&sba->dma_dev); in sba_remove()
1741 debugfs_remove_recursive(sba->root); in sba_remove()
1743 sba_freeup_channel_resources(sba); in sba_remove()
1745 mbox_free_channel(sba->mchan); in sba_remove()
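sba_remove() (driver lines 1737 to 1745) tears down in strict reverse of the probe error path above: leave dmaengine first so no new descriptors arrive, then debugfs, then the coherent pools (sba_freeup_channel_resources() also terminates the channel), and the mailbox channel last. Sketch; the int return type follows the older platform-driver convention:

static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);	/* terminates + frees pools */
	mbox_free_channel(sba->mchan);

	return 0;
}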