Lines matching refs:qp in drivers/ntb/ntb_transport.c (Linux NTB transport driver)
120 struct ntb_transport_qp *qp; member
148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
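QP_TO_MW() above stripes queue pairs across the device's memory windows by simple modulo, so consecutive qps land in different windows. A minimal userspace sketch of the mapping (two windows and five qps are illustrative, not fixed by the driver):

    #include <stdio.h>

    /* Mirrors the driver's QP_TO_MW(nt, qp): qp modulo the memory-window count. */
    #define QP_TO_MW(mw_count, qp) ((qp) % (mw_count))

    int main(void)
    {
            unsigned int mw_count = 2;      /* example: device exposes 2 windows */
            unsigned int qp;

            for (qp = 0; qp < 5; qp++)
                    printf("qp %u -> mw %u\n", qp, QP_TO_MW(mw_count, qp));
            /* qp 0, 2, 4 share MW 0; qp 1, 3 share MW 1 */
            return 0;
    }
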
279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
466 struct ntb_transport_qp *qp; in debugfs_read() local
470 qp = filp->private_data; in debugfs_read()
472 if (!qp || !qp->link_is_up) in debugfs_read()
485 "rx_bytes - \t%llu\n", qp->rx_bytes); in debugfs_read()
487 "rx_pkts - \t%llu\n", qp->rx_pkts); in debugfs_read()
489 "rx_memcpy - \t%llu\n", qp->rx_memcpy); in debugfs_read()
491 "rx_async - \t%llu\n", qp->rx_async); in debugfs_read()
493 "rx_ring_empty - %llu\n", qp->rx_ring_empty); in debugfs_read()
495 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); in debugfs_read()
497 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); in debugfs_read()
499 "rx_err_ver - \t%llu\n", qp->rx_err_ver); in debugfs_read()
501 "rx_buff - \t0x%p\n", qp->rx_buff); in debugfs_read()
503 "rx_index - \t%u\n", qp->rx_index); in debugfs_read()
505 "rx_max_entry - \t%u\n", qp->rx_max_entry); in debugfs_read()
507 "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry); in debugfs_read()
510 "tx_bytes - \t%llu\n", qp->tx_bytes); in debugfs_read()
512 "tx_pkts - \t%llu\n", qp->tx_pkts); in debugfs_read()
514 "tx_memcpy - \t%llu\n", qp->tx_memcpy); in debugfs_read()
516 "tx_async - \t%llu\n", qp->tx_async); in debugfs_read()
518 "tx_ring_full - \t%llu\n", qp->tx_ring_full); in debugfs_read()
520 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); in debugfs_read()
522 "tx_mw - \t0x%p\n", qp->tx_mw); in debugfs_read()
524 "tx_index (H) - \t%u\n", qp->tx_index); in debugfs_read()
527 qp->remote_rx_info->entry); in debugfs_read()
529 "tx_max_entry - \t%u\n", qp->tx_max_entry); in debugfs_read()
532 ntb_transport_tx_free_entry(qp)); in debugfs_read()
538 qp->tx_dma_chan ? "Yes" : "No"); in debugfs_read()
541 qp->rx_dma_chan ? "Yes" : "No"); in debugfs_read()
544 qp->link_is_up ? "Up" : "Down"); in debugfs_read()
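debugfs_read() above renders all of a queue pair's counters into one text blob. Once debugfs is mounted it reads like any file; the exact path below is an assumption (the driver creates qp%d directories under a per-device directory, typically named after the PCI device), so adjust it to your system:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* Assumed path; the device directory component varies per host. */
            const char *path =
                    "/sys/kernel/debug/ntb_transport/0000:01:00.0/qp0/stats";
            FILE *f = fopen(path, "r");
            char line[128];

            if (!f) {
                    perror("fopen");
                    return EXIT_FAILURE;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* rx_bytes, tx_pkts, link state, ... */
            fclose(f);
            return 0;
    }
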
616 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_mw() local
640 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); in ntb_transport_setup_qp_mw()
643 qp->remote_rx_info = qp->rx_buff + rx_size; in ntb_transport_setup_qp_mw()
646 qp->rx_max_frame = min(transport_mtu, rx_size / 2); in ntb_transport_setup_qp_mw()
647 qp->rx_max_entry = rx_size / qp->rx_max_frame; in ntb_transport_setup_qp_mw()
648 qp->rx_index = 0; in ntb_transport_setup_qp_mw()
656 for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) { in ntb_transport_setup_qp_mw()
661 entry->qp = qp; in ntb_transport_setup_qp_mw()
662 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, in ntb_transport_setup_qp_mw()
663 &qp->rx_free_q); in ntb_transport_setup_qp_mw()
664 qp->rx_alloc_entry++; in ntb_transport_setup_qp_mw()
667 qp->remote_rx_info->entry = qp->rx_max_entry - 1; in ntb_transport_setup_qp_mw()
670 for (i = 0; i < qp->rx_max_entry; i++) { in ntb_transport_setup_qp_mw()
671 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) - in ntb_transport_setup_qp_mw()
676 qp->rx_pkts = 0; in ntb_transport_setup_qp_mw()
677 qp->tx_pkts = 0; in ntb_transport_setup_qp_mw()
678 qp->tx_index = 0; in ntb_transport_setup_qp_mw()
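ntb_transport_setup_qp_mw() above carves each qp's share of a memory window into fixed-size RX frames: rx_buff starts at the qp's slice (line 640), the remote consumer index sits at the slice's tail (line 643), frames are capped at the MTU and at half the share so at least two fit (line 646), and the consumer index is seeded to rx_max_entry - 1 (line 667). A compilable sketch of the same arithmetic; the window share is an assumed value, transport_mtu is the driver's default:

    #include <stdio.h>

    /* Stand-in for the driver's struct ntb_rx_info (one consumer index). */
    struct ntb_rx_info {
            unsigned int entry;
    };

    int main(void)
    {
            unsigned int transport_mtu = 0x10000;   /* driver default: 64 KiB */
            unsigned int rx_size = 0x80000;         /* assumed per-qp MW share */
            unsigned int rx_max_frame, rx_max_entry;

            /* The consumer index lives at the tail of the share. */
            rx_size -= sizeof(struct ntb_rx_info);

            /* Cap frames at the MTU and at half the share ("there must be
             * at least 2 buffs"). */
            rx_max_frame = transport_mtu < rx_size / 2 ?
                            transport_mtu : rx_size / 2;
            rx_max_entry = rx_size / rx_max_frame;

            printf("frame %#x bytes, %u entries, seed remote_rx_info->entry = %u\n",
                   rx_max_frame, rx_max_entry, rx_max_entry - 1);
            return 0;
    }
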
685 struct ntb_transport_qp *qp = dev; in ntb_transport_isr() local
687 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_isr()
695 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_peer_msi() local
704 qp->peer_msi_desc.addr_offset = in ntb_transport_setup_qp_peer_msi()
705 ntb_peer_spad_read(qp->ndev, PIDX, spad); in ntb_transport_setup_qp_peer_msi()
706 qp->peer_msi_desc.data = in ntb_transport_setup_qp_peer_msi()
707 ntb_peer_spad_read(qp->ndev, PIDX, spad + 1); in ntb_transport_setup_qp_peer_msi()
709 dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n", in ntb_transport_setup_qp_peer_msi()
710 qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data); in ntb_transport_setup_qp_peer_msi()
712 if (qp->peer_msi_desc.addr_offset) { in ntb_transport_setup_qp_peer_msi()
713 qp->use_msi = true; in ntb_transport_setup_qp_peer_msi()
714 dev_info(&qp->ndev->pdev->dev, in ntb_transport_setup_qp_peer_msi()
722 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_msi() local
730 dev_warn_once(&qp->ndev->pdev->dev, in ntb_transport_setup_qp_msi()
735 ntb_spad_write(qp->ndev, spad, 0); in ntb_transport_setup_qp_msi()
736 ntb_spad_write(qp->ndev, spad + 1, 0); in ntb_transport_setup_qp_msi()
738 if (!qp->msi_irq) { in ntb_transport_setup_qp_msi()
739 qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr, in ntb_transport_setup_qp_msi()
740 KBUILD_MODNAME, qp, in ntb_transport_setup_qp_msi()
741 &qp->msi_desc); in ntb_transport_setup_qp_msi()
742 if (qp->msi_irq < 0) { in ntb_transport_setup_qp_msi()
743 dev_warn(&qp->ndev->pdev->dev, in ntb_transport_setup_qp_msi()
750 rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset); in ntb_transport_setup_qp_msi()
754 rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data); in ntb_transport_setup_qp_msi()
758 dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n", in ntb_transport_setup_qp_msi()
759 qp_num, qp->msi_irq, qp->msi_desc.addr_offset, in ntb_transport_setup_qp_msi()
760 qp->msi_desc.data); in ntb_transport_setup_qp_msi()
765 devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp); in ntb_transport_setup_qp_msi()
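The two MSI setup paths above implement a scratchpad handshake: ntb_transport_setup_qp_msi() publishes the local MSI address offset and data word in two consecutive scratchpads per qp (lines 750 and 754), and the peer's ntb_transport_setup_qp_peer_msi() reads them back (lines 705 and 707), enabling use_msi only when a non-zero address was actually published (line 712). A toy model of the convention; the base scratchpad index is an assumption here (the real index is driver-internal):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SPADS 64

    /* Toy register file standing in for the NTB device's scratchpads. */
    static uint32_t spads[MAX_SPADS];

    /* Local side: publish this qp's MSI address/data (two spads per qp). */
    static void publish_msi(int base, int qp_num, uint32_t addr, uint32_t data)
    {
            spads[base + qp_num * 2]     = addr;
            spads[base + qp_num * 2 + 1] = data;
    }

    /* Peer side: use MSI only if a non-zero address offset was published,
     * otherwise fall back to doorbells. */
    static int read_peer_msi(int base, int qp_num, uint32_t *addr, uint32_t *data)
    {
            *addr = spads[base + qp_num * 2];
            *data = spads[base + qp_num * 2 + 1];
            return *addr != 0;      /* becomes qp->use_msi */
    }

    int main(void)
    {
            int base = 8;   /* assumed offset past the link/MW scratchpads */
            uint32_t addr, data;

            publish_msi(base, 0, 0x1000, 0xabcd);
            if (read_peer_msi(base, 0, &addr, &data))
                    printf("qp0: MSI addr=%#x data=%#x\n", addr, data);
            return 0;
    }
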
912 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) in ntb_qp_link_down_reset() argument
914 qp->link_is_up = false; in ntb_qp_link_down_reset()
915 qp->active = false; in ntb_qp_link_down_reset()
917 qp->tx_index = 0; in ntb_qp_link_down_reset()
918 qp->rx_index = 0; in ntb_qp_link_down_reset()
919 qp->rx_bytes = 0; in ntb_qp_link_down_reset()
920 qp->rx_pkts = 0; in ntb_qp_link_down_reset()
921 qp->rx_ring_empty = 0; in ntb_qp_link_down_reset()
922 qp->rx_err_no_buf = 0; in ntb_qp_link_down_reset()
923 qp->rx_err_oflow = 0; in ntb_qp_link_down_reset()
924 qp->rx_err_ver = 0; in ntb_qp_link_down_reset()
925 qp->rx_memcpy = 0; in ntb_qp_link_down_reset()
926 qp->rx_async = 0; in ntb_qp_link_down_reset()
927 qp->tx_bytes = 0; in ntb_qp_link_down_reset()
928 qp->tx_pkts = 0; in ntb_qp_link_down_reset()
929 qp->tx_ring_full = 0; in ntb_qp_link_down_reset()
930 qp->tx_err_no_buf = 0; in ntb_qp_link_down_reset()
931 qp->tx_memcpy = 0; in ntb_qp_link_down_reset()
932 qp->tx_async = 0; in ntb_qp_link_down_reset()
935 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) in ntb_qp_link_cleanup() argument
937 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_cleanup()
940 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num); in ntb_qp_link_cleanup()
942 cancel_delayed_work_sync(&qp->link_work); in ntb_qp_link_cleanup()
943 ntb_qp_link_down_reset(qp); in ntb_qp_link_cleanup()
945 if (qp->event_handler) in ntb_qp_link_cleanup()
946 qp->event_handler(qp->cb_data, qp->link_is_up); in ntb_qp_link_cleanup()
951 struct ntb_transport_qp *qp = container_of(work, in ntb_qp_link_cleanup_work() local
954 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_cleanup_work()
956 ntb_qp_link_cleanup(qp); in ntb_qp_link_cleanup_work()
959 schedule_delayed_work(&qp->link_work, in ntb_qp_link_cleanup_work()
963 static void ntb_qp_link_down(struct ntb_transport_qp *qp) in ntb_qp_link_down() argument
965 schedule_work(&qp->link_cleanup); in ntb_qp_link_down()
970 struct ntb_transport_qp *qp; in ntb_transport_link_cleanup() local
979 qp = &nt->qp_vec[i]; in ntb_transport_link_cleanup()
980 ntb_qp_link_cleanup(qp); in ntb_transport_link_cleanup()
981 cancel_work_sync(&qp->link_cleanup); in ntb_transport_link_cleanup()
982 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_link_cleanup()
1097 struct ntb_transport_qp *qp = &nt->qp_vec[i]; in ntb_transport_link_work() local
1102 if (qp->client_ready) in ntb_transport_link_work()
1103 schedule_delayed_work(&qp->link_work, 0); in ntb_transport_link_work()
1124 struct ntb_transport_qp *qp = container_of(work, in ntb_qp_link_work() local
1127 struct pci_dev *pdev = qp->ndev->pdev; in ntb_qp_link_work()
1128 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_work()
1135 ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num)); in ntb_qp_link_work()
1141 if (val & BIT(qp->qp_num)) { in ntb_qp_link_work()
1142 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); in ntb_qp_link_work()
1143 qp->link_is_up = true; in ntb_qp_link_work()
1144 qp->active = true; in ntb_qp_link_work()
1146 if (qp->event_handler) in ntb_qp_link_work()
1147 qp->event_handler(qp->cb_data, qp->link_is_up); in ntb_qp_link_work()
1149 if (qp->active) in ntb_qp_link_work()
1150 tasklet_schedule(&qp->rxc_db_work); in ntb_qp_link_work()
1152 schedule_delayed_work(&qp->link_work, in ntb_qp_link_work()
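ntb_qp_link_work() above drives a two-sided handshake through the QP_LINKS scratchpad: each side ORs BIT(qp_num) into the peer's QP_LINKS register (line 1135), then checks its own copy for the peer's bit (line 1141), requeueing itself until the bit appears. A toy two-host model of the exchange:

    #include <stdio.h>
    #include <stdint.h>

    /* Each side sets BIT(qp_num) in the *peer's* QP_LINKS scratchpad, then
     * watches its *own* copy for the peer's announcement. */
    static uint32_t qp_links_a, qp_links_b;  /* local spads of sides A and B */

    static void side_a_announce(int qp_num) { qp_links_b |= 1u << qp_num; }
    static void side_b_announce(int qp_num) { qp_links_a |= 1u << qp_num; }

    int main(void)
    {
            int qp_num = 3;

            side_a_announce(qp_num);
            printf("A sees peer: %s\n",
                   (qp_links_a >> qp_num) & 1 ? "yes" : "no");   /* no */
            side_b_announce(qp_num);
            printf("A sees peer: %s\n",
                   (qp_links_a >> qp_num) & 1 ? "yes" : "no");   /* yes */
            /* Only now does A's ntb_qp_link_work() set link_is_up = true. */
            return 0;
    }

ntb_transport_link_down() later clears the same bit in the peer's scratchpad (line 2342), undoing the announcement.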
1159 struct ntb_transport_qp *qp; in ntb_transport_init_queue() local
1171 qp = &nt->qp_vec[qp_num]; in ntb_transport_init_queue()
1172 qp->qp_num = qp_num; in ntb_transport_init_queue()
1173 qp->transport = nt; in ntb_transport_init_queue()
1174 qp->ndev = nt->ndev; in ntb_transport_init_queue()
1175 qp->client_ready = false; in ntb_transport_init_queue()
1176 qp->event_handler = NULL; in ntb_transport_init_queue()
1177 ntb_qp_link_down_reset(qp); in ntb_transport_init_queue()
1193 qp->tx_mw_size = tx_size; in ntb_transport_init_queue()
1194 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; in ntb_transport_init_queue()
1195 if (!qp->tx_mw) in ntb_transport_init_queue()
1198 qp->tx_mw_phys = mw_base + qp_offset; in ntb_transport_init_queue()
1199 if (!qp->tx_mw_phys) in ntb_transport_init_queue()
1203 qp->rx_info = qp->tx_mw + tx_size; in ntb_transport_init_queue()
1206 qp->tx_max_frame = min(transport_mtu, tx_size / 2); in ntb_transport_init_queue()
1207 qp->tx_max_entry = tx_size / qp->tx_max_frame; in ntb_transport_init_queue()
1213 qp->debugfs_dir = debugfs_create_dir(debugfs_name, in ntb_transport_init_queue()
1216 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, in ntb_transport_init_queue()
1217 qp->debugfs_dir, qp, in ntb_transport_init_queue()
1220 qp->debugfs_dir = NULL; in ntb_transport_init_queue()
1221 qp->debugfs_stats = NULL; in ntb_transport_init_queue()
1224 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); in ntb_transport_init_queue()
1225 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); in ntb_transport_init_queue()
1227 spin_lock_init(&qp->ntb_rx_q_lock); in ntb_transport_init_queue()
1228 spin_lock_init(&qp->ntb_tx_free_q_lock); in ntb_transport_init_queue()
1230 INIT_LIST_HEAD(&qp->rx_post_q); in ntb_transport_init_queue()
1231 INIT_LIST_HEAD(&qp->rx_pend_q); in ntb_transport_init_queue()
1232 INIT_LIST_HEAD(&qp->rx_free_q); in ntb_transport_init_queue()
1233 INIT_LIST_HEAD(&qp->tx_free_q); in ntb_transport_init_queue()
1235 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, in ntb_transport_init_queue()
1236 (unsigned long)qp); in ntb_transport_init_queue()
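ntb_transport_init_queue() above hands each qp an equal slice of its memory window for TX: the slice starts tx_size * (qp_num / mw_count) bytes into the window (the qp_offset used at line 1194), the peer-visible rx_info consumer index sits at the slice's tail (line 1203), and frames are capped exactly as on the RX side (line 1206). A sketch of the slice math, assuming an even qp/mw split (the driver also handles the uneven case) and illustrative sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mw_size = 0x100000;        /* assumed 1 MiB window */
            unsigned int mw_count = 2, qp_count = 4;
            unsigned int num_qps_mw = qp_count / mw_count; /* qps per window */
            unsigned int transport_mtu = 0x10000;
            unsigned int qp_num;

            for (qp_num = 0; qp_num < qp_count; qp_num++) {
                    unsigned int tx_size = mw_size / num_qps_mw;
                    unsigned int qp_offset = tx_size * (qp_num / mw_count);
                    unsigned int tx_max_frame;

                    /* rx_info (the consumer index we expose to the peer)
                     * sits at the slice's tail, like remote_rx_info on RX. */
                    tx_size -= sizeof(unsigned int); /* stand-in for ntb_rx_info */
                    tx_max_frame = transport_mtu < tx_size / 2 ?
                                    transport_mtu : tx_size / 2;

                    printf("qp %u: mw %u, offset %#x, frame %#x, entries %u\n",
                           qp_num, qp_num % mw_count, qp_offset,
                           tx_max_frame, tx_size / tx_max_frame);
            }
            return 0;
    }
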
1404 struct ntb_transport_qp *qp; in ntb_transport_free() local
1416 qp = &nt->qp_vec[i]; in ntb_transport_free()
1418 ntb_transport_free_queue(qp); in ntb_transport_free()
1419 debugfs_remove_recursive(qp->debugfs_dir); in ntb_transport_free()
1437 static void ntb_complete_rxc(struct ntb_transport_qp *qp) in ntb_complete_rxc() argument
1444 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1446 while (!list_empty(&qp->rx_post_q)) { in ntb_complete_rxc()
1447 entry = list_first_entry(&qp->rx_post_q, in ntb_complete_rxc()
1453 iowrite32(entry->rx_index, &qp->rx_info->entry); in ntb_complete_rxc()
1458 list_move_tail(&entry->entry, &qp->rx_free_q); in ntb_complete_rxc()
1460 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1462 if (qp->rx_handler && qp->client_ready) in ntb_complete_rxc()
1463 qp->rx_handler(qp, qp->cb_data, cb_data, len); in ntb_complete_rxc()
1465 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1468 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1487 struct ntb_transport_qp *qp = entry->qp; in ntb_rx_copy_callback() local
1488 void *offset = qp->rx_buff + qp->rx_max_frame * in ntb_rx_copy_callback()
1489 qp->rx_index; in ntb_rx_copy_callback()
1492 qp->rx_memcpy++; in ntb_rx_copy_callback()
1504 ntb_complete_rxc(entry->qp); in ntb_rx_copy_callback()
1523 struct ntb_transport_qp *qp = entry->qp; in ntb_async_rx_submit() local
1524 struct dma_chan *chan = qp->rx_dma_chan; in ntb_async_rx_submit()
1574 qp->last_cookie = cookie; in ntb_async_rx_submit()
1576 qp->rx_async++; in ntb_async_rx_submit()
1590 struct ntb_transport_qp *qp = entry->qp; in ntb_async_rx() local
1591 struct dma_chan *chan = qp->rx_dma_chan; in ntb_async_rx()
1605 qp->rx_async++; in ntb_async_rx()
1611 qp->rx_memcpy++; in ntb_async_rx()
1614 static int ntb_process_rxc(struct ntb_transport_qp *qp) in ntb_process_rxc() argument
1620 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; in ntb_process_rxc()
1621 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); in ntb_process_rxc()
1623 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n", in ntb_process_rxc()
1624 qp->qp_num, hdr->ver, hdr->len, hdr->flags); in ntb_process_rxc()
1627 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n"); in ntb_process_rxc()
1628 qp->rx_ring_empty++; in ntb_process_rxc()
1633 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n"); in ntb_process_rxc()
1634 ntb_qp_link_down(qp); in ntb_process_rxc()
1639 if (hdr->ver != (u32)qp->rx_pkts) { in ntb_process_rxc()
1640 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1642 qp->rx_pkts, hdr->ver); in ntb_process_rxc()
1643 qp->rx_err_ver++; in ntb_process_rxc()
1647 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); in ntb_process_rxc()
1649 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); in ntb_process_rxc()
1650 qp->rx_err_no_buf++; in ntb_process_rxc()
1655 entry->rx_index = qp->rx_index; in ntb_process_rxc()
1658 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1661 qp->rx_err_oflow++; in ntb_process_rxc()
1666 ntb_complete_rxc(qp); in ntb_process_rxc()
1668 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1670 qp->rx_index, hdr->ver, hdr->len, entry->len); in ntb_process_rxc()
1672 qp->rx_bytes += hdr->len; in ntb_process_rxc()
1673 qp->rx_pkts++; in ntb_process_rxc()
1680 qp->rx_index++; in ntb_process_rxc()
1681 qp->rx_index %= qp->rx_max_entry; in ntb_process_rxc()
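ntb_process_rxc() above locates each frame's metadata at the tail of the slot (line 1621) and accepts the frame only when its done flag is set, its link-down flag is clear, and hdr->ver matches the low 32 bits of rx_pkts (line 1639). A compilable sketch of one slot's layout; the 4 KiB frame size is illustrative, and the header fields mirror the driver's struct ntb_payload_header:

    #include <stdio.h>
    #include <string.h>

    #define DESC_DONE_FLAG (1u << 0)
    #define LINK_DOWN_FLAG (1u << 1)

    /* Mirror of the driver's per-frame trailer. */
    struct ntb_payload_header {
            unsigned int ver;       /* low 32 bits of the packet sequence */
            unsigned int len;       /* payload bytes actually written */
            unsigned int flags;     /* DESC_DONE_FLAG / LINK_DOWN_FLAG */
    };

    int main(void)
    {
            unsigned char frame[0x1000];    /* one frame slot (assumed 4 KiB) */
            struct ntb_payload_header *hdr;

            /* The header sits at the *end* of the slot; payload at offset 0. */
            hdr = (struct ntb_payload_header *)
                            (frame + sizeof(frame) - sizeof(*hdr));
            memcpy(frame, "hello", 5);
            hdr->len = 5;
            hdr->ver = 0;                   /* must equal (u32)rx_pkts on RX */
            hdr->flags = DESC_DONE_FLAG;    /* written last: marks frame valid */

            printf("payload at %p, header at %p, len %u\n",
                   (void *)frame, (void *)hdr, hdr->len);
            return 0;
    }
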
1688 struct ntb_transport_qp *qp = (void *)data; in ntb_transport_rxc_db() local
1691 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n", in ntb_transport_rxc_db()
1692 __func__, qp->qp_num); in ntb_transport_rxc_db()
1697 for (i = 0; i < qp->rx_max_entry; i++) { in ntb_transport_rxc_db()
1698 rc = ntb_process_rxc(qp); in ntb_transport_rxc_db()
1703 if (i && qp->rx_dma_chan) in ntb_transport_rxc_db()
1704 dma_async_issue_pending(qp->rx_dma_chan); in ntb_transport_rxc_db()
1706 if (i == qp->rx_max_entry) { in ntb_transport_rxc_db()
1708 if (qp->active) in ntb_transport_rxc_db()
1709 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rxc_db()
1710 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { in ntb_transport_rxc_db()
1712 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); in ntb_transport_rxc_db()
1714 ntb_db_read(qp->ndev); in ntb_transport_rxc_db()
1720 if (qp->active) in ntb_transport_rxc_db()
1721 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rxc_db()
1729 struct ntb_transport_qp *qp = entry->qp; in ntb_tx_copy_callback() local
1744 qp->tx_mw + qp->tx_max_frame * in ntb_tx_copy_callback()
1749 qp->tx_memcpy++; in ntb_tx_copy_callback()
1761 if (qp->use_msi) in ntb_tx_copy_callback()
1762 ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc); in ntb_tx_copy_callback()
1764 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); in ntb_tx_copy_callback()
1771 qp->tx_bytes += entry->len; in ntb_tx_copy_callback()
1773 if (qp->tx_handler) in ntb_tx_copy_callback()
1774 qp->tx_handler(qp, qp->cb_data, entry->cb_data, in ntb_tx_copy_callback()
1778 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); in ntb_tx_copy_callback()
1799 static int ntb_async_tx_submit(struct ntb_transport_qp *qp, in ntb_async_tx_submit() argument
1803 struct dma_chan *chan = qp->tx_dma_chan; in ntb_async_tx_submit()
1813 dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index; in ntb_async_tx_submit()
1858 static void ntb_async_tx(struct ntb_transport_qp *qp, in ntb_async_tx() argument
1862 struct dma_chan *chan = qp->tx_dma_chan; in ntb_async_tx()
1866 entry->tx_index = qp->tx_index; in ntb_async_tx()
1867 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index; in ntb_async_tx()
1868 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); in ntb_async_tx()
1872 iowrite32((u32)qp->tx_pkts, &hdr->ver); in ntb_async_tx()
1880 res = ntb_async_tx_submit(qp, entry); in ntb_async_tx()
1885 qp->tx_async++; in ntb_async_tx()
1891 qp->tx_memcpy++; in ntb_async_tx()
1894 static int ntb_process_tx(struct ntb_transport_qp *qp, in ntb_process_tx() argument
1897 if (qp->tx_index == qp->remote_rx_info->entry) { in ntb_process_tx()
1898 qp->tx_ring_full++; in ntb_process_tx()
1902 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { in ntb_process_tx()
1903 if (qp->tx_handler) in ntb_process_tx()
1904 qp->tx_handler(qp, qp->cb_data, NULL, -EIO); in ntb_process_tx()
1906 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_process_tx()
1907 &qp->tx_free_q); in ntb_process_tx()
1911 ntb_async_tx(qp, entry); in ntb_process_tx()
1913 qp->tx_index++; in ntb_process_tx()
1914 qp->tx_index %= qp->tx_max_entry; in ntb_process_tx()
1916 qp->tx_pkts++; in ntb_process_tx()
1921 static void ntb_send_link_down(struct ntb_transport_qp *qp) in ntb_send_link_down() argument
1923 struct pci_dev *pdev = qp->ndev->pdev; in ntb_send_link_down()
1927 if (!qp->link_is_up) in ntb_send_link_down()
1930 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num); in ntb_send_link_down()
1933 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_send_link_down()
1947 rc = ntb_process_tx(qp, entry); in ntb_send_link_down()
1950 qp->qp_num); in ntb_send_link_down()
1952 ntb_qp_link_down_reset(qp); in ntb_send_link_down()
1982 struct ntb_transport_qp *qp; in ntb_transport_create_queue() local
2002 qp = &nt->qp_vec[free_queue]; in ntb_transport_create_queue()
2003 qp_bit = BIT_ULL(qp->qp_num); in ntb_transport_create_queue()
2007 qp->cb_data = data; in ntb_transport_create_queue()
2008 qp->rx_handler = handlers->rx_handler; in ntb_transport_create_queue()
2009 qp->tx_handler = handlers->tx_handler; in ntb_transport_create_queue()
2010 qp->event_handler = handlers->event_handler; in ntb_transport_create_queue()
2016 qp->tx_dma_chan = in ntb_transport_create_queue()
2019 if (!qp->tx_dma_chan) in ntb_transport_create_queue()
2022 qp->rx_dma_chan = in ntb_transport_create_queue()
2025 if (!qp->rx_dma_chan) in ntb_transport_create_queue()
2028 qp->tx_dma_chan = NULL; in ntb_transport_create_queue()
2029 qp->rx_dma_chan = NULL; in ntb_transport_create_queue()
2032 qp->tx_mw_dma_addr = 0; in ntb_transport_create_queue()
2033 if (qp->tx_dma_chan) { in ntb_transport_create_queue()
2034 qp->tx_mw_dma_addr = in ntb_transport_create_queue()
2035 dma_map_resource(qp->tx_dma_chan->device->dev, in ntb_transport_create_queue()
2036 qp->tx_mw_phys, qp->tx_mw_size, in ntb_transport_create_queue()
2038 if (dma_mapping_error(qp->tx_dma_chan->device->dev, in ntb_transport_create_queue()
2039 qp->tx_mw_dma_addr)) { in ntb_transport_create_queue()
2040 qp->tx_mw_dma_addr = 0; in ntb_transport_create_queue()
2046 qp->tx_dma_chan ? "DMA" : "CPU"); in ntb_transport_create_queue()
2049 qp->rx_dma_chan ? "DMA" : "CPU"); in ntb_transport_create_queue()
2056 entry->qp = qp; in ntb_transport_create_queue()
2057 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, in ntb_transport_create_queue()
2058 &qp->rx_free_q); in ntb_transport_create_queue()
2060 qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; in ntb_transport_create_queue()
2062 for (i = 0; i < qp->tx_max_entry; i++) { in ntb_transport_create_queue()
2067 entry->qp = qp; in ntb_transport_create_queue()
2068 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_create_queue()
2069 &qp->tx_free_q); in ntb_transport_create_queue()
2072 ntb_db_clear(qp->ndev, qp_bit); in ntb_transport_create_queue()
2073 ntb_db_clear_mask(qp->ndev, qp_bit); in ntb_transport_create_queue()
2075 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); in ntb_transport_create_queue()
2077 return qp; in ntb_transport_create_queue()
2080 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_create_queue()
2083 qp->rx_alloc_entry = 0; in ntb_transport_create_queue()
2084 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_create_queue()
2086 if (qp->tx_mw_dma_addr) in ntb_transport_create_queue()
2087 dma_unmap_resource(qp->tx_dma_chan->device->dev, in ntb_transport_create_queue()
2088 qp->tx_mw_dma_addr, qp->tx_mw_size, in ntb_transport_create_queue()
2090 if (qp->tx_dma_chan) in ntb_transport_create_queue()
2091 dma_release_channel(qp->tx_dma_chan); in ntb_transport_create_queue()
2092 if (qp->rx_dma_chan) in ntb_transport_create_queue()
2093 dma_release_channel(qp->rx_dma_chan); in ntb_transport_create_queue()
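ntb_transport_create_queue() above is the client-facing entry point (ntb_netdev is the in-tree user). A minimal kernel-side sketch of bringing a queue up, assuming the current mainline signature (void *data, struct device *client_dev, const struct ntb_queue_handlers *handlers); the my_* names, buffer size, and buffer count are illustrative, not part of the API:

    #include <linux/module.h>
    #include <linux/ntb_transport.h>
    #include <linux/slab.h>

    #define MY_RX_BUF_SIZE 4096  /* illustrative; see ntb_transport_max_size() */
    #define MY_RX_BUFS     16

    static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
            /* 'data' is the cookie passed to rx_enqueue (here: the buffer). */
            if (len >= 0)
                    pr_info("qp%u: rx %d bytes\n",
                            ntb_transport_qp_num(qp), len);
            ntb_transport_rx_enqueue(qp, data, data, MY_RX_BUF_SIZE); /* repost */
    }

    static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
            kfree(data);    /* the TX buffer is ours again once this fires */
    }

    static void my_event(void *data, int link_is_up)
    {
            pr_info("qp link %s\n", link_is_up ? "up" : "down");
    }

    static const struct ntb_queue_handlers my_handlers = {
            .rx_handler = my_rx,
            .tx_handler = my_tx,
            .event_handler = my_event,
    };

    /* 'client_dev' would be the device handed to an ntb_transport client's
     * probe(); this is a fragment, not a complete module. */
    static struct ntb_transport_qp *my_open(struct device *client_dev)
    {
            struct ntb_transport_qp *qp;
            void *buf;
            int i;

            qp = ntb_transport_create_queue(NULL, client_dev, &my_handlers);
            if (!qp)
                    return NULL;

            /* Post receive buffers before declaring ourselves ready. */
            for (i = 0; i < MY_RX_BUFS; i++) {
                    buf = kmalloc(MY_RX_BUF_SIZE, GFP_KERNEL);
                    if (buf)
                            ntb_transport_rx_enqueue(qp, buf, buf,
                                                     MY_RX_BUF_SIZE);
            }

            ntb_transport_link_up(qp);  /* sets client_ready, kicks link_work */
            return qp;
    }

Transmit is the mirror image: ntb_transport_tx_enqueue(qp, buf, buf, len), with the buffer handed back through the tx_handler.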
2106 void ntb_transport_free_queue(struct ntb_transport_qp *qp) in ntb_transport_free_queue() argument
2112 if (!qp) in ntb_transport_free_queue()
2115 pdev = qp->ndev->pdev; in ntb_transport_free_queue()
2117 qp->active = false; in ntb_transport_free_queue()
2119 if (qp->tx_dma_chan) { in ntb_transport_free_queue()
2120 struct dma_chan *chan = qp->tx_dma_chan; in ntb_transport_free_queue()
2124 qp->tx_dma_chan = NULL; in ntb_transport_free_queue()
2129 dma_sync_wait(chan, qp->last_cookie); in ntb_transport_free_queue()
2133 qp->tx_mw_dma_addr, qp->tx_mw_size, in ntb_transport_free_queue()
2139 if (qp->rx_dma_chan) { in ntb_transport_free_queue()
2140 struct dma_chan *chan = qp->rx_dma_chan; in ntb_transport_free_queue()
2144 qp->rx_dma_chan = NULL; in ntb_transport_free_queue()
2149 dma_sync_wait(chan, qp->last_cookie); in ntb_transport_free_queue()
2154 qp_bit = BIT_ULL(qp->qp_num); in ntb_transport_free_queue()
2156 ntb_db_set_mask(qp->ndev, qp_bit); in ntb_transport_free_queue()
2157 tasklet_kill(&qp->rxc_db_work); in ntb_transport_free_queue()
2159 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_free_queue()
2161 qp->cb_data = NULL; in ntb_transport_free_queue()
2162 qp->rx_handler = NULL; in ntb_transport_free_queue()
2163 qp->tx_handler = NULL; in ntb_transport_free_queue()
2164 qp->event_handler = NULL; in ntb_transport_free_queue()
2166 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_free_queue()
2169 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { in ntb_transport_free_queue()
2174 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { in ntb_transport_free_queue()
2179 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_free_queue()
2182 qp->transport->qp_bitmap_free |= qp_bit; in ntb_transport_free_queue()
2184 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); in ntb_transport_free_queue()
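ntb_transport_free_queue() above quiesces DMA, masks the qp's doorbell bit, kills the RX tasklet, and drains all four entry lists. A sketch of the matching client close path (modeled on ntb_netdev; my_close is a hypothetical name); note that ntb_transport_rx_remove() refuses to hand buffers back while client_ready is still set (line 2203), so the link must go down first:

    #include <linux/ntb_transport.h>
    #include <linux/slab.h>

    static void my_close(struct ntb_transport_qp *qp)
    {
            void *buf;
            unsigned int len;

            ntb_transport_link_down(qp);  /* sends the link-down frame if up */

            /* Reclaim RX buffers still sitting on rx_pend_q. */
            while ((buf = ntb_transport_rx_remove(qp, &len)))
                    kfree(buf);

            ntb_transport_free_queue(qp); /* releases DMA channels, entries */
    }
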
2198 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) in ntb_transport_rx_remove() argument
2203 if (!qp || qp->client_ready) in ntb_transport_rx_remove()
2206 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); in ntb_transport_rx_remove()
2213 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); in ntb_transport_rx_remove()
2231 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, in ntb_transport_rx_enqueue() argument
2236 if (!qp) in ntb_transport_rx_enqueue()
2239 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); in ntb_transport_rx_enqueue()
2251 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); in ntb_transport_rx_enqueue()
2253 if (qp->active) in ntb_transport_rx_enqueue()
2254 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rx_enqueue()
2273 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, in ntb_transport_tx_enqueue() argument
2279 if (!qp || !qp->link_is_up || !len) in ntb_transport_tx_enqueue()
2282 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_transport_tx_enqueue()
2284 qp->tx_err_no_buf++; in ntb_transport_tx_enqueue()
2296 rc = ntb_process_tx(qp, entry); in ntb_transport_tx_enqueue()
2298 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_tx_enqueue()
2299 &qp->tx_free_q); in ntb_transport_tx_enqueue()
2311 void ntb_transport_link_up(struct ntb_transport_qp *qp) in ntb_transport_link_up() argument
2313 if (!qp) in ntb_transport_link_up()
2316 qp->client_ready = true; in ntb_transport_link_up()
2318 if (qp->transport->link_is_up) in ntb_transport_link_up()
2319 schedule_delayed_work(&qp->link_work, 0); in ntb_transport_link_up()
2331 void ntb_transport_link_down(struct ntb_transport_qp *qp) in ntb_transport_link_down() argument
2335 if (!qp) in ntb_transport_link_down()
2338 qp->client_ready = false; in ntb_transport_link_down()
2340 val = ntb_spad_read(qp->ndev, QP_LINKS); in ntb_transport_link_down()
2342 ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num)); in ntb_transport_link_down()
2344 if (qp->link_is_up) in ntb_transport_link_down()
2345 ntb_send_link_down(qp); in ntb_transport_link_down()
2347 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_link_down()
2359 bool ntb_transport_link_query(struct ntb_transport_qp *qp) in ntb_transport_link_query() argument
2361 if (!qp) in ntb_transport_link_query()
2364 return qp->link_is_up; in ntb_transport_link_query()
2376 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) in ntb_transport_qp_num() argument
2378 if (!qp) in ntb_transport_qp_num()
2381 return qp->qp_num; in ntb_transport_qp_num()
2393 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) in ntb_transport_max_size() argument
2399 if (!qp) in ntb_transport_max_size()
2402 rx_chan = qp->rx_dma_chan; in ntb_transport_max_size()
2403 tx_chan = qp->tx_dma_chan; in ntb_transport_max_size()
2409 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header); in ntb_transport_max_size()
2416 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) in ntb_transport_tx_free_entry() argument
2418 unsigned int head = qp->tx_index; in ntb_transport_tx_free_entry()
2419 unsigned int tail = qp->remote_rx_info->entry; in ntb_transport_tx_free_entry()
2421 return tail > head ? tail - head : qp->tx_max_entry + tail - head; in ntb_transport_tx_free_entry()
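ntb_transport_tx_free_entry() above (line 2421) measures ring space from the local producer index (tx_index) and the consumer index the peer publishes back through rx_info. A worked sketch of the formula; a freshly reset ring reports tx_max_entry - 1 free because remote_rx_info->entry is seeded to rx_max_entry - 1 (line 667):

    #include <stdio.h>

    /* Mirror of ntb_transport_tx_free_entry(): head is the local producer
     * index (tx_index), tail the consumer index the peer writes back
     * (remote_rx_info->entry). */
    static unsigned int tx_free_entry(unsigned int head, unsigned int tail,
                                      unsigned int tx_max_entry)
    {
            return tail > head ? tail - head : tx_max_entry + tail - head;
    }

    int main(void)
    {
            unsigned int max = 32;  /* illustrative ring size */

            /* Freshly reset ring: head = 0, tail seeded to max - 1. */
            printf("empty: %u free\n", tx_free_entry(0, max - 1, max));  /* 31 */
            /* Producer wrapped past the consumer. */
            printf("wrapped: %u free\n", tx_free_entry(10, 4, max));     /* 26 */
            return 0;
    }
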
2428 struct ntb_transport_qp *qp; in ntb_transport_doorbell_callback() local
2442 qp = &nt->qp_vec[qp_num]; in ntb_transport_doorbell_callback()
2444 if (qp->active) in ntb_transport_doorbell_callback()
2445 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_doorbell_callback()