/drivers/net/ethernet/cavium/liquidio/
cn66xx_device.c
     89  r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));   in lio_cn6xxx_setup_pcie_mps()
     91  lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));   in lio_cn6xxx_setup_pcie_mps()
    112  r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));   in lio_cn6xxx_setup_pcie_mrrs()
    114  octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);   in lio_cn6xxx_setup_pcie_mrrs()
    117  r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));   in lio_cn6xxx_setup_pcie_mrrs()
    119  lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));   in lio_cn6xxx_setup_pcie_mrrs()
    420  bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));   in lio_cn6xxx_bar1_idx_setup()
    423  bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));   in lio_cn6xxx_bar1_idx_setup()
    433  bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));   in lio_cn6xxx_bar1_idx_setup()
    440  lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));   in lio_cn6xxx_bar1_idx_write()
    [all …]
|
cn23xx_pf_device.c
     44  oct->octeon_id);   in cn23xx_pf_soft_reset()
     57  oct->octeon_id);   in cn23xx_pf_soft_reset()
     62  oct->octeon_id);   in cn23xx_pf_soft_reset()
     96  oct->octeon_id);   in cn23xx_enable_error_reporting()
    652  oct,   in cn23xx_enable_io_queues()
    879  oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));   in cn23xx_bar1_idx_setup()
    884  oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));   in cn23xx_bar1_idx_setup()
    896  oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));   in cn23xx_bar1_idx_setup()
    908  oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));   in cn23xx_bar1_idx_read()
   1129  oct->sriov_info.trs, oct->sriov_info.max_vfs,   in cn23xx_sriov_config()
    [all …]
|
cn68xx_device.c
     57  lio_pci_readq(oct, CN6XXX_DPI_CTL));   in lio_cn68xx_set_dpi_regs()
     62  lio_cn6xxx_soft_reset(oct);   in lio_cn68xx_soft_reset()
     63  lio_cn68xx_set_dpi_regs(oct);   in lio_cn68xx_soft_reset()
     97  lio_cn68xx_setup_pkt_ctl_regs(oct);   in lio_cn68xx_setup_device_regs()
    130  if (octeon_map_pci_barx(oct, 0, 0))   in lio_setup_cn68xx_octeon_device()
    136  octeon_unmap_pci_barx(oct, 0);   in lio_setup_cn68xx_octeon_device()
    160  lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);   in lio_setup_cn68xx_octeon_device()
    163  if (lio_is_210nv(oct))   in lio_setup_cn68xx_octeon_device()
    173  octeon_unmap_pci_barx(oct, 0);   in lio_setup_cn68xx_octeon_device()
    174  octeon_unmap_pci_barx(oct, 1);   in lio_setup_cn68xx_octeon_device()
    [all …]
|
octeon_device.c
    661  vfree(oct);   in octeon_free_device_mem()
    717  return oct;   in octeon_allocate_device_mem()
    734  if (oct) {   in octeon_allocate_device()
    741  if (!oct)   in octeon_allocate_device()
    748  snprintf(oct->device_name, sizeof(oct->device_name),   in octeon_allocate_device()
    807  oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);   in octeon_register_device()
    825  oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);   in octeon_deregister_device()
    939  oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));   in octeon_setup_output_queues()
   1285  if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&   in octeon_get_tx_qsize()
   1295  if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&   in octeon_get_rx_qsize()
    [all …]
|
cn23xx_vf_device.c
     59  d64 = octeon_read_csr64(oct,   in cn23xx_vf_reset_io_queues()
     78  dev_err(&oct->pci_dev->dev,   in cn23xx_vf_reset_io_queues()
     91  dev_err(&oct->pci_dev->dev,   in cn23xx_vf_reset_io_queues()
    108  if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))   in cn23xx_vf_setup_global_input_regs()
    237  if (oct->msix_on) {   in cn23xx_setup_vf_iq_regs()
    274  vfree(oct->mbox[0]);   in cn23xx_free_vf_mbox()
    288  mbox->oct_dev = oct;   in cn23xx_setup_vf_mbox()
    309  oct->mbox[0] = mbox;   in cn23xx_setup_vf_mbox()
    450  oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;   in cn23xx_octeon_pfvf_handshake()
    466  oct->pfvf_hsword.pkind);   in cn23xx_octeon_pfvf_handshake()
    [all …]
|
octeon_console.c
    206  oct, oct->bootmem_desc_addr,   in __cvmx_bootmem_check_version()
    210  oct, oct->bootmem_desc_addr,   in __cvmx_bootmem_check_version()
    237  oct, named_addr,   in __cvmx_bootmem_find_named_block_flags()
    267  oct, oct->bootmem_desc_addr,   in cvmx_bootmem_phy_named_block_find()
    273  oct, oct->bootmem_desc_addr,   in cvmx_bootmem_phy_named_block_find()
    280  oct, oct->bootmem_desc_addr,   in cvmx_bootmem_phy_named_block_find()
    290  oct, named_addr,   in cvmx_bootmem_phy_named_block_find()
    557  oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index,   in octeon_init_consoles()
    565  oct->num_consoles = octeon_read_device_mem32(oct,   in octeon_init_consoles()
    571  oct->num_consoles);   in octeon_init_consoles()
    [all …]
|
request_manager.c
     58  if (OCTEON_CN6XXX(oct))   in octeon_init_instr_queue()
     67  oct->chip_id);   in octeon_init_instr_queue()
     75  iq->oct_dev = oct;   in octeon_init_instr_queue()
    132  oct->fn_list.setup_iq_regs(oct, iq_no);   in octeon_init_instr_queue()
    183  oct->num_iqs--;   in octeon_delete_instr_queue()
    227  oct->num_iqs++;   in octeon_setup_iq()
    228  if (oct->fn_list.enable_io_queues(oct)) {   in octeon_setup_iq()
    498  if (!oct)   in __check_db_timeout()
    597  if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {   in octeon_prepare_soft_command()
    701  if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {   in octeon_send_soft_command()
    [all …]
|
lio_vf_main.c
    180  oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);   in stop_pci_io()
    228  stop_pci_io(oct);   in liquidio_pcie_error_detected()
    473  oct->fn_list.disable_io_queues(oct);   in octeon_destroy_resources()
    506  oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);   in octeon_destroy_resources()
    538  oct->fn_list.free_mbox(oct);   in octeon_destroy_resources()
   2318  if (oct->fn_list.setup_device_regs(oct)) {   in octeon_device_init()
   2352  if (oct->fn_list.setup_mbox(oct)) {   in octeon_device_init()
   2368  if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))   in octeon_device_init()
   2381  oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);   in octeon_device_init()
   2391  oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);   in octeon_device_init()
    [all …]
|
lio_main.c
    168  if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {   in octeon_droq_bh()
    310  free_irq(oct->pci_dev->irq, oct);   in stop_pci_io()
    727  if (!oct)   in disable_all_vf_links()
    982  oct->fn_list.disable_io_queues(oct);   in octeon_destroy_resources()
   1038  free_irq(oct->pci_dev->irq, oct);   in octeon_destroy_resources()
   1055  oct->fn_list.free_mbox(oct);   in octeon_destroy_resources()
   1117  oct->fn_list.soft_reset(oct);   in octeon_destroy_resources()
   1789  if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {   in liquidio_open()
   1819  if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)   in liquidio_open()
   3128  oct = priv->oct;   in liquidio_eswitch_mode_get()
    [all …]
|
lio_vf_rep.c
    113  struct octeon_device *oct;   in lio_vf_rep_open() local
    116  oct = vf_rep->oct;   in lio_vf_rep_open()
    146  struct octeon_device *oct;   in lio_vf_rep_stop() local
    149  oct = vf_rep->oct;   in lio_vf_rep_stop()
    206  oct = vf_rep->oct;   in lio_vf_rep_change_mtu()
    231  struct octeon_device *oct = vf_rep->oct;   in lio_vf_rep_phys_port_name() local
    304  if (!oct)   in lio_vf_rep_pkt_recv()
    373  struct octeon_device *oct = vf_rep->oct;   in lio_vf_rep_pkt_xmit() local
    470  oct = vf_rep->oct;   in lio_vf_rep_fetch_stats()
    521  vf_rep->oct = oct;   in lio_vf_rep_create()
    [all …]
|
lio_core.c
    128  lio_dma_alloc(oct,   in lio_setup_glists()
    737  if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||   in liquidio_napi_drv_callback()
    996  oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);   in liquidio_legacy_intr_handler()
    998  ret = oct->fn_list.process_interrupt_regs(oct);   in liquidio_legacy_intr_handler()
   1005  oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);   in liquidio_legacy_intr_handler()
   1054  oct->msix_entries = kcalloc(oct->num_msix_irqs,   in octeon_setup_interrupt()
   1123  oct->octeon_id, oct->pf_num, i);   in octeon_setup_interrupt()
   1128  oct->octeon_id, oct->vf_num, i);   in octeon_setup_interrupt()
   1141  oct);   in octeon_setup_interrupt()
   1185  oct->octeon_id, oct->pf_num, 0);   in octeon_setup_interrupt()
    [all …]
|
octeon_mem_ops.c
     35  mask = oct->fn_list.bar1_idx_read(oct, idx);   in octeon_toggle_bar1_swapmode()
     37  oct->fn_list.bar1_idx_write(oct, idx, mask);   in octeon_toggle_bar1_swapmode()
     40  #define octeon_toggle_bar1_swapmode(oct, idx)   argument
     52  octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);   in octeon_pci_fastwrite()
     61  octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);   in octeon_pci_fastwrite()
     76  octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);   in octeon_pci_fastread()
    108  mapped_addr = oct->mmio[1].hw_addr   in __octeon_pci_rw_core_mem()
    123  index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);   in __octeon_pci_rw_core_mem()
    125  oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);   in __octeon_pci_rw_core_mem()
    126  mapped_addr = oct->mmio[1].hw_addr   in __octeon_pci_rw_core_mem()
    [all …]
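The octeon_mem_ops.c hits above show how __octeon_pci_rw_core_mem() reaches Octeon core memory through a movable BAR1 window: read and save the current index mapping (bar1_idx_read), retarget the window at the address (bar1_idx_setup), copy through mmio[1].hw_addr, then restore the saved mapping. A minimal stand-alone sketch of that save/retarget/restore pattern follows; every name and the window size here are illustrative stand-ins, not the driver's API.

/* Sketch of the BAR1 window pattern visible in the hits above.
 * All names, sizes, and the in-memory "window" are illustrative.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_WINDOW_SIZE  (1u << 22)           /* pretend 4 MB window */

static uint64_t demo_idx_reg;                  /* stands in for the BAR1 index CSR */
static uint8_t  demo_window[DEMO_WINDOW_SIZE]; /* stands in for mmio[1].hw_addr */

static uint64_t demo_bar1_idx_read(void)           { return demo_idx_reg; }
static void     demo_bar1_idx_write(uint64_t val)  { demo_idx_reg = val; }
static void     demo_bar1_idx_setup(uint64_t addr) { demo_idx_reg = addr / DEMO_WINDOW_SIZE; }

static void demo_read_core_mem(uint64_t addr, void *buf, size_t len)
{
	uint64_t saved = demo_bar1_idx_read();     /* save the current mapping */

	demo_bar1_idx_setup(addr);                 /* point the window at the target */
	memcpy(buf, &demo_window[addr % DEMO_WINDOW_SIZE], len);
	demo_bar1_idx_write(saved);                /* restore the previous mapping */
}

int main(void)
{
	uint8_t buf[16];

	demo_read_core_mem(0x1000, buf, sizeof(buf));
	return 0;
}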
|
lio_ethtool.c
    424  oct->speed_boot == oct->speed_setting)   in lio_set_link_ksettings()
   1109  oct->fn_list.disable_io_queues(oct);   in lio_reset_queues()
   1150  oct->fn_list.free_mbox(oct);   in lio_reset_queues()
   1180  if (oct->fn_list.setup_device_regs(oct)) {   in lio_reset_queues()
   1197  if (oct->fn_list.setup_mbox(oct)) {   in lio_reset_queues()
   1212  if (oct->fn_list.enable_io_queues(oct)) {   in lio_reset_queues()
   2556  reg, oct->pcie_port, oct->pf_num,   in cn23xx_read_csr_reg()
   2563  reg, oct->pcie_port, oct->pf_num,   in cn23xx_read_csr_reg()
   2570  reg, oct->pcie_port, oct->pf_num,   in cn23xx_read_csr_reg()
   2583  oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));   in cn23xx_read_csr_reg()
    [all …]
|
octeon_droq.c
    209  vfree(oct->droq[q_no]);   in octeon_delete_droq()
    210  oct->droq[q_no] = NULL;   in octeon_delete_droq()
    211  oct->num_oqs--;   in octeon_delete_droq()
    233  droq->oct_dev = oct;   in octeon_init_droq()
    301  oct->fn_list.setup_oq_regs(oct, q_no);   in octeon_init_droq()
    762  (oct,   in octeon_droq_process_packets()
    813  (oct,   in octeon_droq_process_poll_pkts()
    849  lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);   in octeon_enable_irq()
    853  lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);   in octeon_enable_irq()
    957  oct->num_oqs++;   in octeon_create_droq()
    [all …]
|
octeon_main.h
     98  if (oct->mmio[baridx].done)   in octeon_unmap_pci_barx()
     99  iounmap(oct->mmio[baridx].hw_addr);   in octeon_unmap_pci_barx()
    101  if (oct->mmio[baridx].start)   in octeon_unmap_pci_barx()
    122  oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);   in octeon_map_pci_barx()
    123  oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);   in octeon_map_pci_barx()
    125  mapped_len = oct->mmio[baridx].len;   in octeon_map_pci_barx()
    132  oct->mmio[baridx].hw_addr =   in octeon_map_pci_barx()
    134  oct->mmio[baridx].mapped_len = mapped_len;   in octeon_map_pci_barx()
    138  oct->mmio[baridx].len);   in octeon_map_pci_barx()
    140  if (!oct->mmio[baridx].hw_addr) {   in octeon_map_pci_barx()
    [all …]
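Across the liquidio hits, the chip-independent code (request_manager.c, octeon_droq.c, octeon_console.c, octeon_mem_ops.c) never touches CSRs directly; it dispatches through the oct->fn_list function-pointer table that the cn66xx/cn68xx/cn23xx setup code fills in at probe time. Below is a compilable toy sketch of that dispatch pattern, assuming simplified types and names that are not the driver's own definitions.

/* Toy model of the fn_list dispatch seen in calls like
 * oct->fn_list.setup_iq_regs(oct, iq_no). Names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

struct my_octeon_dev;                /* hypothetical stand-in for struct octeon_device */

struct my_fn_list {
	void (*setup_iq_regs)(struct my_octeon_dev *oct, int iq_no);
	int  (*enable_io_queues)(struct my_octeon_dev *oct);
	uint64_t (*bar1_idx_read)(struct my_octeon_dev *oct, int idx);
};

struct my_octeon_dev {
	struct my_fn_list fn_list;       /* filled in by chip-specific setup */
};

static void demo_cn23xx_setup_iq_regs(struct my_octeon_dev *oct, int iq_no)
{
	printf("CN23XX: program IQ %d registers\n", iq_no);
}

static int demo_cn23xx_enable_io_queues(struct my_octeon_dev *oct)
{
	return 0;                        /* success */
}

static uint64_t demo_cn23xx_bar1_idx_read(struct my_octeon_dev *oct, int idx)
{
	return 0;                        /* would read the BAR1 index CSR */
}

int main(void)
{
	struct my_octeon_dev oct = {
		.fn_list = {                 /* chip probe picks the right table */
			.setup_iq_regs    = demo_cn23xx_setup_iq_regs,
			.enable_io_queues = demo_cn23xx_enable_io_queues,
			.bar1_idx_read    = demo_cn23xx_bar1_idx_read,
		},
	};

	/* common code stays chip-agnostic, as in octeon_setup_iq(): */
	oct.fn_list.setup_iq_regs(&oct, 0);
	if (oct.fn_list.enable_io_queues(&oct))
		return 1;
	return 0;
}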
|
/drivers/net/ethernet/marvell/octeon_ep/
octep_cn9k_pf.c
    161  cn93_reset_iq(oct, q);   in octep_reset_io_queues_cn93_pf()
    162  cn93_reset_oq(oct, q);   in octep_reset_io_queues_cn93_pf()
    188  if (oct->pcie_port)   in octep_configure_ring_mapping_cn93_pf()
    218  val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));   in octep_init_config_cn93_pf()
    656  oct->hw_ops.setup_iq_regs(oct, i);   in octep_reinit_regs_cn93_pf()
    659  oct->hw_ops.setup_oq_regs(oct, i);   in octep_reinit_regs_cn93_pf()
    661  oct->hw_ops.enable_interrupts(oct);   in octep_reinit_regs_cn93_pf()
    662  oct->hw_ops.enable_io_queues(oct);   in octep_reinit_regs_cn93_pf()
    665  writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);   in octep_reinit_regs_cn93_pf()
    834  cn93_dump_regs(oct, q);   in octep_dump_registers_cn93_pf()
    [all …]
|
octep_cnxk_pf.c
    184  cnxk_reset_iq(oct, q);   in octep_reset_io_queues_cnxk_pf()
    185  cnxk_reset_oq(oct, q);   in octep_reset_io_queues_cnxk_pf()
    211  if (oct->pcie_port)   in octep_configure_ring_mapping_cnxk_pf()
    242  val = octep_read_csr64(oct, CNXK_SDP_MAC_PF_RING_CTL(oct->pcie_port));   in octep_init_config_cnxk_pf()
    681  oct->hw_ops.setup_iq_regs(oct, i);   in octep_reinit_regs_cnxk_pf()
    684  oct->hw_ops.setup_oq_regs(oct, i);   in octep_reinit_regs_cnxk_pf()
    686  oct->hw_ops.enable_interrupts(oct);   in octep_reinit_regs_cnxk_pf()
    687  oct->hw_ops.enable_io_queues(oct);   in octep_reinit_regs_cnxk_pf()
    690  writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);   in octep_reinit_regs_cnxk_pf()
    857  cnxk_dump_regs(oct, q);   in octep_dump_registers_cnxk_pf()
    [all …]
|
octep_main.c
    175  return oct->hw_ops.mbox_intr_handler(oct);   in octep_mbox_intr_handler()
    190  return oct->hw_ops.oei_intr_handler(oct);   in octep_oei_intr_handler()
    205  return oct->hw_ops.ire_intr_handler(oct);   in octep_ire_intr_handler()
    220  return oct->hw_ops.ore_intr_handler(oct);   in octep_ore_intr_handler()
    698  oct->hw_ops.reset_io_queues(oct);   in octep_open()
    725  oct->hw_ops.enable_io_queues(oct);   in octep_open()
    728  oct->hw_ops.enable_interrupts(oct);   in octep_open()
    775  oct->hw_ops.disable_interrupts(oct);   in octep_stop()
    782  oct->hw_ops.disable_io_queues(oct);   in octep_stop()
    783  oct->hw_ops.reset_io_queues(oct);   in octep_stop()
    [all …]
|
octep_pfvf_mbox.c
    160  dev_err(&oct->pdev->dev,   in octep_pfvf_set_mac_addr()
    269  oct->mbox[ring] = vzalloc(sizeof(*oct->mbox[ring]));   in octep_setup_pfvf_mbox()
    271  if (!oct->mbox[ring])   in octep_setup_pfvf_mbox()
    278  oct->mbox[ring]->wk.ctxptr = oct->mbox[ring];   in octep_setup_pfvf_mbox()
    279  oct->mbox[ring]->oct = oct;   in octep_setup_pfvf_mbox()
    281  oct->hw_ops.setup_mbox_regs(oct, ring);   in octep_setup_pfvf_mbox()
    291  vfree(oct->mbox[ring]);   in octep_setup_pfvf_mbox()
    292  oct->mbox[ring] = NULL;   in octep_setup_pfvf_mbox()
    305  if (!oct->mbox[ring])   in octep_delete_pfvf_mbox()
    312  vfree(oct->mbox[ring]);   in octep_delete_pfvf_mbox()
    [all …]
|
octep_main.h
     95  int (*soft_reset)(struct octep_device *oct);
     96  void (*reinit_regs)(struct octep_device *oct);
    140  struct octep_device *oct;   member
    332  u16 rev = (oct->rev_id & 0xC) >> 2;   in OCTEP_MAJOR_REV()
    339  return (oct->rev_id & 0x3);   in OCTEP_MINOR_REV()
    373  dev_dbg(&oct->pdev->dev,   in OCTEP_PCI_WIN_READ()
    395  dev_dbg(&oct->pdev->dev,   in OCTEP_PCI_WIN_WRITE()
    402  int octep_setup_iqs(struct octep_device *oct);
    403  void octep_free_iqs(struct octep_device *oct);
    405  int octep_setup_oqs(struct octep_device *oct);
    [all …]
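The OCTEP_MAJOR_REV()/OCTEP_MINOR_REV() hits reveal how the revision is packed into rev_id: bits [3:2] carry the major revision and bits [1:0] the minor. A small stand-alone sketch of that decoding; the treat-zero-as-one fallback is an assumption, and the demo_* helper names are hypothetical.

/* Decode the rev_id packing seen in the hits above:
 * bits [3:2] = major revision, bits [1:0] = minor revision.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t demo_major_rev(uint8_t rev_id)
{
	uint16_t rev = (rev_id & 0xC) >> 2;   /* bits [3:2] */
	return rev ? rev : 1;                 /* assumption: revision 0 reads as 1 */
}

static uint16_t demo_minor_rev(uint8_t rev_id)
{
	return rev_id & 0x3;                  /* bits [1:0] */
}

int main(void)
{
	uint8_t rev_id = 0x9;                 /* 0b1001 -> major 2, minor 1 */

	printf("rev %u.%u\n", demo_major_rev(rev_id), demo_minor_rev(rev_id));
	return 0;
}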
|
octep_ctrl_net.c
    104  struct pci_dev *pdev = oct->pdev;   in octep_ctrl_net_init()
    108  INIT_LIST_HEAD(&oct->ctrl_req_wait_list);   in octep_ctrl_net_init()
    111  ctrl_mbox = &oct->ctrl_mbox;   in octep_ctrl_net_init()
    136  err = octep_send_mbox_req(oct, &d, true);   in octep_ctrl_net_get_link_status()
    182  err = octep_send_mbox_req(oct, &d, true);   in octep_ctrl_net_get_mac_addr()
    216  err = octep_send_mbox_req(oct, &d, true);   in octep_ctrl_net_get_mtu()
    248  err = octep_send_mbox_req(oct, &d, true);   in octep_ctrl_net_get_if_stats()
    321  struct net_device *netdev = oct->netdev;   in process_mbox_notify()
    334  octep_pfvf_notify(oct, msg);   in process_mbox_notify()
    377  process_mbox_resp(oct, &msg);   in octep_ctrl_net_recv_fw_messages()
    [all …]
|
/drivers/net/ethernet/marvell/octeon_ep_vf/
octep_vf_cn9k.c
    130  cn93_vf_reset_iq(oct, q);   in octep_vf_reset_io_queues_cn93()
    131  cn93_vf_reset_oq(oct, q);   in octep_vf_reset_io_queues_cn93()
    262  if (oct->mbox)   in cn93_handle_vf_mbox_intr()
    272  struct octep_vf_device *oct;   in octep_vf_ioq_intr_handler_cn93() local
    276  oct = vector->octep_vf_dev;   in octep_vf_ioq_intr_handler_cn93()
    296  oct->hw_ops.setup_iq_regs(oct, i);   in octep_vf_reinit_regs_cn93()
    299  oct->hw_ops.setup_oq_regs(oct, i);   in octep_vf_reinit_regs_cn93()
    301  oct->hw_ops.enable_interrupts(oct);   in octep_vf_reinit_regs_cn93()
    302  oct->hw_ops.enable_io_queues(oct);   in octep_vf_reinit_regs_cn93()
    305  writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);   in octep_vf_reinit_regs_cn93()
    [all …]
|
octep_vf_cnxk.c
    132  cnxk_vf_reset_iq(oct, q);   in octep_vf_reset_io_queues_cnxk()
    133  cnxk_vf_reset_oq(oct, q);   in octep_vf_reset_io_queues_cnxk()
    273  if (oct->mbox)   in cnxk_handle_vf_mbox_intr()
    283  struct octep_vf_device *oct;   in octep_vf_ioq_intr_handler_cnxk() local
    287  oct = vector->octep_vf_dev;   in octep_vf_ioq_intr_handler_cnxk()
    307  oct->hw_ops.setup_iq_regs(oct, i);   in octep_vf_reinit_regs_cnxk()
    310  oct->hw_ops.setup_oq_regs(oct, i);   in octep_vf_reinit_regs_cnxk()
    312  oct->hw_ops.enable_interrupts(oct);   in octep_vf_reinit_regs_cnxk()
    313  oct->hw_ops.enable_io_queues(oct);   in octep_vf_reinit_regs_cnxk()
    316  writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);   in octep_vf_reinit_regs_cnxk()
    [all …]
|
octep_vf_main.c
     57  oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));   in octep_vf_alloc_ioq_vectors()
    221  free_irq(oct->msix_entries[i].vector, oct);   in octep_vf_request_irqs()
    354  oct->oq[i]->napi = &oct->ioq_vector[i]->napi;   in octep_vf_napi_add()
    460  oct->hw_ops.reset_io_queues(oct);   in octep_vf_open()
    487  oct->hw_ops.enable_io_queues(oct);   in octep_vf_open()
    490  oct->hw_ops.enable_interrupts(oct);   in octep_vf_open()
    536  oct->hw_ops.disable_interrupts(oct);   in octep_vf_stop()
    543  oct->hw_ops.disable_io_queues(oct);   in octep_vf_stop()
    544  oct->hw_ops.reset_io_queues(oct);   in octep_vf_stop()
    954  oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);   in octep_vf_device_setup()
    [all …]
|
octep_vf_mbox.c
     27  oct->mbox = vzalloc(sizeof(*oct->mbox));   in octep_vf_setup_mbox()
     28  if (!oct->mbox)   in octep_vf_setup_mbox()
     33  oct->hw_ops.setup_mbox_regs(oct, ring);   in octep_vf_setup_mbox()
     35  oct->mbox->wk.ctxptr = oct;   in octep_vf_setup_mbox()
     43  if (oct->mbox) {   in octep_vf_delete_mbox()
     48  vfree(oct->mbox);   in octep_vf_delete_mbox()
     49  oct->mbox = NULL;   in octep_vf_delete_mbox()
     65  dev_err(&oct->pdev->dev,   in octep_vf_mbox_version_check()
     70  dev_dbg(&oct->pdev->dev,   in octep_vf_mbox_version_check()
     88  mbox = oct->mbox;   in octep_vf_mbox_work()
    [all …]
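The octep_vf_mbox.c hits trace a simple mailbox lifecycle: vzalloc the structure, point the work context back at the device, program the mailbox registers, and on teardown vfree and clear the pointer behind a NULL guard. A userspace sketch of that shape, with calloc/free standing in for vzalloc/vfree and all demo_* names hypothetical:

/* Sketch of the setup/delete pairing seen in octep_vf_setup_mbox() /
 * octep_vf_delete_mbox(). Illustrative only, not the driver's code.
 */
#include <stdlib.h>

struct demo_mbox {
	void *ctxptr;                      /* like mbox->wk.ctxptr = oct */
};

struct demo_vf_dev {
	struct demo_mbox *mbox;
};

static int demo_setup_mbox(struct demo_vf_dev *oct)
{
	oct->mbox = calloc(1, sizeof(*oct->mbox));
	if (!oct->mbox)
		return -1;                     /* kernel code would return -ENOMEM */
	oct->mbox->ctxptr = oct;           /* work item can recover the device */
	/* here the driver also programs the mailbox registers via hw_ops */
	return 0;
}

static void demo_delete_mbox(struct demo_vf_dev *oct)
{
	if (oct->mbox) {                   /* guard, as in octep_vf_delete_mbox() */
		free(oct->mbox);
		oct->mbox = NULL;              /* avoid a dangling pointer */
	}
}

int main(void)
{
	struct demo_vf_dev oct = { 0 };

	if (demo_setup_mbox(&oct))
		return 1;
	demo_delete_mbox(&oct);
	return 0;
}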
|