Lines Matching refs:adap
188 static int cfg_queues(struct adapter *adap);
238 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable() local
239 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
259 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
264 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
306 void t4_os_portmod_changed(struct adapter *adap, int port_id) in t4_os_portmod_changed() argument
312 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
351 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash() local
357 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
361 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
368 struct adapter *adap = pi->adapter; in cxgb4_mac_sync() local
383 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
396 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
406 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync() local
414 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
422 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
535 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) in dcb_rpl() argument
538 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
542 cxgb4_dcb_handle_fw_update(adap, pcmd); in dcb_rpl()
571 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
582 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
588 t4_sge_eth_txq_egress_update(q->adap, eq, -1); in fwevtq_handler()
612 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
626 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
630 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
634 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
638 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
642 filter_rpl(q->adap, p); in fwevtq_handler()
646 hash_filter_rpl(q->adap, p); in fwevtq_handler()
650 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
654 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
656 dev_err(q->adap->pdev_dev, in fwevtq_handler()
678 struct adapter *adap = cookie; in t4_nondata_intr() local
679 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
682 adap->swintr = 1; in t4_nondata_intr()
683 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); in t4_nondata_intr()
685 if (adap->flags & CXGB4_MASTER_PF) in t4_nondata_intr()
686 t4_slow_intr_handler(adap); in t4_nondata_intr()
690 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec, in cxgb4_set_msix_aff() argument
696 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n"); in cxgb4_set_msix_aff()
700 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)), in cxgb4_set_msix_aff()
705 dev_warn(adap->pdev_dev, in cxgb4_set_msix_aff()
718 static int request_msix_queue_irqs(struct adapter *adap) in request_msix_queue_irqs() argument
720 struct sge *s = &adap->sge; in request_msix_queue_irqs()
727 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, in request_msix_queue_irqs()
729 adap->msix_info[s->fwevtq_msix_idx].desc, in request_msix_queue_irqs()
743 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs()
754 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in request_msix_queue_irqs()
758 static void free_msix_queue_irqs(struct adapter *adap) in free_msix_queue_irqs() argument
760 struct sge *s = &adap->sge; in free_msix_queue_irqs()
764 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in free_msix_queue_irqs()
772 static int setup_ppod_edram(struct adapter *adap) in setup_ppod_edram() argument
786 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
788 dev_warn(adap->pdev_dev, in setup_ppod_edram()
797 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
799 dev_err(adap->pdev_dev, in setup_ppod_edram()
829 struct adapter *adap = pi->adapter; in cxgb4_config_rss() local
832 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, in cxgb4_config_rss()
842 return t4_config_vi_rss(adap, adap->mbox, viid, in cxgb4_config_rss()
887 static int setup_rss(struct adapter *adap) in setup_rss() argument
891 for_each_port(adap, i) { in setup_rss()
892 const struct port_info *pi = adap2pinfo(adap, i); in setup_rss()
923 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
927 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
928 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
938 static void disable_interrupts(struct adapter *adap) in disable_interrupts() argument
940 struct sge *s = &adap->sge; in disable_interrupts()
942 if (adap->flags & CXGB4_FULL_INIT_DONE) { in disable_interrupts()
943 t4_intr_disable(adap); in disable_interrupts()
944 if (adap->flags & CXGB4_USING_MSIX) { in disable_interrupts()
945 free_msix_queue_irqs(adap); in disable_interrupts()
946 free_irq(adap->msix_info[s->nd_msix_idx].vec, in disable_interrupts()
947 adap); in disable_interrupts()
949 free_irq(adap->pdev->irq, adap); in disable_interrupts()
951 quiesce_rx(adap); in disable_interrupts()
955 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q) in cxgb4_enable_rx() argument
961 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in cxgb4_enable_rx()
969 static void enable_rx(struct adapter *adap) in enable_rx() argument
973 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
974 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
979 cxgb4_enable_rx(adap, q); in enable_rx()
983 static int setup_non_data_intr(struct adapter *adap) in setup_non_data_intr() argument
987 adap->sge.nd_msix_idx = -1; in setup_non_data_intr()
988 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_non_data_intr()
992 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_non_data_intr()
996 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
997 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
998 "%s", adap->port[0]->name); in setup_non_data_intr()
1000 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1004 static int setup_fw_sge_queues(struct adapter *adap) in setup_fw_sge_queues() argument
1006 struct sge *s = &adap->sge; in setup_fw_sge_queues()
1012 if (adap->flags & CXGB4_USING_MSIX) { in setup_fw_sge_queues()
1014 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_fw_sge_queues()
1018 snprintf(adap->msix_info[msix].desc, in setup_fw_sge_queues()
1019 sizeof(adap->msix_info[msix].desc), in setup_fw_sge_queues()
1020 "%s-FWeventq", adap->port[0]->name); in setup_fw_sge_queues()
1022 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
1029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
1032 cxgb4_free_msix_idx_in_bmap(adap, msix); in setup_fw_sge_queues()
1046 static int setup_sge_queues(struct adapter *adap) in setup_sge_queues() argument
1049 struct sge *s = &adap->sge; in setup_sge_queues()
1053 if (is_uld(adap)) in setup_sge_queues()
1056 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_sge_queues()
1059 for_each_port(adap, i) { in setup_sge_queues()
1060 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1067 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_sge_queues()
1073 snprintf(adap->msix_info[msix].desc, in setup_sge_queues()
1074 sizeof(adap->msix_info[msix].desc), in setup_sge_queues()
1076 q->msix = &adap->msix_info[msix]; in setup_sge_queues()
1079 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1083 t4_get_tp_ch_map(adap, in setup_sge_queues()
1093 err = t4_sge_alloc_eth_txq(adap, t, dev, in setup_sge_queues()
1096 !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); in setup_sge_queues()
1102 for_each_port(adap, i) { in setup_sge_queues()
1109 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1115 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
1116 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
1117 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
1123 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1126 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1130 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
1131 t4_free_sge_resources(adap); in setup_sge_queues()
1244 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params() local
1253 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1260 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1268 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1290 static int setup_debugfs(struct adapter *adap) in setup_debugfs() argument
1292 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1296 t4_setup_debugfs(adap); in setup_debugfs()
1301 static void cxgb4_port_mirror_free_rxq(struct adapter *adap, in cxgb4_port_mirror_free_rxq() argument
1304 if ((adap->flags & CXGB4_FULL_INIT_DONE) && in cxgb4_port_mirror_free_rxq()
1305 !(adap->flags & CXGB4_SHUTTING_DOWN)) in cxgb4_port_mirror_free_rxq()
1308 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_free_rxq()
1312 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_free_rxq()
1315 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_free_rxq()
1321 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_alloc_queues() local
1323 struct sge *s = &adap->sge; in cxgb4_port_mirror_alloc_queues()
1340 if (!(adap->flags & CXGB4_USING_MSIX)) in cxgb4_port_mirror_alloc_queues()
1341 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_port_mirror_alloc_queues()
1348 msix = cxgb4_get_msix_idx_from_bmap(adap); in cxgb4_port_mirror_alloc_queues()
1354 mirror_rxq->msix = &adap->msix_info[msix]; in cxgb4_port_mirror_alloc_queues()
1360 init_rspq(adap, &mirror_rxq->rspq, in cxgb4_port_mirror_alloc_queues()
1368 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, in cxgb4_port_mirror_alloc_queues()
1375 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_alloc_queues()
1383 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1388 cxgb4_enable_rx(adap, &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1410 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_alloc_queues()
1413 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_alloc_queues()
1417 cxgb4_port_mirror_free_rxq(adap, in cxgb4_port_mirror_alloc_queues()
1428 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_free_queues() local
1429 struct sge *s = &adap->sge; in cxgb4_port_mirror_free_queues()
1439 cxgb4_port_mirror_free_rxq(adap, in cxgb4_port_mirror_free_queues()
1449 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_start() local
1460 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, in cxgb4_port_mirror_start()
1465 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1478 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1491 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, in cxgb4_port_mirror_start()
1495 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1505 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_stop() local
1510 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, in cxgb4_port_mirror_stop()
1517 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_alloc() local
1529 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, in cxgb4_port_mirror_alloc()
1536 if (adap->flags & CXGB4_FULL_INIT_DONE) { in cxgb4_port_mirror_alloc()
1554 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_alloc()
1565 struct adapter *adap = netdev2adap(dev); in cxgb4_port_mirror_free() local
1580 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_free()
1747 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_queue_tid_release() local
1750 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1751 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1753 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1754 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1755 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1756 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1758 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1767 struct adapter *adap; in process_tid_release_list() local
1769 adap = container_of(work, struct adapter, tid_release_task); in process_tid_release_list()
1771 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1772 while (adap->tid_release_head) { in process_tid_release_list()
1773 void **p = adap->tid_release_head; in process_tid_release_list()
1777 adap->tid_release_head = *p; in process_tid_release_list()
1779 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1785 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1786 t4_ofld_send(adap, skb); in process_tid_release_list()
1787 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1789 adap->tid_release_task_busy = false; in process_tid_release_list()
1790 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1800 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_remove_tid() local
1803 WARN_ON(tid_out_of_range(&adap->tids, tid)); in cxgb4_remove_tid()
1805 if (t->tid_tab[tid - adap->tids.tid_base]) { in cxgb4_remove_tid()
1806 t->tid_tab[tid - adap->tids.tid_base] = NULL; in cxgb4_remove_tid()
1824 t4_ofld_send(adap, skb); in cxgb4_remove_tid()
1835 struct adapter *adap = container_of(t, struct adapter, tids); in tid_init() local
1894 if (is_offload(adap)) { in tid_init()
1898 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1929 struct adapter *adap; in cxgb4_create_server() local
1937 adap = netdev2adap(dev); in cxgb4_create_server()
1945 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1949 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server()
1970 struct adapter *adap; in cxgb4_create_server6() local
1978 adap = netdev2adap(dev); in cxgb4_create_server6()
1988 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1992 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server6()
2001 struct adapter *adap; in cxgb4_remove_server() local
2005 adap = netdev2adap(dev); in cxgb4_remove_server()
2016 ret = t4_mgmt_tx(adap, skb); in cxgb4_remove_server()
2141 struct adapter *adap = netdev2adap(dev); in cxgb4_dbfifo_count() local
2144 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in cxgb4_dbfifo_count()
2145 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in cxgb4_dbfifo_count()
2146 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2184 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_get_tcp_stats() local
2186 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2187 t4_tp_get_tcp_stats(adap, v4, v6, false); in cxgb4_get_tcp_stats()
2188 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2195 struct adapter *adap = netdev2adap(dev); in cxgb4_iscsi_init() local
2197 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); in cxgb4_iscsi_init()
2198 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | in cxgb4_iscsi_init()
2206 struct adapter *adap = netdev2adap(dev); in cxgb4_flush_eq_cache() local
2208 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
2212 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument
2214 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
2218 spin_lock(&adap->win0_lock); in read_eq_indices()
2219 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, in read_eq_indices()
2222 spin_unlock(&adap->win0_lock); in read_eq_indices()
2233 struct adapter *adap = netdev2adap(dev); in cxgb4_sync_txq_pidx() local
2237 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx()
2250 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
2255 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in cxgb4_sync_txq_pidx()
2268 struct adapter *adap; in cxgb4_read_tpte() local
2272 adap = netdev2adap(dev); in cxgb4_read_tpte()
2274 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2282 size = t4_read_reg(adap, MA_EDRAM0_BAR_A); in cxgb4_read_tpte()
2284 size = t4_read_reg(adap, MA_EDRAM1_BAR_A); in cxgb4_read_tpte()
2286 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); in cxgb4_read_tpte()
2289 if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) { in cxgb4_read_tpte()
2290 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
2310 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2311 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
2327 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2328 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); in cxgb4_read_tpte()
2329 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2333 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2342 struct adapter *adap; in cxgb4_read_sge_timestamp() local
2344 adap = netdev2adap(dev); in cxgb4_read_sge_timestamp()
2345 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); in cxgb4_read_sge_timestamp()
2346 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); in cxgb4_read_sge_timestamp()
2403 static void drain_db_fifo(struct adapter *adap, int usecs) in drain_db_fifo() argument
2408 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in drain_db_fifo()
2409 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in drain_db_fifo()
2410 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2434 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) in enable_txq_db() argument
2442 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in enable_txq_db()
2450 static void disable_dbs(struct adapter *adap) in disable_dbs() argument
2454 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2455 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2456 if (is_offload(adap)) { in disable_dbs()
2458 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
2461 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
2468 for_each_port(adap, i) in disable_dbs()
2469 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2472 static void enable_dbs(struct adapter *adap) in enable_dbs() argument
2476 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2477 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2478 if (is_offload(adap)) { in enable_dbs()
2480 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
2483 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
2486 enable_txq_db(adap, &txq->q); in enable_dbs()
2490 for_each_port(adap, i) in enable_dbs()
2491 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2494 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) in notify_rdma_uld() argument
2498 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
2499 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
2504 struct adapter *adap; in process_db_full() local
2506 adap = container_of(work, struct adapter, db_full_task); in process_db_full()
2508 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_full()
2509 enable_dbs(adap); in process_db_full()
2510 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_full()
2511 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2512 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2516 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2520 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) in sync_txq_pidx() argument
2526 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2538 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2543 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in sync_txq_pidx()
2551 CH_WARN(adap, "DB drop recovery failed.\n"); in sync_txq_pidx()
2554 static void recover_all_queues(struct adapter *adap) in recover_all_queues() argument
2558 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2559 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2560 if (is_offload(adap)) { in recover_all_queues()
2562 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2564 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2567 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2571 for_each_port(adap, i) in recover_all_queues()
2572 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2577 struct adapter *adap; in process_db_drop() local
2579 adap = container_of(work, struct adapter, db_drop_task); in process_db_drop()
2581 if (is_t4(adap->params.chip)) { in process_db_drop()
2582 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2583 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); in process_db_drop()
2584 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2585 recover_all_queues(adap); in process_db_drop()
2586 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2587 enable_dbs(adap); in process_db_drop()
2588 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_drop()
2589 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2590 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2597 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, in process_db_drop()
2600 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2604 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2607 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); in process_db_drop()
2610 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2611 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); in process_db_drop()
2614 void t4_db_full(struct adapter *adap) in t4_db_full() argument
2616 if (is_t4(adap->params.chip)) { in t4_db_full()
2617 disable_dbs(adap); in t4_db_full()
2618 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_full()
2619 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in t4_db_full()
2621 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2625 void t4_db_dropped(struct adapter *adap) in t4_db_dropped() argument
2627 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2628 disable_dbs(adap); in t4_db_dropped()
2629 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_dropped()
2631 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2642 static void detach_ulds(struct adapter *adap) in detach_ulds() argument
2646 if (!is_uld(adap)) in detach_ulds()
2650 list_del(&adap->list_node); in detach_ulds()
2653 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2654 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2664 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) in notify_ulds() argument
2670 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2671 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2684 struct adapter *adap; in cxgb4_inet6addr_handler() local
2690 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_inet6addr_handler()
2693 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2697 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2731 static void update_clip(const struct adapter *adap) in update_clip() argument
2740 dev = adap->port[i]; in update_clip()
2763 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
2765 struct sge *s = &adap->sge; in cxgb_up()
2769 err = setup_sge_queues(adap); in cxgb_up()
2772 err = setup_rss(adap); in cxgb_up()
2776 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_up()
2782 err = request_irq(adap->msix_info[s->nd_msix_idx].vec, in cxgb_up()
2784 adap->msix_info[s->nd_msix_idx].desc, adap); in cxgb_up()
2788 err = request_msix_queue_irqs(adap); in cxgb_up()
2792 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2793 (adap->flags & CXGB4_USING_MSI) ? 0 in cxgb_up()
2795 adap->port[0]->name, adap); in cxgb_up()
2800 enable_rx(adap); in cxgb_up()
2801 t4_sge_start(adap); in cxgb_up()
2802 t4_intr_enable(adap); in cxgb_up()
2803 adap->flags |= CXGB4_FULL_INIT_DONE; in cxgb_up()
2806 notify_ulds(adap, CXGB4_STATE_UP); in cxgb_up()
2808 update_clip(adap); in cxgb_up()
2813 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); in cxgb_up()
2815 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2817 t4_free_sge_resources(adap); in cxgb_up()
2921 struct adapter *adap; in cxgb4_create_server_filter() local
2925 adap = netdev2adap(dev); in cxgb4_create_server_filter()
2928 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2929 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2933 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2942 clear_filter(adap, f); in cxgb4_create_server_filter()
2954 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2960 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2974 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2975 ret = set_filter_wr(adap, stid); in cxgb4_create_server_filter()
2977 clear_filter(adap, f); in cxgb4_create_server_filter()
2989 struct adapter *adap; in cxgb4_remove_server_filter() local
2991 adap = netdev2adap(dev); in cxgb4_remove_server_filter()
2994 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2995 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2997 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
3001 return delete_filter(adap, stid); in cxgb4_remove_server_filter()
3199 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap) in cxgb4_mgmt_fill_vf_station_mac_addr() argument
3207 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
3211 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
3227 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
3229 macaddr[5] = adap->pf * nvfs + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
3230 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
3237 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac() local
3250 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); in cxgb4_mgmt_set_vf_mac()
3252 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
3260 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config() local
3263 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
3265 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
3292 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate() local
3299 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
3303 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3315 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_rate()
3318 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3320 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3323 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3325 adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3326 adap->vfinfo[vf].tx_rate = 0; in cxgb4_mgmt_set_vf_rate()
3332 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3338 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
3343 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3355 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, in cxgb4_mgmt_set_vf_rate()
3363 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
3367 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3375 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
3378 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3380 ret, adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3383 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
3384 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3385 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
3393 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan() local
3396 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
3402 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
3404 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
3408 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
3409 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
3417 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_link_state() local
3421 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_link_state()
3443 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_link_state()
3446 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_link_state()
3448 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_link_state()
3452 adap->vfinfo[vf].link_state = link; in cxgb4_mgmt_set_vf_link_state()
3479 struct adapter *adap = pi->adapter; in cxgb_netpoll() local
3481 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_netpoll()
3483 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3488 t4_intr_handler(adap)(0, adap); in cxgb_netpoll()
3495 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate() local
3508 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
3509 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3518 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3529 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3542 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3576 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3614 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_matchall() local
3616 if (!adap->tc_matchall) in cxgb_setup_tc_matchall()
3640 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_block_ingress_cb() local
3642 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_ingress_cb()
3643 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_ingress_cb()
3669 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_block_egress_cb() local
3671 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_egress_cb()
3672 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_egress_cb()
3694 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_mqprio() local
3696 if (!is_ethofld(adap) || !adap->tc_mqprio) in cxgb_setup_tc_mqprio()
3918 struct adapter *adap; in notify_fatal_err() local
3920 adap = container_of(work, struct adapter, fatal_err_notify_task); in notify_fatal_err()
3921 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR); in notify_fatal_err()
3924 void t4_fatal_err(struct adapter *adap) in t4_fatal_err() argument
3928 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3934 t4_shutdown_adapter(adap); in t4_fatal_err()
3935 for_each_port(adap, port) { in t4_fatal_err()
3936 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3947 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3948 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3951 static void setup_memwin(struct adapter *adap) in setup_memwin() argument
3953 u32 nic_win_base = t4_get_util_window(adap); in setup_memwin()
3955 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); in setup_memwin()
3958 static void setup_memwin_rdma(struct adapter *adap) in setup_memwin_rdma() argument
3960 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3964 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); in setup_memwin_rdma()
3966 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3967 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3968 t4_write_reg(adap, in setup_memwin_rdma()
3971 t4_write_reg(adap, in setup_memwin_rdma()
3973 adap->vres.ocq.start); in setup_memwin_rdma()
3974 t4_read_reg(adap, in setup_memwin_rdma()
4179 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) in adap_init1() argument
4187 ret = t4_get_pfres(adap); in adap_init1()
4189 dev_err(adap->pdev_dev, in adap_init1()
4199 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
4205 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
4209 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
4216 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
4222 t4_sge_init(adap); in adap_init1()
4225 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); in adap_init1()
4226 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
4227 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); in adap_init1()
4228 v = t4_read_reg(adap, TP_PIO_DATA_A); in adap_init1()
4229 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); in adap_init1()
4232 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
4233 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, in adap_init1()
4234 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
4238 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
4240 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
4242 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
4246 if (is_offload(adap)) { in adap_init1()
4247 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, in adap_init1()
4252 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, in adap_init1()
4260 return t4_early_init(adap, adap->pf); in adap_init1()
4384 static int adap_init0_phy(struct adapter *adap) in adap_init0_phy() argument
4392 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
4394 dev_warn(adap->pdev_dev, in adap_init0_phy()
4405 adap->pdev_dev); in adap_init0_phy()
4413 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
4419 t4_phy_fw_ver(adap, &cur_phy_fw_ver); in adap_init0_phy()
4420 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
4430 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, in adap_init0_phy()
4433 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
4441 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4762 static int adap_init0(struct adapter *adap, int vpd_skip) in adap_init0() argument
4774 ret = t4_init_devlog_params(adap); in adap_init0()
4779 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4782 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4786 if (ret == adap->mbox) in adap_init0()
4787 adap->flags |= CXGB4_MASTER_PF; in adap_init0()
4797 t4_get_version_info(adap); in adap_init0()
4798 ret = t4_check_fw_version(adap); in adap_init0()
4802 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4812 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4814 dev_err(adap->pdev_dev, in adap_init0()
4816 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4831 adap->pdev_dev); in adap_init0()
4833 dev_err(adap->pdev_dev, in adap_init0()
4842 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, in adap_init0()
4857 ret = adap_config_hma(adap); in adap_init0()
4859 dev_err(adap->pdev_dev, in adap_init0()
4862 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4864 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4866 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4874 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4881 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4890 ret = adap_init0_config(adap, reset); in adap_init0()
4892 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4897 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4907 ret = t4_get_pfres(adap); in adap_init0()
4909 dev_err(adap->pdev_dev, in adap_init0()
4925 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4937 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4941 adap->params.nports = hweight32(port_vec); in adap_init0()
4942 adap->params.portvec = port_vec; in adap_init0()
4948 ret = t4_sge_init(adap); in adap_init0()
4957 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4961 adap->sge.dbqtimer_tick = val[0]; in adap_init0()
4962 ret = t4_read_sge_dbqtimers(adap, in adap_init0()
4963 ARRAY_SIZE(adap->sge.dbqtimer_val), in adap_init0()
4964 adap->sge.dbqtimer_val); in adap_init0()
4968 adap->flags |= CXGB4_SGE_DBQ_TIMER; in adap_init0()
4970 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4971 adap->params.bypass = 1; in adap_init0()
4982 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4985 adap->sge.egr_start = val[0]; in adap_init0()
4986 adap->l2t_start = val[1]; in adap_init0()
4987 adap->l2t_end = val[2]; in adap_init0()
4988 adap->tids.ftid_base = val[3]; in adap_init0()
4989 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4990 adap->sge.ingr_start = val[5]; in adap_init0()
4992 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4995 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5000 adap->tids.hpftid_base = val[0]; in adap_init0()
5001 adap->tids.nhpftids = val[1] - val[0] + 1; in adap_init0()
5008 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5011 adap->rawf_start = val[0]; in adap_init0()
5012 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
5015 adap->tids.tid_base = in adap_init0()
5016 t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A); in adap_init0()
5027 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5030 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
5031 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
5033 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
5034 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
5035 if (!adap->sge.egr_map) { in adap_init0()
5040 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
5041 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
5042 if (!adap->sge.ingr_map) { in adap_init0()
5050 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5051 if (!adap->sge.starving_fl) { in adap_init0()
5056 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5057 if (!adap->sge.txq_maperr) { in adap_init0()
5063 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5064 if (!adap->sge.blocked_fl) { in adap_init0()
5072 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5075 adap->clipt_start = val[0]; in adap_init0()
5076 adap->clipt_end = val[1]; in adap_init0()
5080 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5086 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
5088 adap->params.nsched_cls = val[0]; in adap_init0()
5094 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5099 adap->flags |= CXGB4_FW_OFLD_CONN; in adap_init0()
5100 adap->tids.aftid_base = val[0]; in adap_init0()
5101 adap->tids.aftid_end = val[1]; in adap_init0()
5111 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5119 if (is_t4(adap->params.chip)) { in adap_init0()
5120 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
5123 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5125 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
5130 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5132 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5135 if (is_t4(adap->params.chip)) { in adap_init0()
5136 adap->params.filter2_wr_support = false; in adap_init0()
5139 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5141 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5149 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5151 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); in adap_init0()
5161 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
5171 adap->params.offload = 1; in adap_init0()
5183 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5187 adap->tids.ntids = val[0]; in adap_init0()
5188 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
5189 adap->tids.stid_base = val[1]; in adap_init0()
5190 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
5200 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
5201 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
5202 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5203 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
5204 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5205 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
5206 adap->tids.ftid_base; in adap_init0()
5208 adap->vres.ddp.start = val[3]; in adap_init0()
5209 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
5210 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
5213 init_hash_filter(adap); in adap_init0()
5215 adap->num_ofld_uld += 1; in adap_init0()
5221 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5224 adap->tids.eotid_base = val[0]; in adap_init0()
5225 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5227 adap->params.ethofld = 1; in adap_init0()
5238 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5242 adap->vres.stag.start = val[0]; in adap_init0()
5243 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
5244 adap->vres.rq.start = val[2]; in adap_init0()
5245 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
5246 adap->vres.pbl.start = val[4]; in adap_init0()
5247 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
5251 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5254 adap->vres.srq.start = val[0]; in adap_init0()
5255 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
5257 if (adap->vres.srq.size) { in adap_init0()
5258 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
5259 if (!adap->srq) in adap_init0()
5260 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
5269 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
5273 adap->vres.qp.start = val[0]; in adap_init0()
5274 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
5275 adap->vres.cq.start = val[2]; in adap_init0()
5276 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
5277 adap->vres.ocq.start = val[4]; in adap_init0()
5278 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
5282 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
5285 adap->params.max_ordird_qp = 8; in adap_init0()
5286 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
5289 adap->params.max_ordird_qp = val[0]; in adap_init0()
5290 adap->params.max_ird_adapter = val[1]; in adap_init0()
5292 dev_info(adap->pdev_dev, in adap_init0()
5294 adap->params.max_ordird_qp, in adap_init0()
5295 adap->params.max_ird_adapter); in adap_init0()
5299 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5301 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
5305 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5307 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
5308 adap->num_ofld_uld += 2; in adap_init0()
5313 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5317 adap->vres.iscsi.start = val[0]; in adap_init0()
5318 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
5319 if (is_t6(adap->params.chip)) { in adap_init0()
5322 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5325 adap->vres.ppod_edram.start = val[0]; in adap_init0()
5326 adap->vres.ppod_edram.size = in adap_init0()
5329 dev_info(adap->pdev_dev, in adap_init0()
5332 adap->vres.ppod_edram.size); in adap_init0()
5336 adap->num_ofld_uld += 2; in adap_init0()
5342 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5348 adap->vres.ncrypto_fc = val[0]; in adap_init0()
5350 adap->num_ofld_uld += 1; in adap_init0()
5356 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5360 adap->vres.key.start = val[0]; in adap_init0()
5361 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
5362 adap->num_uld += 1; in adap_init0()
5364 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
5372 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
5394 if (adap->params.mtus[i] == 1492) { in adap_init0()
5395 adap->params.mtus[i] = 1488; in adap_init0()
5399 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
5400 adap->params.b_wnd); in adap_init0()
5402 t4_init_sge_params(adap); in adap_init0()
5403 adap->flags |= CXGB4_FW_OK; in adap_init0()
5404 t4_init_tp_params(adap, true); in adap_init0()
5413 adap_free_hma_mem(adap); in adap_init0()
5414 kfree(adap->sge.egr_map); in adap_init0()
5415 kfree(adap->sge.ingr_map); in adap_init0()
5416 bitmap_free(adap->sge.starving_fl); in adap_init0()
5417 bitmap_free(adap->sge.txq_maperr); in adap_init0()
5419 bitmap_free(adap->sge.blocked_fl); in adap_init0()
5422 t4_fw_bye(adap, adap->mbox); in adap_init0()
5432 struct adapter *adap = pci_get_drvdata(pdev); in eeh_err_detected() local
5434 if (!adap) in eeh_err_detected()
5438 adap->flags &= ~CXGB4_FW_OK; in eeh_err_detected()
5439 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); in eeh_err_detected()
5440 spin_lock(&adap->stats_lock); in eeh_err_detected()
5441 for_each_port(adap, i) { in eeh_err_detected()
5442 struct net_device *dev = adap->port[i]; in eeh_err_detected()
5448 spin_unlock(&adap->stats_lock); in eeh_err_detected()
5449 disable_interrupts(adap); in eeh_err_detected()
5450 if (adap->flags & CXGB4_FULL_INIT_DONE) in eeh_err_detected()
5451 cxgb_down(adap); in eeh_err_detected()
5453 if ((adap->flags & CXGB4_DEV_ENABLED)) { in eeh_err_detected()
5455 adap->flags &= ~CXGB4_DEV_ENABLED; in eeh_err_detected()
5465 struct adapter *adap = pci_get_drvdata(pdev); in eeh_slot_reset() local
5467 if (!adap) { in eeh_slot_reset()
5473 if (!(adap->flags & CXGB4_DEV_ENABLED)) { in eeh_slot_reset()
5479 adap->flags |= CXGB4_DEV_ENABLED; in eeh_slot_reset()
5486 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
5488 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
5490 adap->flags |= CXGB4_FW_OK; in eeh_slot_reset()
5491 if (adap_init1(adap, &c)) in eeh_slot_reset()
5494 for_each_port(adap, i) { in eeh_slot_reset()
5495 struct port_info *pi = adap2pinfo(adap, i); in eeh_slot_reset()
5498 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
5507 if (adap->params.viid_smt_extn_support) { in eeh_slot_reset()
5517 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
5518 adap->params.b_wnd); in eeh_slot_reset()
5519 setup_memwin(adap); in eeh_slot_reset()
5520 if (cxgb_up(adap)) in eeh_slot_reset()
5528 struct adapter *adap = pci_get_drvdata(pdev); in eeh_resume() local
5530 if (!adap) in eeh_resume()
5534 for_each_port(adap, i) { in eeh_resume()
5535 struct net_device *dev = adap->port[i]; in eeh_resume()
5654 static int cfg_queues(struct adapter *adap) in cfg_queues() argument
5659 struct sge *s = &adap->sge; in cfg_queues()
5664 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { in cfg_queues()
5665 adap->params.offload = 0; in cfg_queues()
5666 adap->params.crypto = 0; in cfg_queues()
5667 adap->params.ethofld = 0; in cfg_queues()
5682 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
5683 if (!(adap->flags & CXGB4_USING_MSIX)) in cfg_queues()
5685 neq = adap->params.pfres.neq / 2; in cfg_queues()
5688 if (avail_qsets < adap->params.nports) { in cfg_queues()
5689 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
5690 avail_qsets, adap->params.nports); in cfg_queues()
5695 for_each_port(adap, i) in cfg_queues()
5696 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
5704 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
5712 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
5713 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
5714 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
5718 if (adap->params.nports * ncpus < avail_eth_qsets) in cfg_queues()
5724 (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) in cfg_queues()
5736 for_each_port(adap, i) { in cfg_queues()
5737 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
5748 if (is_uld(adap)) { in cfg_queues()
5753 num_ulds = adap->num_uld + adap->num_ofld_uld; in cfg_queues()
5755 avail_uld_qsets = roundup(i, adap->params.nports); in cfg_queues()
5756 if (avail_qsets < num_ulds * adap->params.nports) { in cfg_queues()
5757 adap->params.offload = 0; in cfg_queues()
5758 adap->params.crypto = 0; in cfg_queues()
5761 s->ofldqsets = adap->params.nports; in cfg_queues()
5772 if (is_ethofld(adap)) { in cfg_queues()
5774 adap->params.ethofld = 0; in cfg_queues()
5789 else if (avail_qsets >= adap->params.nports) in cfg_queues()
5790 s->mirrorqsets = adap->params.nports; in cfg_queues()
5798 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
5808 if (!is_t4(adap->params.chip)) in cfg_queues()
5811 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
5812 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
5821 static void reduce_ethqs(struct adapter *adap, int n) in reduce_ethqs() argument
5826 while (n < adap->sge.ethqsets) in reduce_ethqs()
5827 for_each_port(adap, i) { in reduce_ethqs()
5828 pi = adap2pinfo(adap, i); in reduce_ethqs()
5831 adap->sge.ethqsets--; in reduce_ethqs()
5832 if (adap->sge.ethqsets <= n) in reduce_ethqs()
5838 for_each_port(adap, i) { in reduce_ethqs()
5839 pi = adap2pinfo(adap, i); in reduce_ethqs()
5845 static int alloc_msix_info(struct adapter *adap, u32 num_vec) in alloc_msix_info() argument
5853 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL); in alloc_msix_info()
5854 if (!adap->msix_bmap.msix_bmap) { in alloc_msix_info()
5859 spin_lock_init(&adap->msix_bmap.lock); in alloc_msix_info()
5860 adap->msix_bmap.mapsize = num_vec; in alloc_msix_info()
5862 adap->msix_info = msix_info; in alloc_msix_info()
5866 static void free_msix_info(struct adapter *adap) in free_msix_info() argument
5868 bitmap_free(adap->msix_bmap.msix_bmap); in free_msix_info()
5869 kfree(adap->msix_info); in free_msix_info()
5872 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap) in cxgb4_get_msix_idx_from_bmap() argument
5874 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_get_msix_idx_from_bmap()
5891 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, in cxgb4_free_msix_idx_in_bmap() argument
5894 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_free_msix_idx_in_bmap()
5905 static int enable_msix(struct adapter *adap) in enable_msix() argument
5909 u8 num_uld = 0, nchan = adap->params.nports; in enable_msix()
5911 struct sge *s = &adap->sge; in enable_msix()
5926 if (is_uld(adap)) { in enable_msix()
5927 num_uld = adap->num_ofld_uld + adap->num_uld; in enable_msix()
5933 if (is_ethofld(adap)) { in enable_msix()
5955 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5962 allocated = pci_enable_msix_range(adap->pdev, entries, in enable_msix()
5965 dev_info(adap->pdev_dev, in enable_msix()
5971 dev_info(adap->pdev_dev, in enable_msix()
5973 adap->params.offload = 0; in enable_msix()
5974 adap->params.crypto = 0; in enable_msix()
5975 adap->params.ethofld = 0; in enable_msix()
5991 if (is_uld(adap)) in enable_msix()
5993 if (is_ethofld(adap)) in enable_msix()
6004 for_each_port(adap, i) { in enable_msix()
6005 pi = adap2pinfo(adap, i); in enable_msix()
6018 if (is_uld(adap)) { in enable_msix()
6041 if (is_uld(adap)) in enable_msix()
6043 if (is_ethofld(adap)) in enable_msix()
6051 reduce_ethqs(adap, ethqsets); in enable_msix()
6054 if (is_uld(adap)) { in enable_msix()
6059 if (is_ethofld(adap)) in enable_msix()
6064 for_each_port(adap, i) { in enable_msix()
6065 pi = adap2pinfo(adap, i); in enable_msix()
6072 ret = alloc_msix_info(adap, allocated); in enable_msix()
6077 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
6078 adap->msix_info[i].idx = i; in enable_msix()
6081 dev_info(adap->pdev_dev, in enable_msix()
6090 pci_disable_msix(adap->pdev); in enable_msix()
6099 static int init_rss(struct adapter *adap) in init_rss() argument
6104 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
6108 for_each_port(adap, i) { in init_rss()
6109 struct port_info *pi = adap2pinfo(adap, i); in init_rss()
6137 const struct adapter *adap = pi->adapter; in print_port_info() local
6161 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); in print_port_info()
6214 static int t4_get_chip_type(struct adapter *adap, int ver) in t4_get_chip_type() argument
6216 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); in t4_get_chip_type()
6249 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_iov_configure() local
6254 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
6284 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6285 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6286 adap->port[0] = NULL; in cxgb4_iov_configure()
6289 adap->num_vfs = 0; in cxgb4_iov_configure()
6290 kfree(adap->vfinfo); in cxgb4_iov_configure()
6291 adap->vfinfo = NULL; in cxgb4_iov_configure()
6329 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
6332 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
6339 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
6340 adap->pf); in cxgb4_iov_configure()
6347 pi->adapter = adap; in cxgb4_iov_configure()
6352 adap->port[0] = netdev; in cxgb4_iov_configure()
6355 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
6358 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6359 adap->port[0] = NULL; in cxgb4_iov_configure()
6363 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
6365 if (!adap->vfinfo) { in cxgb4_iov_configure()
6366 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6367 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6368 adap->port[0] = NULL; in cxgb4_iov_configure()
6371 cxgb4_mgmt_fill_vf_station_mac_addr(adap); in cxgb4_iov_configure()
6378 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6379 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6380 adap->port[0] = NULL; in cxgb4_iov_configure()
6381 kfree(adap->vfinfo); in cxgb4_iov_configure()
6382 adap->vfinfo = NULL; in cxgb4_iov_configure()
6387 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
6394 static int chcr_offload_state(struct adapter *adap, in chcr_offload_state() argument
6400 if (!adap->uld[CXGB4_ULD_KTLS].handle) { in chcr_offload_state()
6401 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); in chcr_offload_state()
6404 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { in chcr_offload_state()
6405 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6413 if (!adap->uld[CXGB4_ULD_IPSEC].handle) { in chcr_offload_state()
6414 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); in chcr_offload_state()
6417 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { in chcr_offload_state()
6418 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6425 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6442 struct adapter *adap = netdev2adap(netdev); in cxgb4_ktls_dev_add() local
6446 ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS); in cxgb4_ktls_dev_add()
6450 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE); in cxgb4_ktls_dev_add()
6454 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, in cxgb4_ktls_dev_add()
6460 cxgb4_set_ktls_feature(adap, in cxgb4_ktls_dev_add()
6471 struct adapter *adap = netdev2adap(netdev); in cxgb4_ktls_dev_del() local
6474 if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS)) in cxgb4_ktls_dev_del()
6477 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, in cxgb4_ktls_dev_del()
6481 cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE); in cxgb4_ktls_dev_del()
6496 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_add_state() local
6503 ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS); in cxgb4_xfrm_add_state()
6507 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack); in cxgb4_xfrm_add_state()
6517 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_del_state() local
6520 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_del_state()
6524 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_xfrm_del_state()
6527 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); in cxgb4_xfrm_del_state()
6535 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_free_state() local
6538 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_free_state()
6542 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_xfrm_free_state()
6545 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); in cxgb4_xfrm_free_state()
6553 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_ipsec_offload_ok() local
6557 dev_dbg(adap->pdev_dev, in cxgb4_ipsec_offload_ok()
6561 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_ipsec_offload_ok()
6564 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); in cxgb4_ipsec_offload_ok()
6573 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_advance_esn_state() local
6576 dev_dbg(adap->pdev_dev, in cxgb4_advance_esn_state()
6580 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) in cxgb4_advance_esn_state()
6583 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); in cxgb4_advance_esn_state()