Lines matching refs: vf

191 static unsigned abs_index(struct siena_vf *vf, unsigned index)  in abs_index()  argument
193 return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; in abs_index()
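
The abs_index() helper at file line 191 maps a VF-relative queue index onto the adapter's flat VI space: VIs below EFX_VI_BASE presumably belong to the PF, and each VF then gets a contiguous block of efx_vf_size() VIs. A minimal standalone sketch of that arithmetic, with invented constants standing in for EFX_VI_BASE and efx_vf_size():

```c
#include <stdio.h>

/* Hypothetical stand-ins: the real values come from the NIC configuration. */
#define EFX_VI_BASE 128U   /* first VI available to VFs (assumed) */
#define VF_VI_COUNT 4U     /* VIs per VF, i.e. efx_vf_size(efx) (assumed) */

/* VF-relative index -> absolute VI index, mirroring abs_index() above. */
static unsigned int abs_vi_index(unsigned int vf_index, unsigned int index)
{
	return EFX_VI_BASE + vf_index * VF_VI_COUNT + index;
}

int main(void)
{
	/* VF 2, its first queue: 128 + 2 * 4 + 0 == 136 */
	printf("VF 2, queue 0 -> VI %u\n", abs_vi_index(2, 0));
	return 0;
}
```
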
302 static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) in efx_siena_sriov_reset_tx_filter() argument
304 struct efx_nic *efx = vf->efx; in efx_siena_sriov_reset_tx_filter()
309 if (vf->tx_filter_id != -1) { in efx_siena_sriov_reset_tx_filter()
311 vf->tx_filter_id); in efx_siena_sriov_reset_tx_filter()
313 vf->pci_name, vf->tx_filter_id); in efx_siena_sriov_reset_tx_filter()
314 vf->tx_filter_id = -1; in efx_siena_sriov_reset_tx_filter()
317 if (is_zero_ether_addr(vf->addr.mac_addr)) in efx_siena_sriov_reset_tx_filter()
323 if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) in efx_siena_sriov_reset_tx_filter()
324 vf->tx_filter_mode = VF_TX_FILTER_ON; in efx_siena_sriov_reset_tx_filter()
326 vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; in efx_siena_sriov_reset_tx_filter()
327 efx_filter_init_tx(&filter, abs_index(vf, 0)); in efx_siena_sriov_reset_tx_filter()
330 vf->addr.mac_addr); in efx_siena_sriov_reset_tx_filter()
337 vf->pci_name); in efx_siena_sriov_reset_tx_filter()
340 vf->pci_name, rc); in efx_siena_sriov_reset_tx_filter()
341 vf->tx_filter_id = rc; in efx_siena_sriov_reset_tx_filter()
346 static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf) in efx_siena_sriov_reset_rx_filter() argument
348 struct efx_nic *efx = vf->efx; in efx_siena_sriov_reset_rx_filter()
353 if (vf->rx_filter_id != -1) { in efx_siena_sriov_reset_rx_filter()
355 vf->rx_filter_id); in efx_siena_sriov_reset_rx_filter()
357 vf->pci_name, vf->rx_filter_id); in efx_siena_sriov_reset_rx_filter()
358 vf->rx_filter_id = -1; in efx_siena_sriov_reset_rx_filter()
361 if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr)) in efx_siena_sriov_reset_rx_filter()
364 vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; in efx_siena_sriov_reset_rx_filter()
366 vf->rx_filter_flags, in efx_siena_sriov_reset_rx_filter()
367 abs_index(vf, vf->rx_filter_qid)); in efx_siena_sriov_reset_rx_filter()
370 vf->addr.mac_addr); in efx_siena_sriov_reset_rx_filter()
377 vf->pci_name); in efx_siena_sriov_reset_rx_filter()
380 vf->pci_name, rc); in efx_siena_sriov_reset_rx_filter()
381 vf->rx_filter_id = rc; in efx_siena_sriov_reset_rx_filter()
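
Both reset helpers above (file lines 302-341 and 346-381) follow the same shape: remove the previously installed filter if its ID is not -1, return early if the VF has no MAC address yet (or, on the RX side, has not requested filtering), then build a MAC+VLAN filter aimed at one of the VF's absolute queue indices and remember the ID the filter table returns. A simplified, self-contained sketch of that pattern; the struct and helper names are invented for illustration and are not the driver's real efx_filter API:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VLAN_VID_MASK 0x0fff

/* Toy stand-ins for the driver's filter table and per-VF state. */
struct toy_vf {
	uint8_t  mac[6];
	uint16_t tci;        /* VLAN tag control info, host order here */
	int      filter_id;  /* -1 means "no filter currently installed" */
};

static int toy_filter_remove(int id) { printf("remove filter %d\n", id); return 0; }

static int toy_filter_insert(const uint8_t *mac, unsigned int vlan, unsigned int queue)
{
	printf("insert %02x:..:%02x vlan %u -> queue %u\n", mac[0], mac[5], vlan, queue);
	return 7; /* pretend the filter table handed back ID 7 */
}

static int is_zero_mac(const uint8_t *mac)
{
	static const uint8_t zero[6];
	return memcmp(mac, zero, 6) == 0;
}

/* Same shape as efx_siena_sriov_reset_{tx,rx}_filter(): remove, maybe re-add. */
static void toy_reset_filter(struct toy_vf *vf, unsigned int abs_queue)
{
	if (vf->filter_id != -1) {
		toy_filter_remove(vf->filter_id);
		vf->filter_id = -1;
	}
	if (is_zero_mac(vf->mac))
		return;                      /* nothing to match on yet */

	vf->filter_id = toy_filter_insert(vf->mac,
					  vf->tci & VLAN_VID_MASK,
					  abs_queue);
}

int main(void)
{
	struct toy_vf vf = { {0x02, 0, 0, 0, 0, 0x42}, 100, -1 };

	toy_reset_filter(&vf, 136);
	toy_reset_filter(&vf, 136);  /* second call removes ID 7 before re-adding */
	return 0;
}
```
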
385 static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf) in __efx_siena_sriov_update_vf_addr() argument
387 struct efx_nic *efx = vf->efx; in __efx_siena_sriov_update_vf_addr()
390 efx_siena_sriov_reset_tx_filter(vf); in __efx_siena_sriov_update_vf_addr()
391 efx_siena_sriov_reset_rx_filter(vf); in __efx_siena_sriov_update_vf_addr()
400 static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf) in __efx_siena_sriov_push_vf_status() argument
402 struct efx_nic *efx = vf->efx; in __efx_siena_sriov_push_vf_status()
411 WARN_ON(!mutex_is_locked(&vf->status_lock)); in __efx_siena_sriov_push_vf_status()
412 WARN_ON(!vf->status_addr); in __efx_siena_sriov_push_vf_status()
414 status->local = vf->addr; in __efx_siena_sriov_push_vf_status()
420 copy[0].to_rid = vf->pci_rid; in __efx_siena_sriov_push_vf_status()
421 copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status, in __efx_siena_sriov_push_vf_status()
431 copy[1].to_rid = vf->pci_rid; in __efx_siena_sriov_push_vf_status()
432 copy[1].to_addr = vf->status_addr + data_offset; in __efx_siena_sriov_push_vf_status()
439 if (count == vf->peer_page_count) { in __efx_siena_sriov_push_vf_status()
448 copy[pos].to_rid = vf->pci_rid; in __efx_siena_sriov_push_vf_status()
449 copy[pos].to_addr = vf->peer_page_addrs[count]; in __efx_siena_sriov_push_vf_status()
461 copy[pos].to_rid = vf->pci_rid; in __efx_siena_sriov_push_vf_status()
462 copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, in __efx_siena_sriov_push_vf_status()
470 VFDI_EV_SEQ, (vf->msg_seqno & 0xff), in __efx_siena_sriov_push_vf_status()
472 ++vf->msg_seqno; in __efx_siena_sriov_push_vf_status()
474 EFX_VI_BASE + vf->index * efx_vf_size(efx), in __efx_siena_sriov_push_vf_status()
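
__efx_siena_sriov_push_vf_status() does not write the VF's status page directly; it fills an array of copy descriptors (source buffer, destination PCI RID, destination DMA address, length) and hands them to firmware. The copy targets at file lines 421 and 462, both offsetof() into struct vfdi_status, bracket the payload, which is consistent with a generation-count scheme that lets the VF detect a torn read. A rough sketch of the descriptor-building idea; the descriptor struct and the offsets used here are invented, not the driver's real layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Invented copy-descriptor type; the driver uses its own firmware request format. */
struct toy_copy {
	const void *from_buf;   /* source in PF memory */
	uint16_t    to_rid;     /* destination PCI requester ID (the VF) */
	uint64_t    to_addr;    /* destination DMA address in the VF's status page */
	unsigned    length;
};

#define MAX_COPIES 8

int main(void)
{
	uint32_t generation = 41;
	uint8_t  status_blob[64] = { 0 };      /* stand-in for struct vfdi_status payload */
	uint64_t vf_status_addr = 0x10000;     /* stand-in for vf->status_addr */
	uint16_t vf_rid = 0x0101;              /* stand-in for vf->pci_rid */
	struct toy_copy copy[MAX_COPIES];
	unsigned pos = 0;

	++generation;                           /* new snapshot */

	/* 1: push the new generation count first ... */
	copy[pos++] = (struct toy_copy){ &generation, vf_rid, vf_status_addr + 0, 4 };
	/* 2: ... then the status payload ... */
	copy[pos++] = (struct toy_copy){ status_blob, vf_rid, vf_status_addr + 8,
					 sizeof(status_blob) };
	/* 3: ... then the count again at the end, so the VF can compare the two
	 *    values and retry its read if it raced with an update. */
	copy[pos++] = (struct toy_copy){ &generation, vf_rid, vf_status_addr + 4, 4 };

	for (unsigned i = 0; i < pos; i++)
		printf("copy[%u]: rid 0x%04x addr 0x%llx len %u\n", i, copy[i].to_rid,
		       (unsigned long long)copy[i].to_addr, copy[i].length);
	return 0;
}
```
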
524 *vf_out = nic_data->vf + vf_i; in map_vi_index()
530 static int efx_vfdi_init_evq(struct siena_vf *vf) in efx_vfdi_init_evq() argument
532 struct efx_nic *efx = vf->efx; in efx_vfdi_init_evq()
533 struct vfdi_req *req = vf->buf.addr; in efx_vfdi_init_evq()
536 unsigned abs_evq = abs_index(vf, vf_evq); in efx_vfdi_init_evq()
537 unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq); in efx_vfdi_init_evq()
545 vf->pci_name, vf_evq, buf_count); in efx_vfdi_init_evq()
563 memcpy(vf->evq0_addrs, req->u.init_evq.addr, in efx_vfdi_init_evq()
565 vf->evq0_count = buf_count; in efx_vfdi_init_evq()
571 static int efx_vfdi_init_rxq(struct siena_vf *vf) in efx_vfdi_init_rxq() argument
573 struct efx_nic *efx = vf->efx; in efx_vfdi_init_rxq()
574 struct vfdi_req *req = vf->buf.addr; in efx_vfdi_init_rxq()
578 unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq); in efx_vfdi_init_rxq()
588 "buf_count %d\n", vf->pci_name, vf_rxq, in efx_vfdi_init_rxq()
592 if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) in efx_vfdi_init_rxq()
593 ++vf->rxq_count; in efx_vfdi_init_rxq()
599 FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), in efx_vfdi_init_rxq()
607 abs_index(vf, vf_rxq)); in efx_vfdi_init_rxq()
612 static int efx_vfdi_init_txq(struct siena_vf *vf) in efx_vfdi_init_txq() argument
614 struct efx_nic *efx = vf->efx; in efx_vfdi_init_txq()
615 struct vfdi_req *req = vf->buf.addr; in efx_vfdi_init_txq()
619 unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq); in efx_vfdi_init_txq()
629 "buf_count %d\n", vf->pci_name, vf_txq, in efx_vfdi_init_txq()
634 mutex_lock(&vf->txq_lock); in efx_vfdi_init_txq()
635 if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) in efx_vfdi_init_txq()
636 ++vf->txq_count; in efx_vfdi_init_txq()
637 mutex_unlock(&vf->txq_lock); in efx_vfdi_init_txq()
640 eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; in efx_vfdi_init_txq()
648 FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), in efx_vfdi_init_txq()
653 abs_index(vf, vf_txq)); in efx_vfdi_init_txq()
659 static bool efx_vfdi_flush_wake(struct siena_vf *vf) in efx_vfdi_flush_wake() argument
664 return (!vf->txq_count && !vf->rxq_count) || in efx_vfdi_flush_wake()
665 atomic_read(&vf->rxq_retry_count); in efx_vfdi_flush_wake()
668 static void efx_vfdi_flush_clear(struct siena_vf *vf) in efx_vfdi_flush_clear() argument
670 memset(vf->txq_mask, 0, sizeof(vf->txq_mask)); in efx_vfdi_flush_clear()
671 vf->txq_count = 0; in efx_vfdi_flush_clear()
672 memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask)); in efx_vfdi_flush_clear()
673 vf->rxq_count = 0; in efx_vfdi_flush_clear()
674 memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask)); in efx_vfdi_flush_clear()
675 atomic_set(&vf->rxq_retry_count, 0); in efx_vfdi_flush_clear()
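
efx_vfdi_flush_wake() is the predicate later handed to wait_event_timeout() in efx_vfdi_fini_all_queues(): wake when every queue the VF owned has completed its flush (both counts reach zero) or when at least one RX flush failed and needs to be reissued. efx_vfdi_flush_clear() simply zeroes all of that bookkeeping. A compact sketch of the predicate, with plain ints standing in for the driver's bitmaps and atomic counter:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified flush bookkeeping; the driver keeps bitmaps plus an atomic. */
struct toy_flush_state {
	int txq_count;        /* TX queues still waiting for a flush event */
	int rxq_count;        /* RX queues still waiting for a flush event */
	int rxq_retry_count;  /* RX flushes that failed and must be reissued */
};

/* Mirror of efx_vfdi_flush_wake(): wake when done, or when a retry is needed. */
static bool flush_wake(const struct toy_flush_state *s)
{
	return (s->txq_count == 0 && s->rxq_count == 0) || s->rxq_retry_count;
}

int main(void)
{
	struct toy_flush_state s = { 2, 1, 0 };

	printf("%d\n", flush_wake(&s));   /* 0: still flushing */
	s.rxq_retry_count = 1;
	printf("%d\n", flush_wake(&s));   /* 1: wake to reissue the failed flush */
	return 0;
}
```
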
678 static int efx_vfdi_fini_all_queues(struct siena_vf *vf) in efx_vfdi_fini_all_queues() argument
680 struct efx_nic *efx = vf->efx; in efx_vfdi_fini_all_queues()
683 unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); in efx_vfdi_fini_all_queues()
699 if (test_bit(index, vf->txq_mask)) { in efx_vfdi_fini_all_queues()
706 if (test_bit(index, vf->rxq_mask)) { in efx_vfdi_fini_all_queues()
714 atomic_set(&vf->rxq_retry_count, 0); in efx_vfdi_fini_all_queues()
715 while (timeout && (vf->rxq_count || vf->txq_count)) { in efx_vfdi_fini_all_queues()
721 timeout = wait_event_timeout(vf->flush_waitq, in efx_vfdi_fini_all_queues()
722 efx_vfdi_flush_wake(vf), in efx_vfdi_fini_all_queues()
726 if (test_and_clear_bit(index, vf->rxq_retry_mask)) { in efx_vfdi_fini_all_queues()
727 atomic_dec(&vf->rxq_retry_count); in efx_vfdi_fini_all_queues()
752 efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL, in efx_vfdi_fini_all_queues()
754 efx_vfdi_flush_clear(vf); in efx_vfdi_fini_all_queues()
756 vf->evq0_count = 0; in efx_vfdi_fini_all_queues()
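
The teardown path at file lines 678-756 issues flush requests for every queue marked in txq_mask/rxq_mask, then loops: wait (with a timeout) for the wake condition above, reissue any flush flagged in rxq_retry_mask, and go around again until nothing is pending or the timeout expires. A toy version of that retry loop, with an invented helper standing in for wait_event_timeout() and the flush machinery:

```c
#include <stdio.h>

/* Toy stand-ins: pretend two queues are flushing and one RX flush fails once. */
static int pending = 2;     /* roughly vf->txq_count + vf->rxq_count */
static int retries = 1;     /* queues flagged in rxq_retry_mask */

/* Stand-in for wait_event_timeout(): deliver the pending flush events. */
static int wait_for_flush_events(void)
{
	pending = retries;      /* a failed queue stays pending until reissued */
	return 1;               /* nonzero == did not time out */
}

int main(void)
{
	int timeout = 5;        /* bounded number of rounds, like the HZ-based timeout */

	while (timeout && pending) {
		timeout = wait_for_flush_events();
		if (retries) {
			printf("reissuing %d failed RX flush(es)\n", retries);
			retries = 0;            /* reissued; assume they succeed next round */
		}
	}
	puts(pending ? "flush timed out" : "all queues flushed");
	return 0;
}
```
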
761 static int efx_vfdi_insert_filter(struct siena_vf *vf) in efx_vfdi_insert_filter() argument
763 struct efx_nic *efx = vf->efx; in efx_vfdi_insert_filter()
765 struct vfdi_req *req = vf->buf.addr; in efx_vfdi_insert_filter()
769 if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) { in efx_vfdi_insert_filter()
773 "flags 0x%x\n", vf->pci_name, vf_rxq, in efx_vfdi_insert_filter()
783 vf->rx_filter_flags = flags; in efx_vfdi_insert_filter()
784 vf->rx_filter_qid = vf_rxq; in efx_vfdi_insert_filter()
785 vf->rx_filtering = true; in efx_vfdi_insert_filter()
787 efx_siena_sriov_reset_rx_filter(vf); in efx_vfdi_insert_filter()
793 static int efx_vfdi_remove_all_filters(struct siena_vf *vf) in efx_vfdi_remove_all_filters() argument
795 struct efx_nic *efx = vf->efx; in efx_vfdi_remove_all_filters()
798 vf->rx_filtering = false; in efx_vfdi_remove_all_filters()
799 efx_siena_sriov_reset_rx_filter(vf); in efx_vfdi_remove_all_filters()
805 static int efx_vfdi_set_status_page(struct siena_vf *vf) in efx_vfdi_set_status_page() argument
807 struct efx_nic *efx = vf->efx; in efx_vfdi_set_status_page()
809 struct vfdi_req *req = vf->buf.addr; in efx_vfdi_set_status_page()
820 vf->pci_name); in efx_vfdi_set_status_page()
825 mutex_lock(&vf->status_lock); in efx_vfdi_set_status_page()
826 vf->status_addr = req->u.set_status_page.dma_addr; in efx_vfdi_set_status_page()
828 kfree(vf->peer_page_addrs); in efx_vfdi_set_status_page()
829 vf->peer_page_addrs = NULL; in efx_vfdi_set_status_page()
830 vf->peer_page_count = 0; in efx_vfdi_set_status_page()
833 vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), in efx_vfdi_set_status_page()
835 if (vf->peer_page_addrs) { in efx_vfdi_set_status_page()
836 memcpy(vf->peer_page_addrs, in efx_vfdi_set_status_page()
839 vf->peer_page_count = page_count; in efx_vfdi_set_status_page()
843 __efx_siena_sriov_push_vf_status(vf); in efx_vfdi_set_status_page()
844 mutex_unlock(&vf->status_lock); in efx_vfdi_set_status_page()
850 static int efx_vfdi_clear_status_page(struct siena_vf *vf) in efx_vfdi_clear_status_page() argument
852 mutex_lock(&vf->status_lock); in efx_vfdi_clear_status_page()
853 vf->status_addr = 0; in efx_vfdi_clear_status_page()
854 mutex_unlock(&vf->status_lock); in efx_vfdi_clear_status_page()
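
The set-status-page handler records where the VF wants its status written (vf->status_addr) and snapshots the VF's list of peer-address pages: free the old array, kcalloc() a new one sized by page_count, copy the DMA addresses in, then push a fresh status while still holding status_lock; the clear handler just zeroes status_addr under the same lock. A small userspace sketch of the replace-the-array step, with locking and the DMA details reduced to comments:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified VF state: just the peer-page snapshot. */
struct toy_vf {
	uint64_t *peer_page_addrs;
	unsigned  peer_page_count;
};

/* Replace the stored snapshot with the addresses the VF supplied.
 * In the driver this runs under vf->status_lock. */
static void set_peer_pages(struct toy_vf *vf, const uint64_t *addrs, unsigned count)
{
	free(vf->peer_page_addrs);          /* kfree() the previous snapshot */
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (count == 0)
		return;

	vf->peer_page_addrs = calloc(count, sizeof(uint64_t));  /* kcalloc() */
	if (!vf->peer_page_addrs)
		return;                     /* allocation failure: leave the count at 0 */

	memcpy(vf->peer_page_addrs, addrs, count * sizeof(uint64_t));
	vf->peer_page_count = count;
}

int main(void)
{
	struct toy_vf vf = { NULL, 0 };
	uint64_t pages[2] = { 0x1000, 0x2000 };

	set_peer_pages(&vf, pages, 2);
	printf("%u peer pages stored\n", vf.peer_page_count);
	set_peer_pages(&vf, NULL, 0);       /* clear-style teardown */
	free(vf.peer_page_addrs);
	return 0;
}
```
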
859 typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
874 struct siena_vf *vf = container_of(work, struct siena_vf, req); in efx_siena_sriov_vfdi() local
875 struct efx_nic *efx = vf->efx; in efx_siena_sriov_vfdi()
876 struct vfdi_req *req = vf->buf.addr; in efx_siena_sriov_vfdi()
882 copy[0].from_rid = vf->pci_rid; in efx_siena_sriov_vfdi()
883 copy[0].from_addr = vf->req_addr; in efx_siena_sriov_vfdi()
885 copy[0].to_addr = vf->buf.dma_addr; in efx_siena_sriov_vfdi()
893 vf->pci_name, -rc); in efx_siena_sriov_vfdi()
894 vf->busy = false; in efx_siena_sriov_vfdi()
899 rc = vfdi_ops[req->op](vf); in efx_siena_sriov_vfdi()
903 req->op, vf->pci_name); in efx_siena_sriov_vfdi()
908 "%llx\n", req->op, vf->pci_name, in efx_siena_sriov_vfdi()
909 (unsigned long long)vf->req_addr); in efx_siena_sriov_vfdi()
914 vf->busy = false; in efx_siena_sriov_vfdi()
923 copy[0].to_rid = vf->pci_rid; in efx_siena_sriov_vfdi()
924 copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc); in efx_siena_sriov_vfdi()
927 copy[1].to_rid = vf->pci_rid; in efx_siena_sriov_vfdi()
928 copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); in efx_siena_sriov_vfdi()
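
File line 899 is the core of the VFDI request path: the PF copies the VF's request page into its own buffer, validates req->op, and dispatches through a table of efx_vfdi_op_t handlers (the typedef at file line 859). The reply is then copied back with rc before op (file lines 924 and 928), presumably so a VF polling op only sees completion once rc is valid. A minimal dispatch-table sketch with invented op numbers:

```c
#include <stdio.h>

struct toy_vf { const char *name; };

typedef int (*toy_vfdi_op_t)(struct toy_vf *vf);   /* cf. efx_vfdi_op_t */

static int op_init_evq(struct toy_vf *vf) { printf("%s: init evq\n", vf->name); return 0; }
static int op_fini_all(struct toy_vf *vf) { printf("%s: fini all\n", vf->name); return 0; }

/* Invented op numbers; the real values live in the VFDI protocol header. */
enum { TOY_OP_INIT_EVQ, TOY_OP_FINI_ALL, TOY_OP_COUNT };

static const toy_vfdi_op_t toy_ops[TOY_OP_COUNT] = {
	[TOY_OP_INIT_EVQ] = op_init_evq,
	[TOY_OP_FINI_ALL] = op_fini_all,
};

/* Bounds-check the op, then call through the table, like line 899 above. */
static int dispatch(struct toy_vf *vf, unsigned op)
{
	if (op >= TOY_OP_COUNT || !toy_ops[op])
		return -1;                  /* unknown op: reject */
	return toy_ops[op](vf);
}

int main(void)
{
	struct toy_vf vf = { "vf0" };

	printf("rc=%d\n", dispatch(&vf, TOY_OP_INIT_EVQ));
	printf("rc=%d\n", dispatch(&vf, 99));
	return 0;
}
```
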
941 static void efx_siena_sriov_reset_vf(struct siena_vf *vf, in efx_siena_sriov_reset_vf() argument
944 struct efx_nic *efx = vf->efx; in efx_siena_sriov_reset_vf()
954 if (!vf->evq0_count) in efx_siena_sriov_reset_vf()
956 BUG_ON(vf->evq0_count & (vf->evq0_count - 1)); in efx_siena_sriov_reset_vf()
958 mutex_lock(&vf->status_lock); in efx_siena_sriov_reset_vf()
961 VFDI_EV_SEQ, vf->msg_seqno, in efx_siena_sriov_reset_vf()
963 vf->msg_seqno++; in efx_siena_sriov_reset_vf()
967 for (pos = 0; pos < vf->evq0_count; pos += count) { in efx_siena_sriov_reset_vf()
968 count = min_t(unsigned, vf->evq0_count - pos, in efx_siena_sriov_reset_vf()
974 copy_req[k].to_rid = vf->pci_rid; in efx_siena_sriov_reset_vf()
975 copy_req[k].to_addr = vf->evq0_addrs[pos + k]; in efx_siena_sriov_reset_vf()
983 ": %d\n", vf->pci_name, -rc); in efx_siena_sriov_reset_vf()
989 abs_evq = abs_index(vf, 0); in efx_siena_sriov_reset_vf()
990 buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); in efx_siena_sriov_reset_vf()
991 efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); in efx_siena_sriov_reset_vf()
1000 FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count), in efx_siena_sriov_reset_vf()
1006 mutex_unlock(&vf->status_lock); in efx_siena_sriov_reset_vf()
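
efx_siena_sriov_reset_vf() sends the VF a reset event and then reprograms the VF's event-queue-0 buffers into the hardware buffer table. The BUG_ON at file line 956 is the usual power-of-two check (n & (n - 1)), needed because file line 1000 encodes the queue size as __ffs(count); and because only a limited number of copy descriptors fit in one firmware request, the loop at file lines 967-975 walks evq0_addrs in chunks of min_t(unsigned, remaining, limit). The same chunking idiom in plain C, with an invented per-request limit:

```c
#include <stdio.h>

#define MAX_PER_REQUEST 4U   /* invented limit; the driver's comes from the firmware API */

int main(void)
{
	unsigned total = 10;     /* e.g. vf->evq0_count pages to reprogram */
	unsigned pos, count;

	for (pos = 0; pos < total; pos += count) {
		unsigned remaining = total - pos;

		/* min_t(unsigned, remaining, MAX_PER_REQUEST) */
		count = remaining < MAX_PER_REQUEST ? remaining : MAX_PER_REQUEST;
		printf("request covering pages [%u, %u)\n", pos, pos + count);
	}
	return 0;
}
```
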
1011 struct siena_vf *vf = container_of(work, struct siena_vf, req); in efx_siena_sriov_reset_vf_work() local
1012 struct efx_nic *efx = vf->efx; in efx_siena_sriov_reset_vf_work()
1016 efx_siena_sriov_reset_vf(vf, &buf); in efx_siena_sriov_reset_vf_work()
1084 struct siena_vf *vf; in efx_siena_sriov_peer_work() local
1106 vf = nic_data->vf + pos; in efx_siena_sriov_peer_work()
1108 mutex_lock(&vf->status_lock); in efx_siena_sriov_peer_work()
1109 if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { in efx_siena_sriov_peer_work()
1110 *peer++ = vf->addr; in efx_siena_sriov_peer_work()
1115 mutex_unlock(&vf->status_lock); in efx_siena_sriov_peer_work()
1162 vf = nic_data->vf + pos; in efx_siena_sriov_peer_work()
1164 mutex_lock(&vf->status_lock); in efx_siena_sriov_peer_work()
1165 if (vf->status_addr) in efx_siena_sriov_peer_work()
1166 __efx_siena_sriov_push_vf_status(vf); in efx_siena_sriov_peer_work()
1167 mutex_unlock(&vf->status_lock); in efx_siena_sriov_peer_work()
1197 struct siena_vf *vf; in efx_siena_sriov_vf_alloc() local
1200 nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf), in efx_siena_sriov_vf_alloc()
1202 if (!nic_data->vf) in efx_siena_sriov_vf_alloc()
1206 vf = nic_data->vf + index; in efx_siena_sriov_vf_alloc()
1208 vf->efx = efx; in efx_siena_sriov_vf_alloc()
1209 vf->index = index; in efx_siena_sriov_vf_alloc()
1210 vf->rx_filter_id = -1; in efx_siena_sriov_vf_alloc()
1211 vf->tx_filter_mode = VF_TX_FILTER_AUTO; in efx_siena_sriov_vf_alloc()
1212 vf->tx_filter_id = -1; in efx_siena_sriov_vf_alloc()
1213 INIT_WORK(&vf->req, efx_siena_sriov_vfdi); in efx_siena_sriov_vf_alloc()
1214 INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); in efx_siena_sriov_vf_alloc()
1215 init_waitqueue_head(&vf->flush_waitq); in efx_siena_sriov_vf_alloc()
1216 mutex_init(&vf->status_lock); in efx_siena_sriov_vf_alloc()
1217 mutex_init(&vf->txq_lock); in efx_siena_sriov_vf_alloc()
1226 struct siena_vf *vf; in efx_siena_sriov_vfs_fini() local
1230 vf = nic_data->vf + pos; in efx_siena_sriov_vfs_fini()
1232 efx_siena_free_buffer(efx, &vf->buf); in efx_siena_sriov_vfs_fini()
1233 kfree(vf->peer_page_addrs); in efx_siena_sriov_vfs_fini()
1234 vf->peer_page_addrs = NULL; in efx_siena_sriov_vfs_fini()
1235 vf->peer_page_count = 0; in efx_siena_sriov_vfs_fini()
1237 vf->evq0_count = 0; in efx_siena_sriov_vfs_fini()
1247 struct siena_vf *vf; in efx_siena_sriov_vfs_init() local
1260 vf = nic_data->vf + index; in efx_siena_sriov_vfs_init()
1263 vf->buftbl_base = buftbl_base; in efx_siena_sriov_vfs_init()
1266 vf->pci_rid = devfn; in efx_siena_sriov_vfs_init()
1267 snprintf(vf->pci_name, sizeof(vf->pci_name), in efx_siena_sriov_vfs_init()
1272 rc = efx_siena_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, in efx_siena_sriov_vfs_init()
1360 kfree(nic_data->vf); in efx_siena_sriov_init()
1371 struct siena_vf *vf; in efx_siena_sriov_fini() local
1387 vf = nic_data->vf + pos; in efx_siena_sriov_fini()
1388 cancel_work_sync(&vf->req); in efx_siena_sriov_fini()
1389 cancel_work_sync(&vf->reset_work); in efx_siena_sriov_fini()
1398 kfree(nic_data->vf); in efx_siena_sriov_fini()
1406 struct siena_vf *vf; in efx_siena_sriov_event() local
1421 if (map_vi_index(efx, qid, &vf, NULL)) in efx_siena_sriov_event()
1423 if (vf->busy) in efx_siena_sriov_event()
1428 vf->req_type = VFDI_EV_TYPE_REQ_WORD0; in efx_siena_sriov_event()
1429 vf->req_seqno = seq + 1; in efx_siena_sriov_event()
1430 vf->req_addr = 0; in efx_siena_sriov_event()
1431 } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type) in efx_siena_sriov_event()
1434 switch (vf->req_type) { in efx_siena_sriov_event()
1438 vf->req_addr |= (u64)data << (vf->req_type << 4); in efx_siena_sriov_event()
1439 ++vf->req_type; in efx_siena_sriov_event()
1443 vf->req_addr |= (u64)data << 48; in efx_siena_sriov_event()
1444 vf->req_type = VFDI_EV_TYPE_REQ_WORD0; in efx_siena_sriov_event()
1445 vf->busy = true; in efx_siena_sriov_event()
1446 queue_work(vfdi_workqueue, &vf->req); in efx_siena_sriov_event()
1454 vf->pci_name); in efx_siena_sriov_event()
1456 vf->req_type = VFDI_EV_TYPE_REQ_WORD0; in efx_siena_sriov_event()
1457 vf->req_seqno = seq + 1; in efx_siena_sriov_event()
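
The event handler at file lines 1406-1457 shows how a VF delivers the 64-bit DMA address of its request page: as four numbered 16-bit events (REQ_WORD0 through WORD3). WORD0 restarts the accumulator, each word is OR-ed in at bit position word_index * 16 (note the "vf->req_type << 4" shift at file line 1438), the sequence number must advance by one per event, and arrival of the final word queues the VFDI work item. A standalone sketch of the reassembly:

```c
#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit address delivered as four 16-bit words, low word first.
 * Mirrors the shifts at file lines 1438 and 1443. */
static uint64_t assemble(const uint16_t words[4])
{
	uint64_t addr = 0;
	unsigned type;

	for (type = 0; type < 4; type++)
		addr |= (uint64_t)words[type] << (type << 4);   /* type * 16 */
	return addr;
}

int main(void)
{
	/* 0x0123456789abcdef sent low 16 bits first */
	const uint16_t words[4] = { 0xcdef, 0x89ab, 0x4567, 0x0123 };

	printf("req_addr = 0x%016llx\n", (unsigned long long)assemble(words));
	return 0;
}
```
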
1463 struct siena_vf *vf; in efx_siena_sriov_flr() local
1467 vf = nic_data->vf + vf_i; in efx_siena_sriov_flr()
1469 "FLR on VF %s\n", vf->pci_name); in efx_siena_sriov_flr()
1471 vf->status_addr = 0; in efx_siena_sriov_flr()
1472 efx_vfdi_remove_all_filters(vf); in efx_siena_sriov_flr()
1473 efx_vfdi_flush_clear(vf); in efx_siena_sriov_flr()
1475 vf->evq0_count = 0; in efx_siena_sriov_flr()
1494 struct siena_vf *vf; in efx_siena_sriov_tx_flush_done() local
1498 if (map_vi_index(efx, queue, &vf, &qid)) in efx_siena_sriov_tx_flush_done()
1501 if (!test_bit(qid, vf->txq_mask)) in efx_siena_sriov_tx_flush_done()
1504 __clear_bit(qid, vf->txq_mask); in efx_siena_sriov_tx_flush_done()
1505 --vf->txq_count; in efx_siena_sriov_tx_flush_done()
1507 if (efx_vfdi_flush_wake(vf)) in efx_siena_sriov_tx_flush_done()
1508 wake_up(&vf->flush_waitq); in efx_siena_sriov_tx_flush_done()
1513 struct siena_vf *vf; in efx_siena_sriov_rx_flush_done() local
1519 if (map_vi_index(efx, queue, &vf, &qid)) in efx_siena_sriov_rx_flush_done()
1521 if (!test_bit(qid, vf->rxq_mask)) in efx_siena_sriov_rx_flush_done()
1525 set_bit(qid, vf->rxq_retry_mask); in efx_siena_sriov_rx_flush_done()
1526 atomic_inc(&vf->rxq_retry_count); in efx_siena_sriov_rx_flush_done()
1528 __clear_bit(qid, vf->rxq_mask); in efx_siena_sriov_rx_flush_done()
1529 --vf->rxq_count; in efx_siena_sriov_rx_flush_done()
1531 if (efx_vfdi_flush_wake(vf)) in efx_siena_sriov_rx_flush_done()
1532 wake_up(&vf->flush_waitq); in efx_siena_sriov_rx_flush_done()
1538 struct siena_vf *vf; in efx_siena_sriov_desc_fetch_err() local
1541 if (map_vi_index(efx, dmaq, &vf, &rel)) in efx_siena_sriov_desc_fetch_err()
1547 vf->index, rel); in efx_siena_sriov_desc_fetch_err()
1548 queue_work(vfdi_workqueue, &vf->reset_work); in efx_siena_sriov_desc_fetch_err()
1557 struct siena_vf *vf; in efx_siena_sriov_reset() local
1571 vf = nic_data->vf + vf_i; in efx_siena_sriov_reset()
1572 efx_siena_sriov_reset_vf(vf, &buf); in efx_siena_sriov_reset()
1598 struct siena_vf *vf; in efx_siena_sriov_set_vf_mac() local
1602 vf = nic_data->vf + vf_i; in efx_siena_sriov_set_vf_mac()
1604 mutex_lock(&vf->status_lock); in efx_siena_sriov_set_vf_mac()
1605 ether_addr_copy(vf->addr.mac_addr, mac); in efx_siena_sriov_set_vf_mac()
1606 __efx_siena_sriov_update_vf_addr(vf); in efx_siena_sriov_set_vf_mac()
1607 mutex_unlock(&vf->status_lock); in efx_siena_sriov_set_vf_mac()
1616 struct siena_vf *vf; in efx_siena_sriov_set_vf_vlan() local
1621 vf = nic_data->vf + vf_i; in efx_siena_sriov_set_vf_vlan()
1623 mutex_lock(&vf->status_lock); in efx_siena_sriov_set_vf_vlan()
1625 vf->addr.tci = htons(tci); in efx_siena_sriov_set_vf_vlan()
1626 __efx_siena_sriov_update_vf_addr(vf); in efx_siena_sriov_set_vf_vlan()
1627 mutex_unlock(&vf->status_lock); in efx_siena_sriov_set_vf_vlan()
1636 struct siena_vf *vf; in efx_siena_sriov_set_vf_spoofchk() local
1641 vf = nic_data->vf + vf_i; in efx_siena_sriov_set_vf_spoofchk()
1643 mutex_lock(&vf->txq_lock); in efx_siena_sriov_set_vf_spoofchk()
1644 if (vf->txq_count == 0) { in efx_siena_sriov_set_vf_spoofchk()
1645 vf->tx_filter_mode = in efx_siena_sriov_set_vf_spoofchk()
1652 mutex_unlock(&vf->txq_lock); in efx_siena_sriov_set_vf_spoofchk()
1660 struct siena_vf *vf; in efx_siena_sriov_get_vf_config() local
1665 vf = nic_data->vf + vf_i; in efx_siena_sriov_get_vf_config()
1667 ivi->vf = vf_i; in efx_siena_sriov_get_vf_config()
1668 ether_addr_copy(ivi->mac, vf->addr.mac_addr); in efx_siena_sriov_get_vf_config()
1671 tci = ntohs(vf->addr.tci); in efx_siena_sriov_get_vf_config()
1674 ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON; in efx_siena_sriov_get_vf_config()
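
The ndo hooks at the end store the whole 16-bit TCI in network byte order in vf->addr.tci (file line 1625) and unpack it again with ntohs() plus VLAN_VID_MASK in efx_siena_sriov_get_vf_config() (file line 1671). A small pack/unpack sketch; the priority shift of 13 is the standard 802.1Q TCI layout and is assumed here rather than visible in the fragments above:

```c
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK   0x0fff
#define VLAN_PRIO_SHIFT 13       /* 802.1Q: PCP occupies the top three bits */

/* Pack VLAN ID + priority into a TCI, and pull them back out. */
static uint16_t tci_pack(uint16_t vid, uint8_t prio)
{
	return (vid & VLAN_VID_MASK) | ((uint16_t)(prio & 0x7) << VLAN_PRIO_SHIFT);
}

int main(void)
{
	uint16_t tci = tci_pack(100, 5);

	printf("tci=0x%04x vid=%u prio=%u\n",
	       tci, tci & VLAN_VID_MASK, tci >> VLAN_PRIO_SHIFT);
	return 0;
}
```
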