Lines matching refs: iq (uses of struct octep_iq across the octep driver functions below)

59 		ioq_vector->iq = oct->iq[i];  in octep_alloc_ioq_vectors()
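A hedged sketch of the loop around the single match at line 59, pairing each queue with its interrupt vector; the loop bound (oct->num_oqs) and the ->oq pairing are assumptions, not shown in the listing:

for (i = 0; i < oct->num_oqs; i++) {
	struct octep_ioq_vector *ioq_vector = oct->ioq_vector[i];

	ioq_vector->iq = oct->iq[i];	/* TX queue i serviced by vector i */
	ioq_vector->oq = oct->oq[i];	/* assumed: RX queue i shares the vector */
}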
355 static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) in octep_enable_ioq_irq() argument
359 netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); in octep_enable_ioq_irq()
360 if (iq->pkts_processed) { in octep_enable_ioq_irq()
361 writel(iq->pkts_processed, iq->inst_cnt_reg); in octep_enable_ioq_irq()
362 iq->pkt_in_done -= iq->pkts_processed; in octep_enable_ioq_irq()
363 iq->pkts_processed = 0; in octep_enable_ioq_irq()
373 writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); in octep_enable_ioq_irq()
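The fragments at lines 355-373 cover nearly the whole function; lines 364-372 (the OQ half) are not in the listing. A hedged reassembly of the IQ side:

static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
{
	netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
	if (iq->pkts_processed) {
		/* Write the processed count back so the hardware's
		 * instruction counter reflects only outstanding work. */
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
	}

	/* ... lines 364-372 (OQ handling) elided in the listing ... */

	/* Re-arm: ask the device to resend the interrupt if more work
	 * arrived while interrupts were off. */
	writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}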
388 tx_pending = octep_iq_process_completions(ioq_vector->iq, budget); in octep_napi_poll()
398 octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); in octep_napi_poll()
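A hedged sketch of how the two matched calls at lines 388 and 398 sit inside the poll routine; the napi member name, rx_done, and octep_oq_process_rx() are assumptions filled in around them:

static int octep_napi_poll(struct napi_struct *napi, int budget)
{
	struct octep_ioq_vector *ioq_vector =
		container_of(napi, struct octep_ioq_vector, napi);
	u32 tx_pending, rx_done;

	/* Reap TX completions on the paired IQ ... */
	tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
	/* ... and RX packets on the paired OQ (assumed helper). */
	rx_done = octep_oq_process_rx(ioq_vector->oq, budget);

	/* Keep polling while TX completions are pending or RX consumed
	 * the full budget; otherwise complete and re-arm the IRQ. */
	if (tx_pending || rx_done >= budget)
		return budget;

	napi_complete(napi);
	octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
	return rx_done;
}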
588 static inline int octep_iq_full_check(struct octep_iq *iq) in octep_iq_full_check() argument
590 if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >= in octep_iq_full_check()
595 netif_stop_subqueue(iq->netdev, iq->q_no); in octep_iq_full_check()
600 if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >= in octep_iq_full_check()
602 netif_start_subqueue(iq->netdev, iq->q_no); in octep_iq_full_check()
603 iq->stats.restart_cnt++; in octep_iq_full_check()
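The matches at lines 588-603 give the full stop/recheck/restart pattern. A hedged reconstruction; OCTEP_WAKE_QUEUE_THRESHOLD is an assumed name for the free-descriptor watermark, which the listing truncates:

static inline int octep_iq_full_check(struct octep_iq *iq)
{
	/* Common case: enough free descriptors, keep the queue running. */
	if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
		   OCTEP_WAKE_QUEUE_THRESHOLD))
		return 0;

	/* Stop the subqueue before declaring the ring full ... */
	netif_stop_subqueue(iq->netdev, iq->q_no);

	/* ... then re-check: completion processing may have freed space
	 * in the meantime, in which case restart and count the event. */
	if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
		netif_start_subqueue(iq->netdev, iq->q_no);
		iq->stats.restart_cnt++;
		return 0;
	}

	return 1;
}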
628 struct octep_iq *iq; in octep_start_xmit() local
639 iq = oct->iq[q_no]; in octep_start_xmit()
640 if (octep_iq_full_check(iq)) { in octep_start_xmit()
641 iq->stats.tx_busy++; in octep_start_xmit()
648 wi = iq->host_write_index; in octep_start_xmit()
649 hw_desc = &iq->desc_ring[wi]; in octep_start_xmit()
652 tx_buffer = iq->buff_info + wi; in octep_start_xmit()
661 tx_buffer->dma = dma_map_single(iq->dev, skb->data, in octep_start_xmit()
663 if (dma_mapping_error(iq->dev, tx_buffer->dma)) in octep_start_xmit()
678 dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); in octep_start_xmit()
679 if (dma_mapping_error(iq->dev, dma)) in octep_start_xmit()
682 dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma, in octep_start_xmit()
693 dma = skb_frag_dma_map(iq->dev, frag, 0, in octep_start_xmit()
695 if (dma_mapping_error(iq->dev, dma)) in octep_start_xmit()
704 dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma, in octep_start_xmit()
715 writel(1, iq->doorbell_reg); in octep_start_xmit()
716 atomic_inc(&iq->instr_pending); in octep_start_xmit()
718 if (wi == iq->max_count) in octep_start_xmit()
720 iq->host_write_index = wi; in octep_start_xmit()
722 netdev_tx_sent_queue(iq->netdev_q, skb->len); in octep_start_xmit()
723 iq->stats.instr_posted++; in octep_start_xmit()
729 dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], in octep_start_xmit()
734 dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], in octep_start_xmit()
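The matches at lines 628-734 span the whole transmit handler: full-ring back-pressure, descriptor setup, DMA mapping of the linear data and page fragments, doorbell, and the DMA-unmap error path. A condensed, hedged sketch of just the linear (non-fragmented) path; struct and field names not in the listing (octep_tx_desc_hw, dptr) and the unwind label are assumptions, and the scatter-gather and unmap code at lines 678-704 and 729-734 is omitted:

static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);
	u16 q_no = skb_get_queue_mapping(skb);
	struct octep_iq *iq = oct->iq[q_no];
	struct octep_tx_buffer *tx_buffer;
	struct octep_tx_desc_hw *hw_desc;	/* assumed type name */
	u32 wi;

	if (octep_iq_full_check(iq)) {
		iq->stats.tx_busy++;	/* ring full: back-pressure the stack */
		return NETDEV_TX_BUSY;
	}

	wi = iq->host_write_index;
	hw_desc = &iq->desc_ring[wi];
	tx_buffer = iq->buff_info + wi;

	/* Map the linear skb data for device DMA. */
	tx_buffer->dma = dma_map_single(iq->dev, skb->data,
					skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(iq->dev, tx_buffer->dma))
		goto dma_map_err;	/* assumed unwind label */
	hw_desc->dptr = tx_buffer->dma;	/* assumed field name */

	/* Publish: ring the doorbell, account the pending instruction,
	 * and advance the write index with wrap-around. */
	writel(1, iq->doorbell_reg);
	atomic_inc(&iq->instr_pending);
	wi++;
	if (wi == iq->max_count)
		wi = 0;
	iq->host_write_index = wi;
	netdev_tx_sent_queue(iq->netdev_q, skb->len);
	iq->stats.instr_posted++;
	return NETDEV_TX_OK;

dma_map_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}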
764 struct octep_iq *iq = oct->iq[q]; in octep_get_stats64() local
767 tx_packets += iq->stats.instr_completed; in octep_get_stats64()
768 tx_bytes += iq->stats.bytes_sent; in octep_get_stats64()
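A hedged sketch of the aggregation loop around lines 764-768; the loop bound (oct->num_iqs), the RX half, and the stats64 assignments are assumptions:

static void octep_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct octep_device *oct = netdev_priv(netdev);
	u64 tx_packets = 0, tx_bytes = 0;
	int q;

	for (q = 0; q < oct->num_iqs; q++) {	/* assumed bound */
		struct octep_iq *iq = oct->iq[q];

		tx_packets += iq->stats.instr_completed;
		tx_bytes += iq->stats.bytes_sent;
	}
	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
}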