Lines matching refs:iq (struct octep_iq references in the octeon_ep driver's Tx/input-queue code), grouped by function. Only the matched lines are shown; elided context is marked "/* ... */", and "..." stands for arguments on unmatched continuation lines.

/* octep_iq_reset_indices(): zero all software-maintained ring state;
 * the matched lines are the entire function body. */
static void octep_iq_reset_indices(struct octep_iq *iq)
{
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->octep_read_index = 0;
        iq->flush_index = 0;
        iq->pkts_processed = 0;
        iq->pkt_in_done = 0;
        atomic_set(&iq->instr_pending, 0);
}
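octep_iq_reset_indices() zeroes both producer-side state (host_write_index, fill_cnt) and consumer-side state (octep_read_index, flush_index) together with the atomic in-flight counter. As a minimal standalone sketch of the arithmetic those indices support, assuming the power-of-two ring implied by ring_size_mask = max_count - 1 in octep_setup_iq() further down (the helper name and layout here are illustrative, not from the driver):

#include <stdio.h>

/* Hypothetical helper (not from the driver): descriptors posted but
 * not yet reclaimed on a power-of-two ring. The driver tracks this
 * count explicitly in iq->instr_pending instead of recomputing it.
 */
static unsigned int iq_in_flight(unsigned int host_write_index,
                                 unsigned int flush_index,
                                 unsigned int ring_size_mask)
{
        return (host_write_index - flush_index) & ring_size_mask;
}

int main(void)
{
        /* 64-entry ring: producer at 3 after wrapping, consumer at 60 */
        printf("%u\n", iq_in_flight(3, 60, 63));    /* prints 7 */
        return 0;
}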
/* octep_iq_process_completions(): Tx completion processing from NAPI
 * poll: walk buffers from flush_index up to the hardware read index,
 * unmap and free each one, update stats and BQL accounting, and wake
 * the subqueue once enough descriptors are reclaimed. */
int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
{
        struct octep_device *oct = iq->octep_dev;
        /* ... */
        u32 fi = iq->flush_index;
        /* ... */
        iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);

        while (likely(budget && (fi != iq->octep_read_index))) {
                tx_buffer = iq->buff_info + fi;
                /* ... */
                if (unlikely(fi == iq->max_count))
                        fi = 0;
                /* direct (non-SG) packet: */
                dma_unmap_single(iq->dev, tx_buffer->dma, ...);
                /* scatter-gather packet: head buffer, then page frags: */
                dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], ...);
                dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], ...);
                /* ... */
        }

        iq->pkts_processed += compl_pkts;
        atomic_sub(compl_pkts, &iq->instr_pending);
        iq->stats.instr_completed += compl_pkts;
        iq->stats.bytes_sent += compl_bytes;
        iq->stats.sgentry_sent += compl_sg;
        iq->flush_index = fi;

        netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);

        if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
            ((iq->max_count - atomic_read(&iq->instr_pending)) > ...))
                netif_wake_subqueue(iq->netdev, iq->q_no);
        /* ... */
}
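The loop above is a classic single-consumer ring walk: snapshot the hardware-owned read index once, then reclaim entries from flush_index toward it, wrapping at max_count and stopping at the NAPI budget. A self-contained userspace sketch of just that control flow (free() stands in for the driver's DMA unmap and skb free; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8    /* stands in for iq->max_count */

/* Walk [flush_index, read_index) with wraparound, reclaiming at most
 * "budget" entries; returns the new flush_index. Mirrors the loop in
 * octep_iq_process_completions().
 */
static unsigned int reclaim(void *buf[], unsigned int flush_index,
                            unsigned int read_index, unsigned int budget)
{
        unsigned int fi = flush_index;

        while (budget && fi != read_index) {
                free(buf[fi]);              /* driver: dma_unmap + free skb */
                buf[fi] = NULL;
                if (++fi == RING_SIZE)      /* wrap, as with iq->max_count */
                        fi = 0;
                budget--;
        }
        return fi;
}

int main(void)
{
        void *buf[RING_SIZE] = {0};
        unsigned int i;

        for (i = 6; i != 2; i = (i + 1) % RING_SIZE)
                buf[i] = malloc(16);        /* posted entries 6, 7, 0, 1 */

        printf("new flush_index = %u\n", reclaim(buf, 6, 2, 16)); /* 2 */
        return 0;
}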
/* octep_iq_free_pending(): on shutdown, unmap and drop Tx buffers
 * posted but never completed (flush_index up to host_write_index),
 * then zero the in-flight count and reset the queue's BQL state
 * (netdev_tx_reset_queue() pairs with the netdev_tx_completed_queue()
 * accounting above). */
static void octep_iq_free_pending(struct octep_iq *iq)
{
        u32 fi = iq->flush_index;
        /* ... */
        while (fi != iq->host_write_index) {
                tx_buffer = iq->buff_info + fi;
                /* ... */
                if (unlikely(fi == iq->max_count))
                        fi = 0;
                /* direct packet: */
                dma_unmap_single(iq->dev, tx_buffer->dma, ...);
                /* scatter-gather packet: */
                dma_unmap_single(iq->dev,
                                 tx_buffer->sglist[0].dma_ptr[0], ...);
                dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], ...);
        }
        atomic_set(&iq->instr_pending, 0);
        iq->flush_index = fi;
        netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}
/* octep_clean_iqs() loop body: flush then reset each queue. */
        octep_iq_free_pending(oct->iq[i]);
        octep_iq_reset_indices(oct->iq[i]);
/* octep_setup_iq(): allocate and initialize one Tx (input) queue: the
 * queue struct, the DMA-coherent descriptor ring, the DMA-coherent
 * scatter-gather area, and the per-descriptor tx_buffer array; error
 * paths unwind the allocations in reverse order. */
        struct octep_iq *iq;
        /* ... */
        iq = vzalloc(sizeof(*iq));
        if (!iq)
                /* ... */
        oct->iq[q_no] = iq;

        iq->octep_dev = oct;
        iq->netdev = oct->netdev;
        iq->dev = &oct->pdev->dev;
        iq->q_no = q_no;
        iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
        iq->ring_size_mask = iq->max_count - 1;    /* power-of-two ring */
        iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
        iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);

        /* descriptor ring: */
        iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
                                           &iq->desc_ring_dma, GFP_KERNEL);
        if (unlikely(!iq->desc_ring)) {
                dev_err(iq->dev, ...);
                /* ... */
        }

        /* scatter-gather list area: */
        iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
                                        &iq->sglist_dma, GFP_KERNEL);
        if (unlikely(!iq->sglist)) {
                dev_err(iq->dev, ...);
                /* ... */
        }

        /* per-descriptor Tx bookkeeping: */
        buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
        iq->buff_info = vzalloc(buff_info_size);
        if (!iq->buff_info) {
                dev_err(iq->dev, ...);
                /* ... */
        }

        /* point each tx_buffer at its slice of the sglist area: */
        tx_buffer = &iq->buff_info[i];
        tx_buffer->sglist =
                &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
        tx_buffer->sglist_dma =
                iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);

        octep_iq_reset_indices(iq);

        /* error unwind: free in reverse order of allocation */
        dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
        dma_free_coherent(iq->dev, desc_ring_size,
                          iq->desc_ring, iq->desc_ring_dma);
        vfree(iq);
        oct->iq[q_no] = NULL;
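The tx_buffer setup loop above carves one flat DMA-coherent area into per-descriptor scatter-gather slices: the CPU pointer advances in units of OCTEP_SGLIST_ENTRIES_PER_PKT entries while the DMA address advances by OCTEP_SGLIST_SIZE_PER_PKT bytes, which stay in lockstep when SIZE equals ENTRIES times the entry size. A sketch of that carving with placeholder values (4 entries of 16 bytes per packet is an assumption for illustration, not the driver's actual constants):

#include <stdint.h>
#include <stdio.h>

struct sg_entry { uint64_t words[2]; };          /* 16-byte placeholder */

#define ENTRIES_PER_PKT 4                        /* assumed, for illustration */
#define SIZE_PER_PKT (ENTRIES_PER_PKT * sizeof(struct sg_entry))

int main(void)
{
        struct sg_entry sglist[4 * ENTRIES_PER_PKT]; /* area for 4 descriptors */
        uint64_t sglist_dma = 0x10000;               /* pretend bus address */
        int i;

        for (i = 0; i < 4; i++) {
                struct sg_entry *cpu = &sglist[i * ENTRIES_PER_PKT];
                uint64_t dma = sglist_dma + (uint64_t)i * SIZE_PER_PKT;

                /* cpu and dma address the same slice of the area */
                printf("desc %d: cpu offset %zu, dma 0x%llx\n", i,
                       (size_t)(cpu - sglist) * sizeof(*cpu),
                       (unsigned long long)dma);
        }
        return 0;
}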
/* octep_free_iq(): teardown mirror of octep_setup_iq(). */
static void octep_free_iq(struct octep_iq *iq)
{
        struct octep_device *oct = iq->octep_dev;
        /* ... */
        int q_no = iq->q_no;
        /* ... */
        vfree(iq->buff_info);

        if (iq->desc_ring)
                dma_free_coherent(iq->dev, desc_ring_size,
                                  iq->desc_ring, iq->desc_ring_dma);
        /* ... */
        if (iq->sglist)
                dma_free_coherent(iq->dev, sglist_size,
                                  iq->sglist, iq->sglist_dma);

        vfree(iq);
        oct->iq[q_no] = NULL;
}
/* octep_setup_iqs() error unwind: free queues created so far. */
        octep_free_iq(oct->iq[i]);
/* octep_free_iqs() teardown: */
        octep_free_iq(oct->iq[i]);
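The last two matches show octep_free_iq() used from both octep_setup_iqs(), which on a mid-loop setup failure appears to unwind the queues already created, and octep_free_iqs(), which frees every queue at teardown. A compact sketch of that setup/unwind shape, with hypothetical setup_one()/free_one() standing in for the per-queue calls:

#include <stdio.h>

#define NUM_Q 4

static int  setup_one(int q) { return q == 2 ? -1 : 0; } /* fail at q 2 */
static void free_one(int q)  { printf("free q%d\n", q); }

/* Create queues in order; on failure, free the ones created so far in
 * reverse, as the octep_setup_iqs() match above suggests it does with
 * octep_free_iq().
 */
static int setup_all(void)
{
        int i;

        for (i = 0; i < NUM_Q; i++) {
                if (setup_one(i)) {
                        while (i--)
                                free_one(i);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        return setup_all() ? 1 : 0;     /* frees q1, then q0 */
}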