Lines Matching refs:e (the struct amdgpu_mux_entry pointer e) in drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c

in amdgpu_mux_resubmit_chunks():
   78  struct amdgpu_mux_entry *e = NULL;  (local declaration)
   89  e = &mux->ring_entry[i];
   94  if (!e) {
   99  last_seq = atomic_read(&e->ring->fence_drv.last_seq);
  103  list_for_each_entry(chunk, &e->list, entry) {
  105  amdgpu_fence_update_start_timestamp(e->ring,
  109  le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
  110  if (chunk->cntl_offset <= e->ring->buf_mask)
  111  amdgpu_ring_patch_cntl(e->ring,
  113  if (chunk->ce_offset <= e->ring->buf_mask)
  114  amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
  115  if (chunk->de_offset <= e->ring->buf_mask)
  116  amdgpu_ring_patch_de(e->ring, chunk->de_offset);
  118  amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
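This is the resubmit path after a preemption: it walks the per-ring chunk list and replays only chunks whose fence seqno is still above the last signaled seqno, patching the saved cntl/ce/de packet offsets before copying. A minimal userspace sketch of that walk, with toy stand-in structs and names (mux_chunk, resubmit_chunks are illustrative, not the driver's) and no seqno wrap-around handling:

#include <stdint.h>
#include <stdio.h>

/* stand-in for struct amdgpu_mux_chunk: one span of packets in the
 * software ring, tied to the fence seqno that retires it */
struct mux_chunk {
	uint32_t sync_seq;
	uint64_t start, end;
	struct mux_chunk *next;
};

static void resubmit_chunks(const struct mux_chunk *c, uint32_t last_seq)
{
	for (; c; c = c->next) {
		if (c->sync_seq <= last_seq)
			continue; /* fence signaled: nothing to replay */
		/* here the driver patches the chunk's cntl/ce/de packets,
		 * then copies [start, end) onto the real ring */
		printf("replay seq=%u span=[%llu,%llu)\n",
		       (unsigned)c->sync_seq,
		       (unsigned long long)c->start,
		       (unsigned long long)c->end);
	}
}

int main(void)
{
	struct mux_chunk c3 = { 3, 64, 96, NULL };
	struct mux_chunk c2 = { 2, 32, 64, &c3 };
	struct mux_chunk c1 = { 1, 0, 32, &c2 };

	resubmit_chunks(&c1, 1); /* replays chunks 2 and 3 only */
	return 0;
}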

in amdgpu_ring_mux_fini():
  176  struct amdgpu_mux_entry *e;  (local declaration)
  181  e = &mux->ring_entry[i];
  182  list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {

in amdgpu_ring_mux_add_sw_ring():
  196  struct amdgpu_mux_entry *e;  (local declaration)
  203  e = &mux->ring_entry[mux->num_ring_entries];
  205  e->ring = ring;
  207  INIT_LIST_HEAD(&e->list);
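Registration claims the next free slot of mux->ring_entry[] and points it at the software ring; every later entry point re-finds that slot through amdgpu_ring_mux_sw_entry(), which explains the recurring `if (!e)` guards in this listing. A minimal sketch of that pair, assuming a hypothetical fixed NUM_ENTRIES bound and toy struct definitions:

#include <stdio.h>

struct ring { int id; };

struct mux_entry {
	struct ring *ring;
};

#define NUM_ENTRIES 4 /* hypothetical bound; the driver sizes its table at init */

struct ring_mux {
	struct mux_entry ring_entry[NUM_ENTRIES];
	unsigned int num_ring_entries;
};

static int mux_add_sw_ring(struct ring_mux *mux, struct ring *ring)
{
	struct mux_entry *e;

	if (mux->num_ring_entries >= NUM_ENTRIES)
		return -1; /* table full */
	e = &mux->ring_entry[mux->num_ring_entries++];
	e->ring = ring;
	return 0;
}

/* mirrors what amdgpu_ring_mux_sw_entry() does: linear scan, NULL when
 * the ring was never registered */
static struct mux_entry *mux_sw_entry(struct ring_mux *mux, struct ring *ring)
{
	unsigned int i;

	for (i = 0; i < mux->num_ring_entries; i++)
		if (mux->ring_entry[i].ring == ring)
			return &mux->ring_entry[i];
	return NULL;
}

int main(void)
{
	static struct ring_mux mux; /* zero-initialized */
	struct ring high = { 0 }, low = { 1 };

	mux_add_sw_ring(&mux, &high);
	mux_add_sw_ring(&mux, &low);
	printf("low registered: %s\n", mux_sw_entry(&mux, &low) ? "yes" : "no");
	return 0;
}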

in amdgpu_ring_mux_set_wptr():
  214  struct amdgpu_mux_entry *e;  (local declaration)
  221  e = amdgpu_ring_mux_sw_entry(mux, ring);
  222  if (!e) {
  234  e->sw_cptr = e->sw_wptr;
  236  if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
  237  e->sw_cptr = mux->wptr_resubmit;
  238  e->sw_wptr = wptr;
  239  e->start_ptr_in_hw_ring = mux->real_ring->wptr;
  243  amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
  244  e->end_ptr_in_hw_ring = mux->real_ring->wptr;
  247  e->end_ptr_in_hw_ring = mux->real_ring->wptr;
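Lines 234-247 do the copy-window bookkeeping: the span between the consumed pointer sw_cptr and the new wptr is copied onto the real ring, and where it landed is recorded in start/end_ptr_in_hw_ring. A compact sketch of just that pointer arithmetic, assuming toy structs and with the packet copy reduced to advancing a real-ring wptr counter:

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t sw_cptr;  /* packets below this are already on the real ring */
	uint64_t sw_wptr;
	uint64_t start_ptr_in_hw_ring, end_ptr_in_hw_ring;
};

static void mux_set_wptr(struct entry *e, uint64_t wptr, uint64_t *real_wptr,
			 uint64_t wptr_resubmit, int low_prio)
{
	e->sw_cptr = e->sw_wptr;
	/* during a resubmit, a low-priority ring must not re-copy packets
	 * that the resubmit path already replayed */
	if (low_prio && e->sw_cptr < wptr_resubmit)
		e->sw_cptr = wptr_resubmit;
	e->sw_wptr = wptr;
	e->start_ptr_in_hw_ring = *real_wptr;
	*real_wptr += wptr - e->sw_cptr; /* stands in for the packet copy */
	e->end_ptr_in_hw_ring = *real_wptr;
}

int main(void)
{
	struct entry e = { 0 };
	uint64_t real_wptr = 0;

	mux_set_wptr(&e, 32, &real_wptr, 0, 1);
	mux_set_wptr(&e, 48, &real_wptr, 0, 1);
	printf("hw span of last submit: [%llu, %llu)\n", /* [32, 48) */
	       (unsigned long long)e.start_ptr_in_hw_ring,
	       (unsigned long long)e.end_ptr_in_hw_ring);
	return 0;
}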

in amdgpu_ring_mux_get_wptr():
  254  struct amdgpu_mux_entry *e;  (local declaration)
  256  e = amdgpu_ring_mux_sw_entry(mux, ring);
  257  if (!e) {
  262  return e->sw_wptr;

in amdgpu_ring_mux_get_rptr():
  283  struct amdgpu_mux_entry *e;  (local declaration)
  286  e = amdgpu_ring_mux_sw_entry(mux, ring);
  287  if (!e) {
  294  start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
  295  end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
  304  e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
  306  e->sw_rptr = e->sw_cptr;
  309  e->sw_rptr = e->sw_wptr;
  312  return e->sw_rptr;
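amdgpu_ring_mux_get_rptr() inverts that bookkeeping: the real ring's read pointer is located within the [start, end) span recorded at copy time, and the offset into that span is transposed back onto the software ring. A minimal sketch, assuming power-of-two rings (buf_mask = size - 1) and omitting the wrap case where the span crosses the end of the real ring, which the driver handles with extra normalization:

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t sw_cptr, sw_rptr, sw_wptr;
	uint64_t start_ptr_in_hw_ring, end_ptr_in_hw_ring;
};

static uint64_t mux_get_rptr(struct entry *e, uint64_t hw_rptr,
			     uint64_t hw_mask, uint64_t sw_mask)
{
	uint64_t start = e->start_ptr_in_hw_ring & hw_mask;
	uint64_t end = e->end_ptr_in_hw_ring & hw_mask;
	uint64_t rptr = hw_rptr & hw_mask;

	if (rptr >= start && rptr < end)   /* mid-span: transpose the offset */
		e->sw_rptr = (e->sw_cptr + (rptr - start)) & sw_mask;
	else if (rptr < start)             /* our packets not reached yet */
		e->sw_rptr = e->sw_cptr;
	else                               /* past our span: all consumed */
		e->sw_rptr = e->sw_wptr;
	return e->sw_rptr;
}

int main(void)
{
	/* last copy put sw packets [8, 24) at hw [100, 116) */
	struct entry e = { .sw_cptr = 8, .sw_wptr = 24,
			   .start_ptr_in_hw_ring = 100,
			   .end_ptr_in_hw_ring = 116 };

	printf("sw_rptr = %llu\n", /* 10 into the span -> 8 + 10 = 18 */
	       (unsigned long long)mux_get_rptr(&e, 110, 0x3ff, 0xff));
	return 0;
}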

in amdgpu_ring_mux_start_ib():
  434  struct amdgpu_mux_entry *e;  (local declaration)
  441  e = amdgpu_ring_mux_sw_entry(mux, ring);
  442  if (!e) {
  458  list_add_tail(&chunk->entry, &e->list);
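amdgpu_ring_mux_start_ib() opens a chunk: allocate it, stamp it with the fence seqno and the current write pointer, and queue it at the tail of the entry's list; amdgpu_ring_mux_end_ib() (line 529 below) later closes the most recent chunk. A sketch of that lifecycle with the kernel's list_head and kmalloc swapped for a plain singly linked list and calloc, and the cntl/ce/de offset fields left out:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct mux_chunk {
	uint32_t sync_seq;
	uint64_t start, end;
	struct mux_chunk *next;
};

struct entry {
	struct mux_chunk *head, *tail; /* stands in for e->list */
};

static struct mux_chunk *start_ib(struct entry *e, uint32_t seq, uint64_t wptr)
{
	struct mux_chunk *chunk = calloc(1, sizeof(*chunk));

	if (!chunk)
		return NULL;
	chunk->sync_seq = seq;
	chunk->start = wptr;            /* chunk begins at the current wptr */
	if (e->tail)                    /* list_add_tail(&chunk->entry, ...) */
		e->tail->next = chunk;
	else
		e->head = chunk;
	e->tail = chunk;
	return chunk;
}

static void end_ib(struct entry *e, uint64_t wptr)
{
	if (e->tail)                    /* list_last_entry(&e->list, ...) */
		e->tail->end = wptr;    /* close the most recent chunk */
}

int main(void)
{
	struct entry e = { 0 };

	start_ib(&e, 1, 0);
	end_ib(&e, 32);
	printf("chunk 1 spans [%llu, %llu)\n",
	       (unsigned long long)e.head->start,
	       (unsigned long long)e.head->end);
	free(e.head);
	return 0;
}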

in scan_and_remove_signaled_chunk():
  464  struct amdgpu_mux_entry *e;  (local declaration)
  467  e = amdgpu_ring_mux_sw_entry(mux, ring);
  468  if (!e) {
  475  list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
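Both this scan and amdgpu_ring_mux_fini() (line 182 above) iterate with list_for_each_entry_safe() because chunks are freed mid-walk; the _safe variant snapshots the next pointer before the current node is unlinked. The same pattern on a plain singly linked list, with illustrative names:

#include <stdint.h>
#include <stdlib.h>

struct mux_chunk {
	uint32_t sync_seq;
	struct mux_chunk *next;
};

/* drop every chunk whose fence has signaled (seq <= last_seq); the next
 * pointer is read before free(), just as list_for_each_entry_safe()
 * caches it before list_del() */
static void remove_signaled(struct mux_chunk **head, uint32_t last_seq)
{
	struct mux_chunk **pp = head;

	while (*pp) {
		struct mux_chunk *chunk = *pp;

		if (chunk->sync_seq <= last_seq) {
			*pp = chunk->next;  /* unlink, then free */
			free(chunk);
		} else {
			pp = &chunk->next;
		}
	}
}

int main(void)
{
	struct mux_chunk *head = NULL, **pp = &head;
	uint32_t seq;

	for (seq = 1; seq <= 3; seq++) {
		struct mux_chunk *c = calloc(1, sizeof(*c));

		if (!c)
			return 1;
		c->sync_seq = seq;
		*pp = c;
		pp = &c->next;
	}
	remove_signaled(&head, 2); /* frees chunks 1 and 2 */
	remove_signaled(&head, 3); /* frees the rest */
	return 0;
}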

in amdgpu_ring_mux_ib_mark_offset():
  487  struct amdgpu_mux_entry *e;  (local declaration)
  490  e = amdgpu_ring_mux_sw_entry(mux, ring);
  491  if (!e) {
  496  chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);

in amdgpu_ring_mux_end_ib():
  520  struct amdgpu_mux_entry *e;  (local declaration)
  523  e = amdgpu_ring_mux_sw_entry(mux, ring);
  524  if (!e) {
  529  chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);

in amdgpu_mcbp_handle_trailing_fence_irq():
  543  struct amdgpu_mux_entry *e;  (local declaration)
  554  e = &mux->ring_entry[i];
  555  if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
  556  ring = e->ring;
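The IRQ handler scans the entry table for the ring whose hw_prio is at or below AMDGPU_RING_PRIO_DEFAULT, i.e. the preemptible low-priority software ring whose leftover packets need resubmitting. A sketch of that selection, with a hypothetical two-level priority enum standing in for the driver's ring priority values:

#include <stdio.h>

enum prio { PRIO_DEFAULT = 0, PRIO_HIGH = 1 }; /* hypothetical stand-in */

struct ring { enum prio hw_prio; const char *name; };
struct mux_entry { struct ring *ring; };

static struct ring *find_low_prio_ring(struct mux_entry *tab, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tab[i].ring->hw_prio <= PRIO_DEFAULT)
			return tab[i].ring; /* the ring to resubmit */
	return NULL;
}

int main(void)
{
	struct ring high = { PRIO_HIGH, "gfx_high" };
	struct ring low = { PRIO_DEFAULT, "gfx_low" };
	struct mux_entry tab[] = { { &high }, { &low } };
	struct ring *r = find_low_prio_ring(tab, 2);

	printf("resubmit: %s\n", r ? r->name : "none"); /* gfx_low */
	return 0;
}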