| /drivers/media/pci/saa7164/ |
| saa7164-cmd.c |
|   22 | ret = dev->cmds[i].seqno; | in saa7164_cmd_alloc_seqno() |
|   35 | (dev->cmds[seqno].seqno == seqno)) { | in saa7164_cmd_free_seqno() |
|   47 | (dev->cmds[seqno].seqno == seqno)) { | in saa7164_cmd_timeout_seqno() |
|   59 | (dev->cmds[seqno].seqno == seqno)) { | in saa7164_cmd_timeout_get() |
|   92 | __func__, tRsp.seqno); | in saa7164_irq_dequeue() |
|  258 | (dev->cmds[seqno].seqno == seqno)) { | in saa7164_cmd_wait() |
|  269 | __func__, seqno, dev->cmds[seqno].signalled); | in saa7164_cmd_wait() |
|  287 | __func__, seqno, r, | in saa7164_cmd_wait() |
|  293 | __func__, seqno); | in saa7164_cmd_wait() |
|  406 | if (presponse_t->seqno != pcommand_t->seqno) { | in saa7164_cmd_send() |
| [all …] |
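
In the saa7164 matches above, firmware-command bookkeeping indexes a fixed table of in-flight commands by the command's sequence number, so freeing or timing out a command first checks `dev->cmds[seqno].seqno == seqno`. A rough, self-contained sketch of that slot-per-seqno idea follows; the struct layout, slot count and `inuse` flag are invented for illustration, and the real driver tracks considerably more state.

```c
#include <stdbool.h>
#include <stdio.h>

#define SAA_CMD_SLOTS 8	/* illustrative size, not the driver's constant */

struct cmd_slot {
	bool inuse;
	int  seqno;	/* equals the slot index while the command is in flight */
};

static struct cmd_slot cmds[SAA_CMD_SLOTS];

/* Find a free slot and hand out its index as the command's seqno. */
static int cmd_alloc_seqno(void)
{
	for (int i = 0; i < SAA_CMD_SLOTS; i++) {
		if (!cmds[i].inuse) {
			cmds[i].inuse = true;
			cmds[i].seqno = i;
			return i;
		}
	}
	return -1;	/* no free slot */
}

/* Release a slot, but only if it really belongs to that seqno. */
static void cmd_free_seqno(int seqno)
{
	if (seqno >= 0 && seqno < SAA_CMD_SLOTS &&
	    cmds[seqno].inuse && cmds[seqno].seqno == seqno)
		cmds[seqno].inuse = false;
}

int main(void)
{
	int s = cmd_alloc_seqno();

	printf("allocated seqno %d\n", s);
	cmd_free_seqno(s);
	return 0;
}
```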
|
| /drivers/gpu/drm/vc4/ |
| vc4_trace.h |
|   23 | __field(u64, seqno) |
|   29 | __entry->seqno = seqno; |
|   39 | TP_ARGS(dev, seqno), |
|   48 | __entry->seqno = seqno; |
|   82 | uint64_t seqno, |
|   97 | __entry->seqno = seqno; |
|  105 | __entry->seqno, |
|  112 | uint64_t seqno), |
|  122 | __entry->seqno = seqno; |
|  127 | __entry->seqno) |
| [all …] |
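
The vc4 fragments above, like the other `*_trace.h` hits further down (v3d, virtio, scheduler, radeon, i915, amdgpu, msm, lima), are pieces of ftrace `TRACE_EVENT()` definitions that record a fence or job seqno. As a hedged illustration of how the `__field()` / `__entry->seqno = seqno` lines fit together, a minimal trace header with invented `foo_*` names might look like this; only the macro structure follows the standard tracepoint convention.

```c
/* Hypothetical minimal trace header; every "foo" name is invented for
 * illustration.  Real headers may also set TRACE_INCLUDE_PATH/FILE. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_FOO_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _FOO_TRACE_H_

#include <linux/tracepoint.h>

TRACE_EVENT(foo_fence_emit,
	/* prototype and arguments of the tracepoint call site */
	TP_PROTO(u32 ctx, u32 seqno),
	TP_ARGS(ctx, seqno),

	/* fields stored in the ring-buffer entry */
	TP_STRUCT__entry(
		__field(u32, ctx)
		__field(u32, seqno)
	),

	/* copy the arguments into the entry at trace time */
	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->seqno = seqno;
	),

	/* how the entry is rendered in the trace output */
	TP_printk("ctx=%u, seqno=%u", __entry->ctx, __entry->seqno)
);

#endif /* _FOO_TRACE_H_ */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>
```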
|
| /drivers/gpu/drm/v3d/ |
| v3d_trace.h |
|   39 | uint64_t seqno, |
|   54 | __entry->seqno = seqno; |
|   62 | __entry->seqno, |
|   69 | uint64_t seqno), |
|   79 | __entry->seqno = seqno; |
|   84 | __entry->seqno) |
|   99 | __entry->seqno = seqno; |
|  119 | __entry->seqno = seqno; |
|  139 | __entry->seqno = seqno; |
|  178 | __entry->seqno = seqno; |
| [all …] |
|
| /drivers/gpu/drm/virtio/ |
| virtgpu_trace.h |
|   12 | TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno), |
|   13 | TP_ARGS(vq, hdr, seqno), |
|   23 | __field(u32, seqno) |
|   34 | __entry->seqno = seqno; |
|   39 | __entry->ctx_id, __entry->num_free, __entry->seqno) |
|   43 | TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno), |
|   44 | TP_ARGS(vq, hdr, seqno) |
|   48 | TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno), |
|   49 | TP_ARGS(vq, hdr, seqno) |
|
| /drivers/gpu/drm/scheduler/ |
| gpu_scheduler_trace.h |
|   77 | __entry->fence_seqno = sched_job->s_fence->finished.seqno; |
|  106 | __entry->fence_seqno = fence->finished.seqno; |
|  119 | __field(u64, seqno) |
|  124 | __entry->fence_seqno = sched_job->s_fence->finished.seqno; |
|  126 | __entry->seqno = fence->seqno; |
|  130 | __entry->ctx, __entry->seqno) |
|  140 | __field(u64, seqno) |
|  145 | __entry->fence_seqno = sched_job->s_fence->finished.seqno; |
|  147 | __entry->seqno = fence->seqno; |
|  151 | __entry->ctx, __entry->seqno) |
|
| /drivers/dma-buf/ |
| dma-fence-chain.c |
|   90 | int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno) | in dma_fence_chain_find_seqno() argument |
|   94 | if (!seqno) | in dma_fence_chain_find_seqno() |
|   98 | if (!chain || chain->base.seqno < seqno) | in dma_fence_chain_find_seqno() |
|  103 | to_dma_fence_chain(*pfence)->prev_seqno < seqno) | in dma_fence_chain_find_seqno() |
|  243 | uint64_t seqno) | in dma_fence_chain_init() argument |
|  254 | if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) { | in dma_fence_chain_init() |
|  256 | chain->prev_seqno = prev->seqno; | in dma_fence_chain_init() |
|  261 | seqno = max(prev->seqno, seqno); | in dma_fence_chain_init() |
|  265 | context, seqno); | in dma_fence_chain_init() |
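
Two things stand out in the dma-fence-chain matches: `dma_fence_chain_init()` clamps a new link's seqno so the chain never goes backwards (`seqno = max(prev->seqno, seqno)` at line 261), and `dma_fence_chain_find_seqno()` walks the chain to the earliest link that still covers the requested point. A simplified userspace model of that walk is sketched below; it uses plain structs instead of the real `struct dma_fence_chain` and skips reference counting, so it is only meant to show the seqno logic.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a fence-chain node: each link covers everything up to
 * its seqno and remembers the previous link's seqno. */
struct chain_node {
	uint64_t seqno;
	uint64_t prev_seqno;
	struct chain_node *prev;
};

/* Append a link; mirror the kernel's "never go backwards" clamp. */
static struct chain_node *chain_add(struct chain_node *prev, uint64_t seqno)
{
	struct chain_node *n = calloc(1, sizeof(*n));

	if (prev) {
		if (seqno < prev->seqno)
			seqno = prev->seqno;	/* seqno = max(prev->seqno, seqno) */
		n->prev_seqno = prev->seqno;
	}
	n->seqno = seqno;
	n->prev = prev;
	return n;
}

/* Walk back until we reach the earliest link whose seqno still covers
 * the requested point (prev_seqno < target <= seqno). */
static struct chain_node *chain_find_seqno(struct chain_node *head, uint64_t target)
{
	struct chain_node *n = head;

	if (!target || !head || head->seqno < target)
		return NULL;	/* nothing in the chain covers this point */
	while (n->prev && n->prev_seqno >= target)
		n = n->prev;
	return n;
}

int main(void)
{
	struct chain_node *c = chain_add(chain_add(chain_add(NULL, 1), 3), 7);
	struct chain_node *hit = chain_find_seqno(c, 3);

	printf("seqno 3 is covered by link with seqno %llu\n",
	       (unsigned long long)(hit ? hit->seqno : 0));
	return 0;
}
```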
|
| st-dma-fence-chain.c |
|   63 | u64 seqno) | in mock_chain() argument |
|   72 | seqno); | in mock_chain() |
|  281 | fence->seqno); | in find_signaled() |
|  326 | fence ? fence->seqno : 0); | in find_out_of_order() |
|  363 | fence->seqno, | in find_gap() |
|  401 | int seqno; | in __find_race() local |
|  405 | err = dma_fence_chain_find_seqno(&fence, seqno); | in __find_race() |
|  408 | seqno); | in __find_race() |
|  419 | if (fence->seqno == seqno) { | in __find_race() |
|  423 | seqno); | in __find_race() |
| [all …] |
|
| /drivers/gpu/drm/i915/selftests/ |
| i915_syncmap.c |
|  154 | if (__sync_seqno(leaf)[idx] != seqno) { | in check_seqno() |
|  167 | err = i915_syncmap_set(sync, context, seqno); | in check_one() |
|  195 | context, seqno); | in check_one() |
|  242 | err = i915_syncmap_set(sync, context, seqno); | in check_leaf() |
|  264 | context, seqno); | in check_leaf() |
|  548 | u32 seqno; | in igt_syncmap_random() local |
|  570 | seqno = 0; | in igt_syncmap_random() |
|  575 | u32 last_seqno = seqno; | in igt_syncmap_random() |
|  578 | seqno = prandom_u32_state(&prng); | in igt_syncmap_random() |
|  579 | expect = seqno_later(last_seqno, seqno); | in igt_syncmap_random() |
| [all …] |
|
| igt_spinner.c |
|   89 | if (!spin->seqno) { | in igt_spinner_pin() |
|   94 | spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); | in igt_spinner_pin() |
|  178 | *batch++ = rq->fence.seqno; | in igt_spinner_create_request() |
|  222 | u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); | in hws_seqno() local |
|  224 | return READ_ONCE(*seqno); | in hws_seqno() |
|  246 | if (spin->seqno) { | in igt_spinner_fini() |
|  259 | rq->fence.seqno), | in igt_wait_for_spinner() |
|  262 | rq->fence.seqno), | in igt_wait_for_spinner() |
|
| /drivers/gpu/drm/xe/ |
| xe_gt_tlb_invalidation.c |
|  119 | gt->tlb_invalidation.seqno = 1; | in xe_gt_tlb_invalidation_init_early() |
|  163 | if (gt->tlb_invalidation.seqno == 1) | in xe_gt_tlb_invalidation_reset() |
|  186 | return seqno_recv >= seqno; | in tlb_invalidation_seqno_past() |
|  195 | int seqno; | in send_tlb_invalidation() local |
|  207 | seqno = gt->tlb_invalidation.seqno; | in send_tlb_invalidation() |
|  208 | fence->seqno = seqno; | in send_tlb_invalidation() |
|  210 | action[1] = seqno; | in send_tlb_invalidation() |
|  221 | if (tlb_invalidation_seqno_past(gt, seqno)) { | in send_tlb_invalidation() |
|  238 | gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) % | in send_tlb_invalidation() |
|  240 | if (!gt->tlb_invalidation.seqno) | in send_tlb_invalidation() |
| [all …] |
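
The xe matches show the TLB-invalidation seqno life cycle: the counter starts at 1 after init or reset, an acknowledged request is recognised with `seqno_recv >= seqno`, and lines 238–240 wrap the counter while skipping 0. A tiny sketch of just the allocate-and-wrap step follows; the modulus constant and its name are placeholders, not xe's actual limit.

```c
#include <stdio.h>

#define TLB_INVAL_SEQNO_MAX 0x100000	/* illustrative placeholder, not xe's constant */

static int tlb_seqno = 1;	/* 0 is reserved; the counter restarts at 1 */

/* Hand out the next invalidation seqno, wrapping and skipping 0. */
static int tlb_next_seqno(void)
{
	int seqno = tlb_seqno;

	tlb_seqno = (tlb_seqno + 1) % TLB_INVAL_SEQNO_MAX;
	if (!tlb_seqno)
		tlb_seqno = 1;	/* never hand out 0 */
	return seqno;
}

int main(void)
{
	tlb_seqno = TLB_INVAL_SEQNO_MAX - 1;	/* force a wrap for the demo */
	printf("%d %d %d\n", tlb_next_seqno(), tlb_next_seqno(), tlb_next_seqno());
	return 0;
}
```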
|
| xe_ring_ops.c |
|  248 | u64 batch_addr, u32 seqno) | in __emit_job_gen12_simple() argument |
|  259 | seqno, MI_INVALIDATE_TLB, dw, i); | in __emit_job_gen12_simple() |
|  263 | seqno, dw, i); | in __emit_job_gen12_simple() |
|  299 | u64 batch_addr, u32 seqno) | in __emit_job_gen12_video() argument |
|  321 | seqno, MI_INVALIDATE_TLB, dw, i); | in __emit_job_gen12_video() |
|  327 | seqno, dw, i); | in __emit_job_gen12_video() |
|  349 | u64 batch_addr, u32 seqno) | in __emit_job_gen12_render_compute() argument |
|  376 | seqno, dw, i); | in __emit_job_gen12_render_compute() |
|  399 | struct xe_lrc *lrc, u32 seqno) | in emit_migration_job_gen12() argument |
|  406 | i = emit_store_imm_ggtt(saddr, seqno, dw, i); | in emit_migration_job_gen12() |
| [all …] |
|
| /drivers/gpu/drm/i915/gt/ |
| selftest_timeline.c |
|  199 | u32 seqno; | member |
|  697 | u32 seqno[2]; | in live_hwsp_wrap() local |
|  708 | tl->seqno = -4u; | in live_hwsp_wrap() |
|  745 | GEM_BUG_ON(seqno[1] >= seqno[0]); | in live_hwsp_wrap() |
|  760 | seqno[0], seqno[1]); | in live_hwsp_wrap() |
|  792 | *cs++ = seqno; | in emit_read_hwsp() |
|  960 | u32 seqno = rq->fence.seqno; | in wrap_timeline() local |
|  962 | while (tl->seqno >= seqno) { /* Cause a wrap */ | in wrap_timeline() |
| 1204 | tl->seqno = -2u; | in live_hwsp_rollover_kernel() |
| 1227 | GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); | in live_hwsp_rollover_kernel() |
| [all …] |
|
| intel_timeline.h |
|   46 | u64 context, u32 seqno) | in __intel_timeline_sync_set() argument |
|   48 | return i915_syncmap_set(&tl->sync, context, seqno); | in __intel_timeline_sync_set() |
|   54 | return __intel_timeline_sync_set(tl, fence->context, fence->seqno); | in intel_timeline_sync_set() |
|   58 | u64 context, u32 seqno) | in __intel_timeline_sync_is_later() argument |
|   60 | return i915_syncmap_is_later(&tl->sync, context, seqno); | in __intel_timeline_sync_is_later() |
|   66 | return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); | in intel_timeline_sync_is_later() |
|   74 | u32 *seqno); |
|
| intel_tlb.c |
|  113 | static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno) | in tlb_seqno_passed() argument |
|  118 | return (s32)(cur - ALIGN(seqno, 2)) > 0; | in tlb_seqno_passed() |
|  121 | void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno) | in intel_gt_invalidate_tlb_full() argument |
|  131 | if (tlb_seqno_passed(gt, seqno)) | in intel_gt_invalidate_tlb_full() |
|  138 | if (tlb_seqno_passed(gt, seqno)) | in intel_gt_invalidate_tlb_full() |
|  154 | write_seqcount_invalidate(&gt->tlb.seqno); | in intel_gt_invalidate_tlb_full() |
|  163 | seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock); | in intel_gt_init_tlb() |
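
Line 118 is the classic wraparound-safe "has this seqno passed" test: subtract in unsigned arithmetic, then reinterpret the difference as signed, so the comparison stays correct across a 32-bit wrap as long as the two values are within half the range of each other (the i915 helper additionally aligns the seqno to 2 and uses a strict `>`). A standalone illustration of the general idiom:

```c
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "cur is at or past seqno": the unsigned difference is
 * reinterpreted as signed, so a wrapped counter still compares correctly
 * as long as the two values are less than 2^31 apart. */
static int seqno_passed(uint32_t cur, uint32_t seqno)
{
	return (int32_t)(cur - seqno) >= 0;
}

int main(void)
{
	/* a naive compare would get the wrapped case wrong: 2 < 0xfffffffe */
	printf("%d\n", seqno_passed(100, 90));		/* 1: clearly past */
	printf("%d\n", seqno_passed(90, 100));		/* 0: not yet */
	printf("%d\n", seqno_passed(2, 0xfffffffeu));	/* 1: passed across the wrap */
	return 0;
}
```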
|
| intel_timeline.c |
|  230 | WRITE_ONCE(*hwsp_seqno, tl->seqno); | in intel_timeline_reset_seqno() |
|  302 | GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); | in timeline_advance() |
|  304 | return tl->seqno += 1 + tl->has_initial_breadcrumb; | in timeline_advance() |
|  309 | u32 *seqno) | in __intel_timeline_get_seqno() argument |
|  321 | *seqno = timeline_advance(tl); | in __intel_timeline_get_seqno() |
|  322 | GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); | in __intel_timeline_get_seqno() |
|  328 | u32 *seqno) | in intel_timeline_get_seqno() argument |
|  330 | *seqno = timeline_advance(tl); | in intel_timeline_get_seqno() |
|  333 | if (unlikely(!*seqno && tl->has_initial_breadcrumb)) | in intel_timeline_get_seqno() |
|  334 | return __intel_timeline_get_seqno(tl, seqno); | in intel_timeline_get_seqno() |
| [all …] |
|
| /drivers/gpu/drm/radeon/ |
| radeon_trace.h |
|  127 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
|  129 | TP_ARGS(dev, ring, seqno), |
|  134 | __field(u32, seqno) |
|  140 | __entry->seqno = seqno; |
|  144 | __entry->dev, __entry->ring, __entry->seqno) |
|  149 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
|  151 | TP_ARGS(dev, ring, seqno) |
|  156 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
|  158 | TP_ARGS(dev, ring, seqno) |
|  163 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), |
| [all …] |
|
| /drivers/gpu/drm/i915/ |
| i915_syncmap.c |
|   79 | DECLARE_FLEX_ARRAY(u32, seqno); |
|   99 | return p->seqno; | in __sync_seqno() |
|  151 | bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno) | in i915_syncmap_is_later() argument |
|  192 | return seqno_later(__sync_seqno(p)[idx], seqno); | in i915_syncmap_is_later() |
|  200 | p = kmalloc(struct_size(p, seqno, KSYNCMAP), GFP_KERNEL); | in __sync_alloc_leaf() |
|  216 | __sync_seqno(p)[idx] = seqno; | in __sync_set_seqno() |
|  227 | static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno) | in __sync_set() argument |
|  332 | __sync_set_seqno(p, id, seqno); | in __sync_set() |
|  350 | int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno) | in i915_syncmap_set() argument |
|  359 | __sync_set_seqno(p, id, seqno); | in i915_syncmap_set() |
| [all …] |
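
Lines 79 and 200 show a common kernel allocation pattern: the per-node seqno storage is declared as a flexible array member and the node is allocated with `struct_size(p, seqno, KSYNCMAP)`, so the slots live in the same allocation as the header. A userspace sketch of the same layout follows; the field names besides the flexible `seqno[]` array and the slot count are invented, and plain arithmetic stands in for the kernel's overflow-checked `struct_size()`.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define KSLOTS 16	/* stand-in for the kernel's per-leaf slot count */

struct syncmap_leaf {
	uint64_t prefix;	/* which group of contexts this leaf covers (illustrative) */
	uint16_t bitmap;	/* which slots below are valid (illustrative) */
	uint32_t seqno[];	/* flexible array member: one seqno per slot */
};

/* Allocate the leaf and its seqno slots in one block, like
 * kmalloc(struct_size(p, seqno, KSYNCMAP), ...) does in the kernel. */
static struct syncmap_leaf *leaf_alloc(uint64_t prefix)
{
	struct syncmap_leaf *p =
		calloc(1, sizeof(*p) + KSLOTS * sizeof(p->seqno[0]));

	if (p)
		p->prefix = prefix;
	return p;
}

int main(void)
{
	struct syncmap_leaf *leaf = leaf_alloc(0x1234);

	if (!leaf)
		return 1;
	leaf->seqno[3] = 42;		/* record a seqno in slot 3 */
	leaf->bitmap |= 1u << 3;
	printf("slot 3 seqno = %u\n", leaf->seqno[3]);
	free(leaf);
	return 0;
}
```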
|
| i915_trace.h |
|  275 | __field(u32, seqno) |
|  284 | __entry->seqno = rq->fence.seqno; |
|  302 | __field(u32, seqno) |
|  311 | __entry->seqno = rq->fence.seqno; |
|  350 | __field(u32, seqno) |
|  360 | __entry->seqno = rq->fence.seqno; |
|  367 | __entry->ctx, __entry->seqno, |
|  380 | __field(u32, seqno) |
|  389 | __entry->seqno = rq->fence.seqno; |
|  615 | __field(u32, seqno) |
| [all …] |
|
| /drivers/net/wireless/mediatek/mt76/ |
| agg-rx.c |
|   84 | mt76_rx_aggr_release_frames(tid, frames, status->seqno); | in mt76_rx_aggr_check_release() |
|  126 | u16 seqno; | in mt76_rx_aggr_check_ctl() local |
|  135 | seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); | in mt76_rx_aggr_check_ctl() |
|  142 | mt76_rx_aggr_release_frames(tid, frames, seqno); | in mt76_rx_aggr_check_ctl() |
|  155 | u16 seqno, head, size, idx; | in mt76_rx_aggr_reorder() local |
|  187 | seqno = status->seqno; | in mt76_rx_aggr_reorder() |
|  189 | sn_less = ieee80211_sn_less(seqno, head); | in mt76_rx_aggr_reorder() |
|  204 | if (seqno == head) { | in mt76_rx_aggr_reorder() |
|  217 | if (!ieee80211_sn_less(seqno, head + size)) { | in mt76_rx_aggr_reorder() |
|  218 | head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size)); | in mt76_rx_aggr_reorder() |
| [all …] |
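
The mt76 reorder code works on 802.11 sequence numbers, which are 12 bits wide (0–4095), so "earlier than" and the window advance at line 218 are computed modulo 4096. The sketch below re-implements the helpers locally for illustration (the kernel's versions live in `include/linux/ieee80211.h`) and shows the head recomputation when a frame lands beyond the current reorder window.

```c
#include <stdint.h>
#include <stdio.h>

#define SN_MASK   0xfff			/* 802.11 sequence numbers are 12 bits */
#define SN_MODULO (SN_MASK + 1)

static uint16_t sn_sub(uint16_t a, uint16_t b) { return (a - b) & SN_MASK; }
static uint16_t sn_inc(uint16_t sn)            { return (sn + 1) & SN_MASK; }

/* "a comes before b" in modulo-4096 space: the distance from b back to a
 * is more than half the sequence space. */
static int sn_less(uint16_t a, uint16_t b)
{
	return ((a - b) & SN_MASK) > (SN_MODULO >> 1);
}

int main(void)
{
	uint16_t head = 4090, size = 64, seqno = 100;

	/* frame lands at or beyond the window end: slide the window so that
	 * seqno becomes its last slot (mirrors line 218 above) */
	if (!sn_less(seqno, head + size))
		head = sn_inc(sn_sub(seqno, size));

	printf("new head = %u\n", head);	/* (100 - 64 + 1) & 0xfff = 37 */
	return 0;
}
```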
|
| /drivers/gpu/drm/vmwgfx/ |
| vmwgfx_fence.c |
|   99 | u32 seqno; | in vmw_fence_enable_signaling() local |
|  106 | seqno = vmw_fence_read(dev_priv); | in vmw_fence_enable_signaling() |
|  107 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { | in vmw_fence_enable_signaling() |
|  165 | fman->ctx, seqno); | in vmw_fence_obj_init() |
|  193 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { | in __vmw_fences_update() |
|  205 | return seqno; | in __vmw_fences_update() |
|  210 | u32 seqno; | in vmw_fences_update() local |
|  212 | seqno = __vmw_fences_update(fman); | in vmw_fences_update() |
|  214 | return seqno; | in vmw_fences_update() |
|  248 | uint32_t seqno, | in vmw_fence_create() argument |
| [all …] |
|
| vmwgfx_irq.c |
|  120 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) | in vmw_fifo_idle() argument |
|  127 | uint32_t seqno) | in vmw_seqno_passed() argument |
|  132 | if (last_read_seqno - seqno < VMW_FENCE_WRAP) | in vmw_seqno_passed() |
|  136 | if (last_read_seqno - seqno < VMW_FENCE_WRAP) | in vmw_seqno_passed() |
|  139 | if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno)) | in vmw_seqno_passed() |
|  147 | ret = ((atomic_read(&dev_priv->marker_seq) - seqno) | in vmw_seqno_passed() |
|  156 | uint32_t seqno, | in vmw_fallback_wait() argument |
|  196 | if (wait_condition(dev_priv, seqno)) | in vmw_fallback_wait() |
|
| /drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_trace.h |
|  172 | __field(u64, seqno) |
|  181 | __entry->seqno = job->base.s_fence->finished.seqno; |
|  187 | __entry->seqno, __get_str(ring), __entry->num_ibs) |
|  196 | __field(u64, seqno) |
|  204 | __entry->seqno = job->base.s_fence->finished.seqno; |
|  210 | __entry->seqno, __get_str(ring), __entry->num_ibs) |
|  478 | __field(u64, seqno) |
|  483 | __entry->seqno = fence->seqno; |
|  552 | __field(u64, seqno) |
|  559 | __entry->seqno = fence->seqno; |
| [all …] |
|
| /drivers/gpu/drm/msm/ |
| msm_gpu_trace.h |
|   40 | __field(u32, seqno) |
|   47 | __entry->seqno = submit->seqno; |
|   51 | __entry->id, __entry->pid, __entry->ringid, __entry->seqno, |
|   64 | __field(u32, seqno) |
|   74 | __entry->seqno = submit->seqno; |
|   81 | __entry->id, __entry->pid, __entry->ringid, __entry->seqno, |
|
| /drivers/gpu/drm/lima/ |
| lima_trace.h |
|   18 | __field(unsigned int, seqno) |
|   24 | __entry->seqno = task->base.s_fence->finished.seqno; |
|   29 | __entry->context, __entry->seqno, |
|
| /drivers/net/ppp/ |
| ppp_deflate.c |
|   25 | int seqno; | member |
|  150 | state->seqno = 0; | in z_comp_init() |
|  170 | state->seqno = 0; | in z_comp_reset() |
|  213 | put_unaligned_be16(state->seqno, wptr); | in z_compress() |
|  218 | ++state->seqno; | in z_compress() |
|  364 | state->seqno = 0; | in z_decomp_init() |
|  385 | state->seqno = 0; | in z_decomp_reset() |
|  427 | if (seq != (state->seqno & 0xffff)) { | in z_decompress() |
|  430 | state->unit, seq, state->seqno & 0xffff); | in z_decompress() |
|  433 | ++state->seqno; | in z_decompress() |
| [all …] |
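
In ppp_deflate the seqno is a per-direction packet counter used to detect loss: the compressor stamps its low 16 bits big-endian into each packet (line 213) and increments, and the decompressor rejects a packet whose embedded value does not match its own counter (line 427). A small sketch of just that stamping and checking; the 2-byte buffer stands in for wherever the real header places the field.

```c
#include <stdint.h>
#include <stdio.h>

/* Store the low 16 bits of the counter big-endian, as put_unaligned_be16()
 * does in the driver. */
static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	int tx_seqno = 0, rx_seqno = 0;
	uint8_t pkt[2];

	/* sender side: stamp the packet and advance the counter */
	put_be16(pkt, (uint16_t)tx_seqno);
	++tx_seqno;

	/* receiver side: a mismatch means one or more packets were lost */
	uint16_t seq = get_be16(pkt);
	if (seq != (rx_seqno & 0xffff))
		printf("bad seqno %u, expected %u\n", seq, rx_seqno & 0xffff);
	else
		printf("seqno %u ok\n", seq);
	++rx_seqno;
	return 0;
}
```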
|