| /drivers/gpu/drm/i915/selftests/ |
| igt_spinner.c |
|   97  if (!spin->batch) {  in igt_spinner_pin() |
|  105  spin->batch = vaddr;  in igt_spinner_pin() |
|  131  u32 *batch;  in igt_spinner_create_request() local |
|  139  if (!spin->batch) {  in igt_spinner_create_request() |
|  160  batch = spin->batch;  in igt_spinner_create_request() |
|  168  *batch++ = 0;  in igt_spinner_create_request() |
|  172  *batch++ = 0;  in igt_spinner_create_request() |
|  178  *batch++ = rq->fence.seqno;  in igt_spinner_create_request() |
|  183  batch += 128;  in igt_spinner_create_request() |
|  229  if (!spin->batch)  in igt_spinner_end() |
|  [all …] |
|
| i915_request.c |
| 1058  if (IS_ERR(batch))  in live_empty_request() |
| 1109  i915_vma_unpin(batch);  in live_empty_request() |
| 1110  i915_vma_put(batch);  in live_empty_request() |
| 1218  if (IS_ERR(batch)) {  in live_all_engines() |
| 1219  err = PTR_ERR(batch);  in live_all_engines() |
| 1225  i915_vma_lock(batch);  in live_all_engines() |
| 1240  request[idx]->batch = batch;  in live_all_engines() |
| 1305  if (rq->batch) {  in live_all_engines() |
| 1348  if (IS_ERR(batch)) {  in live_sequential_engines() |
| 1355  i915_vma_lock(batch);  in live_sequential_engines() |
|  [all …] |
|
| /drivers/iommu/iommufd/ |
| pages.c |
|  295  batch->npfns[batch->end - 1] < keep_pfns);  in batch_clear_carry() |
|  298  batch->pfns[0] = batch->pfns[batch->end - 1] +  in batch_clear_carry() |
|  299  (batch->npfns[batch->end - 1] - keep_pfns);  in batch_clear_carry() |
|  309  WARN_ON(batch->total_pfns != batch->npfns[0]);  in batch_skip_carry() |
|  328  batch->npfns = (u32 *)(batch->pfns + batch->array_size);  in __batch_init() |
|  362  batch->end++;  in batch_add_pfn_num() |
|  373  batch->npfns[batch->end - 1] -= nr;  in batch_remove_pfn_num() |
|  374  if (batch->npfns[batch->end - 1] == 0)  in batch_remove_pfn_num() |
|  375  batch->end--;  in batch_remove_pfn_num() |
|  449  batch->array_size = batch->end;  in batch_from_domain_continue() |
|  [all …] |
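The pages.c hits sketch the shape of the iommufd PFN batch: parallel arrays of run-start PFNs and per-run counts, where a physically contiguous page extends the last run instead of taking a new slot. Below is a minimal userspace sketch of that run-length batching idea; the struct layout, capacity, and helper names are simplified stand-ins, not the kernel's.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_CAP 8

/* Simplified stand-in for the iommufd pfn batch: parallel arrays of
 * run-start pfns and run lengths; not the real kernel layout. */
struct pfn_batch {
	uint64_t pfns[BATCH_CAP];   /* first pfn of each contiguous run */
	uint32_t npfns[BATCH_CAP];  /* length of each run */
	unsigned int end;           /* number of runs in use */
};

static bool batch_add_pfn(struct pfn_batch *b, uint64_t pfn)
{
	/* Extend the last run when the new pfn is physically contiguous. */
	if (b->end &&
	    pfn == b->pfns[b->end - 1] + b->npfns[b->end - 1]) {
		b->npfns[b->end - 1]++;
		return true;
	}
	if (b->end == BATCH_CAP)
		return false;           /* full: caller must flush first */
	b->pfns[b->end] = pfn;
	b->npfns[b->end] = 1;
	b->end++;
	return true;
}

int main(void)
{
	struct pfn_batch b = { .end = 0 };
	uint64_t input[] = { 100, 101, 102, 200, 201, 500 };

	for (unsigned int i = 0; i < sizeof(input) / sizeof(input[0]); i++)
		batch_add_pfn(&b, input[i]);

	for (unsigned int i = 0; i < b.end; i++)
		printf("run %u: pfn %llu x %u\n", i,
		       (unsigned long long)b.pfns[i], b.npfns[i]);
	return 0;
}
```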
|
| /drivers/gpu/drm/i915/gt/ |
| gen7_renderclear.c |
|  257  batch_advance(batch, cs);  in gen7_emit_state_base_address() |
|  286  batch_advance(batch, cs);  in gen7_emit_vfe_state() |
|  294  u32 *cs = batch_alloc_items(batch, 8, 4);  in gen7_emit_interface_descriptor_load() |
|  305  batch_advance(batch, cs);  in gen7_emit_interface_descriptor_load() |
|  317  cs = batch_alloc_items(batch, 8, pkt);  in gen7_emit_media_object() |
|  337  batch_advance(batch, cs);  in gen7_emit_media_object() |
|  352  batch_advance(batch, cs);  in gen7_emit_pipeline_flush() |
|  373  batch_advance(batch, cs);  in gen7_emit_pipeline_invalidate() |
|  436  u32 *batch;  in gen7_setup_clear_gpr_bb() local |
|  445  if (IS_ERR(batch))  in gen7_setup_clear_gpr_bb() |
|  [all …] |
|
| intel_lrc.c |
| 1644  *batch++ = 0;  in gen8_emit_flush_coherentl3_wa() |
| 1650  batch = gen8_emit_pipe_control(batch,  in gen8_emit_flush_coherentl3_wa() |
| 1659  *batch++ = 0;  in gen8_emit_flush_coherentl3_wa() |
| 1661  return batch;  in gen8_emit_flush_coherentl3_wa() |
| 1686  batch = gen8_emit_flush_coherentl3_wa(engine, batch);  in gen8_init_indirectctx_bb() |
| 1690  batch = gen8_emit_pipe_control(batch,  in gen8_init_indirectctx_bb() |
| 1709  return batch;  in gen8_init_indirectctx_bb() |
| 1728  return batch;  in emit_lri() |
| 1759  batch = gen8_emit_flush_coherentl3_wa(engine, batch);  in gen9_init_indirectctx_bb() |
| 1762  batch = gen8_emit_pipe_control(batch,  in gen9_init_indirectctx_bb() |
|  [all …] |
|
| gen8_engine_cs.h |
|   53  __gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,  in __gen8_emit_pipe_control() argument |
|   56  memset(batch, 0, 6 * sizeof(u32));  in __gen8_emit_pipe_control() |
|   58  batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;  in __gen8_emit_pipe_control() |
|   59  batch[1] = bit_group_1;  in __gen8_emit_pipe_control() |
|   60  batch[2] = offset;  in __gen8_emit_pipe_control() |
|   62  return batch + 6;  in __gen8_emit_pipe_control() |
|   65  static inline u32 *gen8_emit_pipe_control(u32 *batch,  in gen8_emit_pipe_control() argument |
|   68  return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);  in gen8_emit_pipe_control() |
|   71  static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,  in gen12_emit_pipe_control() argument |
|   74  return __gen8_emit_pipe_control(batch, bit_group_0,  in gen12_emit_pipe_control() |
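These header hits show the whole emit helper: clear a fixed six-dword slot, fill in the opcode and arguments, and return the pointer advanced past the slot so successive emitters can be chained. A minimal sketch of that emit-and-advance style follows; the opcode macro and values are invented for illustration and are not the real PIPE_CONTROL encoding.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a 6-dword command; not the real encoding. */
#define FAKE_OP_PIPE_CONTROL(len) (0x7a000000u | ((len) - 2))

static uint32_t *emit_pipe_control(uint32_t *batch, uint32_t flags, uint32_t offset)
{
	memset(batch, 0, 6 * sizeof(uint32_t));  /* unused dwords stay zero */
	batch[0] = FAKE_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;
	return batch + 6;                        /* caller continues writing here */
}

int main(void)
{
	uint32_t buf[64] = { 0 };
	uint32_t *cs = buf;

	/* Chained emission: each helper returns the next free slot. */
	cs = emit_pipe_control(cs, 0x1, 0x1000);
	cs = emit_pipe_control(cs, 0x2, 0x2000);

	printf("emitted %zu dwords\n", (size_t)(cs - buf));
	return 0;
}
```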
|
| selftest_hangcheck.c |
|   38  u32 *batch;  member |
|   81  h->batch = vaddr;  in hang_init() |
|  113  u32 *batch;  in hang_create_request() local |
|  133  h->batch = vaddr;  in hang_create_request() |
|  171  batch = h->batch;  in hang_create_request() |
|  180  batch += 1024 / sizeof(*batch);  in hang_create_request() |
|  188  *batch++ = 0;  in hang_create_request() |
|  194  batch += 1024 / sizeof(*batch);  in hang_create_request() |
|  201  *batch++ = 0;  in hang_create_request() |
|  207  batch += 1024 / sizeof(*batch);  in hang_create_request() |
|  [all …] |
|
| selftest_workarounds.c |
|  504  struct i915_vma *batch;  in check_dirty_whitelist() local |
|  513  batch = create_batch(ce->vm);  in check_dirty_whitelist() |
|  514  if (IS_ERR(batch)) {  in check_dirty_whitelist() |
|  515  err = PTR_ERR(batch);  in check_dirty_whitelist() |
|  750  i915_vma_unpin_and_release(&batch, 0);  in check_dirty_whitelist() |
|  890  struct i915_vma *batch;  in scrub_whitelisted_registers() local |
|  894  batch = create_batch(ce->vm);  in scrub_whitelisted_registers() |
|  895  if (IS_ERR(batch))  in scrub_whitelisted_registers() |
|  896  return PTR_ERR(batch);  in scrub_whitelisted_registers() |
|  919  i915_gem_object_flush_map(batch->obj);  in scrub_whitelisted_registers() |
|  [all …] |
|
| selftest_lrc.c |
|  980  if (IS_ERR(batch))  in store_context() |
|  981  return batch;  in store_context() |
|  985  i915_vma_put(batch);  in store_context() |
|  992  i915_vma_put(batch);  in store_context() |
| 1056  return batch;  in store_context() |
| 1151  if (IS_ERR(batch))  in load_context() |
| 1152  return batch;  in load_context() |
| 1156  i915_vma_put(batch);  in load_context() |
| 1214  return batch;  in load_context() |
| 1225  if (IS_ERR(batch))  in poison_registers() |
|  [all …] |
|
| selftest_tlb.c |
|   42  struct drm_i915_gem_object *batch;  in pte_tlbinv() local |
|   50  batch = i915_gem_object_create_internal(ce->vm->i915, 4096);  in pte_tlbinv() |
|   51  if (IS_ERR(batch))  in pte_tlbinv() |
|   52  return PTR_ERR(batch);  in pte_tlbinv() |
|   54  vma = i915_vma_instance(batch, ce->vm, NULL);  in pte_tlbinv() |
|  100  cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);  in pte_tlbinv() |
|  122  i915_gem_object_flush_map(batch);  in pte_tlbinv() |
|  184  cs = page_mask_bits(batch->mm.mapping);  in pte_tlbinv() |
|  195  i915_gem_object_put(batch);  in pte_tlbinv() |
|
| selftest_engine_cs.c |
|  147  struct i915_vma *batch;  in perf_mi_bb_start() local |
|  156  batch = create_empty_batch(ce);  in perf_mi_bb_start() |
|  157  if (IS_ERR(batch)) {  in perf_mi_bb_start() |
|  158  err = PTR_ERR(batch);  in perf_mi_bb_start() |
|  163  err = i915_vma_sync(batch);  in perf_mi_bb_start() |
|  166  i915_vma_put(batch);  in perf_mi_bb_start() |
|  184  i915_vma_offset(batch), 8,  in perf_mi_bb_start() |
|  205  i915_vma_put(batch);  in perf_mi_bb_start() |
|
| intel_renderstate.c |
|   43  #define OUT_BATCH(batch, i, val) \  argument |
|   47  (batch)[(i)++] = (val); \ |
|   63  u32 s = rodata->batch[i];  in render_state_setup() |
|   71  rodata->batch[i + 1] != 0)  in render_state_setup() |
|
| /drivers/gpu/drm/xe/ |
| xe_pxp_submit.c |
|  223  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_wait() |
|  231  offset = pxp_emit_wait(xe, batch, offset);  in pxp_emit_session_selection() |
|  235  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_session_selection() |
|  236  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_session_selection() |
|  237  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_session_selection() |
|  241  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_session_selection() |
|  243  offset = pxp_emit_wait(xe, batch, offset);  in pxp_emit_session_selection() |
|  252  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_session_selection() |
|  253  emit_cmd(xe, batch, offset++, 0);  in pxp_emit_session_selection() |
|  255  offset = pxp_emit_wait(xe, batch, offset);  in pxp_emit_session_selection() |
|  [all …] |
|
| /drivers/xen/ |
| gntdev.c |
|  640  batch = priv->batch;  in gntdev_release() |
|  641  priv->batch = batch->next;  in gntdev_release() |
|  823  batch->pages[batch->nr_pages++] = page;  in gntdev_get_page() |
|  833  unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);  in gntdev_put_pages() |
|  842  gnttab_batch_copy(batch->ops, batch->nr_ops);  in gntdev_copy() |
|  912  op = &batch->ops[batch->nr_ops];  in gntdev_grant_copy_seg() |
|  958  batch->status[batch->nr_ops] = status;  in gntdev_grant_copy_seg() |
|  979  batch = priv->batch;  in gntdev_ioctl_grant_copy() |
|  980  priv->batch = batch->next;  in gntdev_ioctl_grant_copy() |
| 1011  batch->next = priv->batch;  in gntdev_ioctl_grant_copy() |
|  [all …] |
|
| /drivers/iommu/intel/ |
| cache.c |
|  295  if (!iommu || !batch->index)  in qi_batch_flush_descs() |
|  298  qi_submit_sync(iommu, batch->descs, batch->index, 0);  in qi_batch_flush_descs() |
|  301  memset(batch, 0, sizeof(*batch));  in qi_batch_flush_descs() |
|  307  qi_batch_flush_descs(iommu, batch);  in qi_batch_increment_index() |
|  312  struct qi_batch *batch)  in qi_batch_add_iotlb() argument |
|  314  qi_desc_iotlb(iommu, did, addr, size_order, type, &batch->descs[batch->index]);  in qi_batch_add_iotlb() |
|  320  struct qi_batch *batch)  in qi_batch_add_dev_iotlb() argument |
|  329  qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &batch->descs[batch->index]);  in qi_batch_add_dev_iotlb() |
|  335  struct qi_batch *batch)  in qi_batch_add_piotlb() argument |
|  345  qi_desc_piotlb(did, pasid, addr, npages, ih, &batch->descs[batch->index]);  in qi_batch_add_piotlb() |
|  [all …] |
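The cache.c hits outline the queued-invalidation batch: qi_batch_add_*() drops a descriptor at the current index, qi_batch_increment_index() flushes when the array is full, and qi_batch_flush_descs() submits everything in one qi_submit_sync() call and resets the batch. A simplified accumulate-and-flush sketch, with placeholder descriptor contents and capacity rather than real VT-d definitions:

```c
#include <stdio.h>
#include <string.h>

#define QI_BATCH_MAX 4

/* Placeholder descriptor; real VT-d descriptors carry address/domain data. */
struct desc { unsigned long data; };

struct desc_batch {
	struct desc descs[QI_BATCH_MAX];
	unsigned int index;
};

static void batch_flush(struct desc_batch *b)
{
	if (!b->index)
		return;
	printf("submitting %u descriptors in one call\n", b->index);
	memset(b, 0, sizeof(*b));
}

static void batch_add(struct desc_batch *b, unsigned long data)
{
	b->descs[b->index].data = data;
	if (++b->index == QI_BATCH_MAX)   /* full: flush before the next add */
		batch_flush(b);
}

int main(void)
{
	struct desc_batch b = { .index = 0 };

	for (unsigned long i = 0; i < 10; i++)
		batch_add(&b, i);
	batch_flush(&b);                  /* drain the partial tail */
	return 0;
}
```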
|
| /drivers/gpu/drm/vmwgfx/ |
| vmwgfx_mob.c |
|  238  struct vmw_otable_batch *batch)  in vmw_otable_batch_setup() argument |
|  257  &batch->otable_bo);  in vmw_otable_batch_setup() |
|  263  if (!batch->otables[i].enabled)  in vmw_otable_batch_setup() |
|  267  &batch->otable_bo->tbo,  in vmw_otable_batch_setup() |
|  279  if (batch->otables[i].enabled)  in vmw_otable_batch_setup() |
|  281  &batch->otables[i]);  in vmw_otable_batch_setup() |
|  285  vmw_bo_unreference(&batch->otable_bo);  in vmw_otable_batch_setup() |
|  332  struct vmw_otable_batch *batch)  in vmw_otable_batch_takedown() argument |
|  339  if (batch->otables[i].enabled)  in vmw_otable_batch_takedown() |
|  341  &batch->otables[i]);  in vmw_otable_batch_takedown() |
|  [all …] |
|
| /drivers/gpu/drm/i915/gem/selftests/ |
| igt_gem_utils.c |
|  116  struct i915_vma *batch;  in igt_gpu_fill_dw() local |
|  123  batch = igt_emit_store_dw(vma, offset, count, val);  in igt_gpu_fill_dw() |
|  124  if (IS_ERR(batch))  in igt_gpu_fill_dw() |
|  125  return PTR_ERR(batch);  in igt_gpu_fill_dw() |
|  133  err = igt_vma_move_to_active_unlocked(batch, rq, 0);  in igt_gpu_fill_dw() |
|  146  i915_vma_offset(batch),  in igt_gpu_fill_dw() |
|  147  i915_vma_size(batch),  in igt_gpu_fill_dw() |
|  155  i915_vma_unpin_and_release(&batch, 0);  in igt_gpu_fill_dw() |
|
| i915_gem_client_blt.c |
|  104  struct i915_vma *batch;  member |
|  143  struct drm_i915_gem_object *batch)  in prepare_blit() argument |
|  252  i915_gem_object_flush_map(batch);  in prepare_blit() |
|  253  i915_gem_object_unpin_map(batch);  in prepare_blit() |
|  266  i915_vma_put(t->batch);  in tiled_blits_destroy_buffers() |
|  306  if (IS_ERR(t->batch))  in tiled_blits_create_buffers() |
|  307  return PTR_ERR(t->batch);  in tiled_blits_create_buffers() |
|  311  i915_vma_put(t->batch);  in tiled_blits_create_buffers() |
|  513  i915_vma_offset(t->batch),  in tiled_blit() |
|  514  i915_vma_size(t->batch),  in tiled_blit() |
|  [all …] |
|
| /drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
| rx.c |
|   26  int batch, i;  in mlx5e_xsk_alloc_rx_mpwqe() local |
|   35  batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,  in mlx5e_xsk_alloc_rx_mpwqe() |
|   44  for (; batch < rq->mpwqe.pages_per_wqe; batch++) {  in mlx5e_xsk_alloc_rx_mpwqe() |
|   45  xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_alloc_rx_mpwqe() |
|   46  if (unlikely(!xsk_buffs[batch]))  in mlx5e_xsk_alloc_rx_mpwqe() |
|   55  for (i = 0; i < batch; i++) {  in mlx5e_xsk_alloc_rx_mpwqe() |
|   65  for (i = 0; i < batch; i++) {  in mlx5e_xsk_alloc_rx_mpwqe() |
|   78  for (i = 0; i < batch; i++) {  in mlx5e_xsk_alloc_rx_mpwqe() |
|  105  for (i = 0; i < batch; i++) {  in mlx5e_xsk_alloc_rx_mpwqe() |
|  152  while (--batch >= 0)  in mlx5e_xsk_alloc_rx_mpwqe() |
|  [all …] |
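The rx.c hits show a common bulk-allocation pattern: ask the pool for a whole batch with xsk_buff_alloc_batch(), then top up any shortfall one buffer at a time before filling the WQE. A hedged userspace sketch of that batch-then-fallback flow, with fake allocators standing in for the XSK pool calls:

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define WANTED 8

/* Pretend bulk allocator that may return fewer buffers than requested,
 * standing in for xsk_buff_alloc_batch(); purely illustrative. */
static int fake_alloc_batch(void **bufs, int max)
{
	int got = max > 5 ? 5 : max;    /* simulate a partially filled batch */

	for (int i = 0; i < got; i++)
		bufs[i] = malloc(64);
	return got;
}

static void *fake_alloc_one(void)
{
	return malloc(64);
}

int main(void)
{
	void *bufs[WANTED];
	int batch = fake_alloc_batch(bufs, WANTED);

	/* Top up one-by-one when the batched call came up short. */
	for (; batch < WANTED; batch++) {
		bufs[batch] = fake_alloc_one();
		if (!bufs[batch])
			break;          /* real code would unwind here */
	}

	printf("filled %d of %d slots\n", batch, WANTED);
	for (int i = 0; i < batch; i++)
		free(bufs[i]);
	return 0;
}
```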
|
| /drivers/vfio/ |
| vfio_iommu_type1.c |
|  475  batch->size = 0;  in __vfio_batch_init() |
|  476  batch->offset = 0;  in __vfio_batch_init() |
|  482  if (!batch->pages)  in __vfio_batch_init() |
|  489  batch->pages = &batch->fallback_page;  in __vfio_batch_init() |
|  506  unsigned long pfn = page_to_pfn(batch->pages[batch->offset]);  in vfio_batch_unpin() |
|  509  batch->offset++;  in vfio_batch_unpin() |
|  510  batch->size--;  in vfio_batch_unpin() |
|  636  if (batch->size) {  in vfio_pin_pages_remote() |
|  638  *pfn_base = page_to_pfn(batch->pages[batch->offset]);  in vfio_pin_pages_remote() |
|  715  batch->size--;  in vfio_pin_pages_remote() |
|  [all …] |
|
| /drivers/net/ethernet/netronome/nfp/flower/ |
| lag_conf.c |
|  234  unsigned int member_cnt, enum nfp_fl_lag_batch *batch)  in nfp_fl_lag_config_group() argument |
|  254  if (*batch == NFP_FL_LAG_BATCH_FIRST) {  in nfp_fl_lag_config_group() |
|  257  *batch = NFP_FL_LAG_BATCH_MEMBER;  in nfp_fl_lag_config_group() |
|  263  *batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_config_group() |
|  269  if (*batch == NFP_FL_LAG_BATCH_FINISHED) {  in nfp_fl_lag_config_group() |
|  296  enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;  in nfp_fl_lag_do_work() local |
|  318  &batch);  in nfp_fl_lag_do_work() |
|  391  active_count, &batch);  in nfp_fl_lag_do_work() |
|  405  if (batch == NFP_FL_LAG_BATCH_MEMBER) {  in nfp_fl_lag_do_work() |
|  406  batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_do_work() |
|  [all …] |
|
| /drivers/net/ethernet/freescale/dpaa2/ |
| dpaa2-xsk.c |
|  401  int batch, i, err;  in dpaa2_xsk_tx() local |
|  410  batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);  in dpaa2_xsk_tx() |
|  411  if (!batch)  in dpaa2_xsk_tx() |
|  415  for (i = 0; i < batch; i++) {  in dpaa2_xsk_tx() |
|  418  batch = i;  in dpaa2_xsk_tx() |
|  426  max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;  in dpaa2_xsk_tx() |
|  430  while (total_enqueued < batch && retries < max_retries) {  in dpaa2_xsk_tx() |
|  432  batch - total_enqueued, &enqueued);  in dpaa2_xsk_tx() |
|  446  for (i = total_enqueued; i < batch; i++) {  in dpaa2_xsk_tx() |
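dpaa2-xsk.c batches TX the other way around: peek a burst of descriptors from the pool, then keep calling the enqueue hook until everything is accepted or a retry budget proportional to the batch size runs out. A small sketch of that bounded-retry enqueue loop; the enqueue function here is a stand-in that accepts only part of each burst.

```c
#include <stdio.h>

/* Pretend enqueue that accepts at most 3 frames per call, standing in for
 * a hardware queue that can reject part of a burst; illustrative only. */
static int fake_enqueue(int count, int *enqueued)
{
	*enqueued = count > 3 ? 3 : count;
	return 0;
}

int main(void)
{
	int batch = 10;                 /* frames peeked from the pool */
	int total_enqueued = 0, retries = 0, err = 0;
	int max_retries = batch * 2;    /* retry budget scales with batch size */

	while (total_enqueued < batch && retries < max_retries) {
		int enqueued = 0;

		err = fake_enqueue(batch - total_enqueued, &enqueued);
		if (err)
			break;
		total_enqueued += enqueued;
		retries++;
	}

	printf("enqueued %d of %d frames in %d calls\n",
	       total_enqueued, batch, retries);
	return 0;
}
```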
|
| /drivers/md/ |
| dm-clone-target.c |
|  984  if (batch->head) {  in __batch_hydration() |
|  987  (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {  in __batch_hydration() |
|  989  batch->nr_batched_regions++;  in __batch_hydration() |
|  995  hydration_copy(batch->head, batch->nr_batched_regions);  in __batch_hydration() |
|  996  batch->head = NULL;  in __batch_hydration() |
| 1012  batch->head = hd;  in __batch_hydration() |
| 1013  batch->nr_batched_regions = 1;  in __batch_hydration() |
| 1042  __batch_hydration(batch, hd);  in __start_next_hydration() |
| 1066  struct batch_info batch = {  in do_hydration() local |
| 1106  if (batch.head)  in do_hydration() |
|  [all …] |
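dm-clone's __batch_hydration() merges region copies: if the next region immediately follows the pending run it only bumps the count, otherwise the pending run is issued as one hydration_copy() and a new run starts. A simplified sketch of that adjacent-region batching, using plain region numbers instead of the driver's hydration descriptors:

```c
#include <stdio.h>

/* Simplified region batcher: adjacent region numbers are merged into one
 * copy, anything else flushes the pending run first. Mirrors the shape of
 * __batch_hydration() in the listing, not the dm-clone implementation. */
struct batch_info {
	long head;        /* first region of the pending run, -1 if empty */
	unsigned int nr;  /* number of regions batched so far */
};

static void issue_copy(long head, unsigned int nr)
{
	printf("copy regions %ld..%ld (%u regions)\n",
	       head, (long)(head + nr - 1), nr);
}

static void batch_region(struct batch_info *b, long region)
{
	if (b->head >= 0 && b->head + b->nr == region) {
		b->nr++;                      /* extends the current run */
		return;
	}
	if (b->head >= 0)
		issue_copy(b->head, b->nr);   /* flush the old run */
	b->head = region;
	b->nr = 1;
}

int main(void)
{
	struct batch_info b = { .head = -1, .nr = 0 };
	long regions[] = { 4, 5, 6, 10, 11, 20 };

	for (unsigned int i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		batch_region(&b, regions[i]);
	if (b.head >= 0)
		issue_copy(b.head, b.nr);     /* drain the tail */
	return 0;
}
```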
|
| /drivers/interconnect/qcom/ |
| bcm-voter.c |
|  163  size_t idx = 0, batch = 0, cur_vcd_size = 0;  in tcs_list_gen() local |
|  181  n[batch]++;  in tcs_list_gen() |
|  188  if (n[batch] >= MAX_RPMH_PAYLOAD) {  in tcs_list_gen() |
|  190  n[batch] -= cur_vcd_size;  in tcs_list_gen() |
|  191  n[batch + 1] = cur_vcd_size;  in tcs_list_gen() |
|  193  batch++;  in tcs_list_gen() |
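In bcm-voter.c the command count for the current batch is tracked in n[batch]; when adding the current VCD group would blow past MAX_RPMH_PAYLOAD, the whole group is carried over into the next batch so a group never straddles two TCS batches. A toy version of that cap-and-carry split, with a made-up payload limit:

```c
#include <stdio.h>

#define MAX_PAYLOAD 5   /* illustrative cap per batch, not the RPMh value */

int main(void)
{
	/* Command counts per group; a group must not straddle two batches. */
	int group_sizes[] = { 2, 2, 3, 1, 4 };
	int n[8] = { 0 };               /* commands per batch */
	int batch = 0;

	for (unsigned int g = 0; g < sizeof(group_sizes) / sizeof(group_sizes[0]); g++) {
		int cur = group_sizes[g];

		n[batch] += cur;
		if (n[batch] > MAX_PAYLOAD) {
			/* Overflow: move the whole current group to a new batch. */
			n[batch] -= cur;
			n[batch + 1] = cur;
			batch++;
		}
	}

	for (int i = 0; i <= batch; i++)
		printf("batch %d: %d commands\n", i, n[i]);
	return 0;
}
```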
|
| /drivers/char/ |
| random.c |
|  523  struct batch_ ##type *batch; \ |
|  534  batch = raw_cpu_ptr(&batched_entropy_##type); \ |
|  537  if (batch->position >= ARRAY_SIZE(batch->entropy) || \ |
|  538  next_gen != batch->generation) { \ |
|  539  _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \ |
|  540  batch->position = 0; \ |
|  541  batch->generation = next_gen; \ |
|  544  ret = batch->entropy[batch->position]; \ |
|  545  batch->entropy[batch->position] = 0; \ |
|  546  ++batch->position; \ |
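random.c fills a per-CPU batch of entropy words in one expensive _get_random_bytes() call and then hands them out one at a time, refilling when the batch is exhausted or its generation number no longer matches. A userspace, single-threaded sketch of the same caching idea; rand() stands in for the real generator and this must not be used for actual randomness:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BATCH_WORDS 8

/* Simplified batched-entropy cache: many words drawn at once, consumed
 * one at a time, invalidated when the generation counter moves on. */
struct entropy_batch {
	uint32_t entropy[BATCH_WORDS];
	unsigned int position;
	unsigned long generation;
};

static unsigned long current_generation;   /* bumped when old batches must die */

static uint32_t get_random_u32_cached(struct entropy_batch *batch)
{
	uint32_t ret;

	if (batch->position >= BATCH_WORDS ||
	    batch->generation != current_generation) {
		for (unsigned int i = 0; i < BATCH_WORDS; i++)
			batch->entropy[i] = (uint32_t)rand();   /* expensive refill */
		batch->position = 0;
		batch->generation = current_generation;
	}

	ret = batch->entropy[batch->position];
	batch->entropy[batch->position] = 0;    /* don't leave the value behind */
	batch->position++;
	return ret;
}

int main(void)
{
	struct entropy_batch batch;

	memset(&batch, 0, sizeof(batch));
	batch.position = BATCH_WORDS;           /* force an initial refill */

	for (int i = 0; i < 12; i++)
		printf("%08x\n", (unsigned)get_random_u32_cached(&batch));
	return 0;
}
```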
|