
Searched refs:vmap (Results 1 – 25 of 161) sorted by relevance


/linux/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
vmap.c
32 u32 vmap = 0; in nvbios_vmap_table() local
37 if (vmap) { in nvbios_vmap_table()
38 *ver = nvbios_rd08(bios, vmap + 0); in nvbios_vmap_table()
45 return vmap; in nvbios_vmap_table()
62 switch (!!vmap * *ver) { in nvbios_vmap_parse()
77 return vmap; in nvbios_vmap_parse()
85 if (vmap && idx < cnt) { in nvbios_vmap_entry()
86 vmap = vmap + hdr + (idx * *len); in nvbios_vmap_entry()
87 return vmap; in nvbios_vmap_entry()
98 switch (!!vmap * *ver) { in nvbios_vmap_entry_parse()
[all …]
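
For orientation, a hedged sketch of the VBIOS table walk these nvbios_vmap hits show: read the table's version, header size, entry count and entry length, then locate entry idx at table + header + idx * length. The byte offsets and the rd08() helper below are illustrative assumptions, not the nouveau layout; the "!!vmap * *ver" switch in the hits simply makes a missing table fall through to the unsupported (version 0) case.

#include <linux/types.h>

static u8 rd08(const u8 *image, u32 off)
{
	return image[off];
}

static u32 vmap_table_entry(const u8 *image, u32 table, int idx)
{
	u8 ver, hdr, cnt, len;

	if (!table)
		return 0;

	ver = rd08(image, table + 0);	/* table version (illustrative offset) */
	hdr = rd08(image, table + 1);	/* header size in bytes                */
	cnt = rd08(image, table + 2);	/* number of entries                   */
	len = rd08(image, table + 3);	/* size of one entry                   */

	if (!ver || idx >= cnt)
		return 0;

	return table + hdr + idx * len;	/* offset of entry idx */
}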
/linux/drivers/gpu/drm/xe/tests/
xe_migrate.c
112 retval = xe_map_rd(xe, &remote->vmap, 0, u64); in test_copy()
123 xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size); in test_copy()
130 retval = xe_map_rd(xe, &bo->vmap, 0, u64); in test_copy()
141 xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size); in test_copy()
147 retval = xe_map_rd(xe, &remote->vmap, 0, u64); in test_copy()
266 xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead); in xe_migrate_sanity_test()
274 retval = xe_map_rd(xe, &pt->vmap, 0, u32); in xe_migrate_sanity_test()
289 retval = xe_map_rd(xe, &tiny->vmap, 0, u32); in xe_migrate_sanity_test()
311 retval = xe_map_rd(xe, &big->vmap, 0, u32); in xe_migrate_sanity_test()
526 retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); in test_migrate()
[all …]
/linux/drivers/net/
vrf.c
109 struct vrf_map vmap; member
145 return &nn_vrf->vmap; in netns_vrf_map()
220 static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock) in vrf_map_lock() argument
251 vrf_map_lock(vmap); in vrf_map_register_dev()
281 vrf_map_unlock(vmap); in vrf_map_register_dev()
299 vrf_map_lock(vmap); in vrf_map_unregister_dev()
318 vrf_map_unlock(vmap); in vrf_map_unregister_dev()
328 vrf_map_lock(vmap); in vrf_ifindex_lookup_by_table_id()
344 vrf_map_unlock(vmap); in vrf_ifindex_lookup_by_table_id()
1850 vrf_map_lock(vmap); in vrf_strict_mode()
[all …]
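
The vrf.c hits above follow the common pattern of wrapping a lock in small helpers that carry sparse context annotations, so callers such as vrf_map_register_dev() and vrf_strict_mode() are checked for balanced locking. A minimal sketch, assuming the vmap_lock member named in the __acquires() annotation is a spinlock; the struct and the bodies are illustrative, not copied from vrf.c.

#include <linux/spinlock.h>

struct vrf_map {
	spinlock_t vmap_lock;	/* assumed lock type; other members omitted */
	/* ... */
};

static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
{
	spin_lock(&vmap->vmap_lock);
}

static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
{
	spin_unlock(&vmap->vmap_lock);
}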
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/volt/
base.c
87 u32 vmap; in nvkm_volt_map_min() local
89 vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); in nvkm_volt_map_min()
90 if (vmap) { in nvkm_volt_map_min()
109 u32 vmap; in nvkm_volt_map() local
111 vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); in nvkm_volt_map()
112 if (vmap) { in nvkm_volt_map()
295 struct nvbios_vmap vmap; in nvkm_volt_ctor() local
301 if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) { in nvkm_volt_ctor()
302 volt->max0_id = vmap.max0; in nvkm_volt_ctor()
303 volt->max1_id = vmap.max1; in nvkm_volt_ctor()
[all …]
/linux/drivers/gpu/drm/xe/
xe_sa.c
59 sa_manager->is_iomem = bo->vmap.is_iomem; in xe_sa_bo_manager_init()
64 if (bo->vmap.is_iomem) { in xe_sa_bo_manager_init()
71 sa_manager->cpu_ptr = bo->vmap.vaddr; in xe_sa_bo_manager_init()
101 if (!sa_manager->bo->vmap.is_iomem) in xe_sa_bo_flush_write()
104 xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo), in xe_sa_bo_flush_write()
xe_huc.c
170 wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0, in huc_auth_via_gsccs()
172 wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset, in huc_auth_via_gsccs()
182 if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap, in huc_auth_via_gsccs()
194 err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE, in huc_auth_via_gsccs()
206 out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status); in huc_auth_via_gsccs()
xe_gsc.c
59 xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size); in memcpy_fw()
60 xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size); in memcpy_fw()
61 xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size); in memcpy_fw()
147 wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, HECI_MEADDRESS_MKHI, 0, in query_compatibility_version()
149 wr_offset = emit_version_query_msg(xe, &bo->vmap, wr_offset); in query_compatibility_version()
161 err = xe_gsc_read_out_header(xe, &bo->vmap, GSC_VER_PKT_SZ, in query_compatibility_version()
169 compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major); in query_compatibility_version()
170 compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major); in query_compatibility_version()
171 compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor); in query_compatibility_version()
xe_guc_log.c
67 xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32), in xe_guc_log_print()
93 xe_map_memset(xe, &bo->vmap, 0, 0, guc_log_size()); in xe_guc_log_init()
xe_memirq.c
143 iosys_map_memset(&bo->vmap, 0, 0, SZ_4K); in memirq_alloc_pages()
146 memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET); in memirq_alloc_pages()
147 memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET); in memirq_alloc_pages()
148 memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET); in memirq_alloc_pages()
xe_migrate.c
165 xe_map_wr(xe, &bo->vmap, ofs, u64, entry); in xe_migrate_program_identity()
174 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); in xe_migrate_program_identity()
178 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); in xe_migrate_program_identity()
219 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry); in xe_migrate_prepare_vm()
228 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry); in xe_migrate_prepare_vm()
245 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, in xe_migrate_prepare_vm()
262 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, in xe_migrate_prepare_vm()
287 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64, in xe_migrate_prepare_vm()
296 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE + in xe_migrate_prepare_vm()
1290 &update->pt_bo->vmap, NULL, in xe_migrate_update_pgtables_cpu()
[all …]
xe_bo_types.h
45 struct iosys_map vmap; member
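
The xe hits above all go through the struct iosys_map named vmap embedded in the buffer object: iosys_map (linux/iosys-map.h) hides whether the mapping is system memory or I/O memory, so the same accessors work for both. A short generic usage sketch of the accessors that xe_map_rd()/xe_map_wr()/xe_map_memset() wrap; the helper names here are illustrative, not xe code.

#include <linux/iosys-map.h>
#include <linux/types.h>

static u64 probe_first_qword(struct iosys_map *map)
{
	/* Typed accessors work whether the map wraps vaddr or vaddr_iomem. */
	iosys_map_wr(map, 0, u64, 0xdeadbeefdeadbeefULL);
	return iosys_map_rd(map, 0, u64);
}

static void wipe_buffer(struct iosys_map *map, size_t size)
{
	iosys_map_memset(map, 0, 0, size);	/* offset 0, value 0, size bytes */
}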
/linux/Documentation/translations/zh_CN/core-api/
cachetlb.rst
319 set up via the vmap/vmalloc API. Since kernel I/O goes through physical pages, the I/O subsystem assumes that the user
320 mapping and the kernel offset mapping are the only aliases. This is not true for vmap aliases, so anything in the kernel
321 trying to do I/O to vmap areas must manage coherency manually. It must flush the vmap range before doing I/O
326 flushes the kernel cache for the specified virtual address range in the vmap area. This is to make sure that anything the kernel
332 the cache for a given virtual address range in the vmap area, which prevents the processor from making the cache stale by speculatively reading data while I/O to the physical pages
333 is taking place. This is only necessary for data read into the vmap area.
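
The cachetlb.rst hits describe the coherency rule for vmap aliases: flush the vmap range before doing I/O on the underlying pages and invalidate it afterwards for reads. A minimal sketch using the two functions the document names; submit_read_to_pages() is a hypothetical stand-in for the actual I/O submission.

#include <linux/highmem.h>

extern void submit_read_to_pages(struct page **pages, unsigned int npages);	/* hypothetical I/O */

static void read_through_vmap_alias(void *vmap_addr, size_t len,
				    struct page **pages, unsigned int npages)
{
	flush_kernel_vmap_range(vmap_addr, len);	/* write back dirty lines first  */
	submit_read_to_pages(pages, npages);		/* I/O happens on physical pages */
	invalidate_kernel_vmap_range(vmap_addr, len);	/* drop now-stale cached lines   */
}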
/linux/drivers/gpu/drm/xe/display/
xe_hdcp_gsc.c
90 xe_map_memset(xe, &bo->vmap, 0, 0, bo->size); in intel_hdcp_gsc_initialize_message()
184 struct iosys_map *map = &hdcp_message->hdcp_bo->vmap; in xe_gsc_send_sync()
227 addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap, in intel_hdcp_gsc_msg_send()
230 xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off, in intel_hdcp_gsc_msg_send()
253 xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap, in intel_hdcp_gsc_msg_send()
xe_dsb_buffer.c
22 iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); in intel_dsb_buffer_write()
28 return iosys_map_rd(&dsb_buf->vma->bo->vmap, idx * 4, u32); in intel_dsb_buffer_read()
37 iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); in intel_dsb_buffer_memset()
intel_fbdev_fb.c
100 XE_WARN_ON(iosys_map_is_null(&obj->vmap)); in intel_fbdev_fb_fill_info()
102 info->screen_base = obj->vmap.vaddr_iomem; in intel_fbdev_fb_fill_info()
/linux/kernel/dma/
remap.c
29 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
53 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
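
Both remap.c hits build a kernel virtual mapping over an array of pages with vmap(). A hedged sketch of the pattern behind dma_common_contiguous_remap(): fill a struct page * array covering a physically contiguous region, vmap() it with the requested pgprot, then free the temporary array, since vmap() installs the page tables and does not keep the array unless VM_MAP_PUT_PAGES is passed. VM_MAP is used here instead of the DMA-specific flag to keep the example generic.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *remap_contiguous(struct page *first, size_t size, pgprot_t prot)
{
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;

	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(first, i);	/* i-th page of the contiguous region */

	vaddr = vmap(pages, count, VM_MAP, prot);	/* kernel alias over the pages */
	kfree(pages);					/* page tables are set up; array not kept */
	return vaddr;
}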
/linux/Documentation/translations/zh_CN/mm/
vmalloced-kernel-stacks.rst
48 - Stacks in vmalloc space need to work reliably. For example, if vmap page tables are created on demand, when the stack points to
91 work. The architecture-specific vmap stack allocator takes care of this detail.
/linux/tools/testing/selftests/net/
test_vxlan_vnifiltering.sh
222 for vmap in $(echo $vattrs | cut -d "," -f1- --output-delimiter=' ')
224 local vid=$(echo $vmap | awk -F'-' '{print ($1)}')
225 local family=$(echo $vmap | awk -F'-' '{print ($2)}')
226 local localip=$(echo $vmap | awk -F'-' '{print ($3)}')
227 local group=$(echo $vmap | awk -F'-' '{print ($4)}')
228 local vtype=$(echo $vmap | awk -F'-' '{print ($5)}')
229 local port=$(echo $vmap | awk -F'-' '{print ($6)}')
/linux/arch/arm/mm/
fault-armv.c
249 p1 = vmap(&page, 1, VM_IOREMAP, prot); in check_writebuffer_bugs()
250 p2 = vmap(&page, 1, VM_IOREMAP, prot); in check_writebuffer_bugs()
/linux/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
336 etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj); in etnaviv_gem_vmap()
352 return vmap(pages, obj->base.size >> PAGE_SHIFT, in etnaviv_gem_vmap_impl()
484 .vmap = etnaviv_gem_vmap_impl,
541 .vmap = etnaviv_gem_prime_vmap,
705 .vmap = etnaviv_gem_vmap_impl,
/linux/drivers/gpu/drm/tiny/
st7586.c
73 struct iosys_map dst_map, vmap; in st7586_xrgb8888_to_gray332() local
80 iosys_map_set_vaddr(&vmap, vaddr); in st7586_xrgb8888_to_gray332()
81 drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip, fmtcnv_state); in st7586_xrgb8888_to_gray332()
/linux/Documentation/features/vm/huge-vmap/
arch-support.txt
2 # Feature name: huge-vmap
/linux/arch/x86/kernel/
irq_64.c
49 va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL); in map_irq_stack()
/linux/drivers/gpu/drm/hyperv/
hyperv_drm_modeset.c
23 const struct iosys_map *vmap, in hyperv_blit_to_vram_rect() argument
34 drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect); in hyperv_blit_to_vram_rect()
/linux/drivers/gpu/drm/xen/
xen_drm_front_gem.c
108 .vmap = xen_drm_front_gem_prime_vmap,
292 vaddr = vmap(xen_obj->pages, xen_obj->num_pages, in xen_drm_front_gem_prime_vmap()

Completed in 31 milliseconds
