/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ |
A D | vmap.c |
    32   u32 vmap = 0;  in nvbios_vmap_table() local
    37   if (vmap) {  in nvbios_vmap_table()
    38   *ver = nvbios_rd08(bios, vmap + 0);  in nvbios_vmap_table()
    45   return vmap;  in nvbios_vmap_table()
    62   switch (!!vmap * *ver) {  in nvbios_vmap_parse()
    77   return vmap;  in nvbios_vmap_parse()
    85   if (vmap && idx < cnt) {  in nvbios_vmap_entry()
    86   vmap = vmap + hdr + (idx * *len);  in nvbios_vmap_entry()
    87   return vmap;  in nvbios_vmap_entry()
    98   switch (!!vmap * *ver) {  in nvbios_vmap_entry_parse()
    [all …]
|
/linux-6.3-rc2/drivers/net/ |
A D | vrf.c |
    108   struct vrf_map vmap;  member
    181   return &nn_vrf->vmap;  in netns_vrf_map()
    256   static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)  in vrf_map_lock() argument
    287   vrf_map_lock(vmap);  in vrf_map_register_dev()
    317   vrf_map_unlock(vmap);  in vrf_map_register_dev()
    335   vrf_map_lock(vmap);  in vrf_map_unregister_dev()
    354   vrf_map_unlock(vmap);  in vrf_map_unregister_dev()
    364   vrf_map_lock(vmap);  in vrf_ifindex_lookup_by_table_id()
    380   vrf_map_unlock(vmap);  in vrf_ifindex_lookup_by_table_id()
    1889  vrf_map_lock(vmap);  in vrf_strict_mode()
    [all …]
|
/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/subdev/volt/ |
A D | base.c |
    87    u32 vmap;  in nvkm_volt_map_min() local
    89    vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);  in nvkm_volt_map_min()
    90    if (vmap) {  in nvkm_volt_map_min()
    109   u32 vmap;  in nvkm_volt_map() local
    111   vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);  in nvkm_volt_map()
    112   if (vmap) {  in nvkm_volt_map()
    295   struct nvbios_vmap vmap;  in nvkm_volt_ctor() local
    301   if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {  in nvkm_volt_ctor()
    302   volt->max0_id = vmap.max0;  in nvkm_volt_ctor()
    303   volt->max1_id = vmap.max1;  in nvkm_volt_ctor()
    [all …]
|
/linux-6.3-rc2/Documentation/translations/zh_CN/core-api/ |
A D | cachetlb.rst |
    319   set up by the vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O subsystem assumes that the user
    320   mapping and the kernel offset mapping are the only aliases. This isn't true for vmap aliases, so anything in the kernel
    321   trying to do I/O to vmap areas must manually manage coherency. It must flush the vmap range before doing I/O
    326   flushes the kernel cache for the given virtual address range in the vmap area. This is to make sure that any data the kernel modified in the vmap range
    332   the cache for a given virtual address range in the vmap area, which prevents the processor from dirtying the cache by speculatively reading data while I/O to the physical pages
    333   is taking place. This is only necessary for data read into the vmap area.
|
/linux-6.3-rc2/kernel/dma/ |
A D | remap.c |
    27   vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
    51   vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
|
/linux-6.3-rc2/tools/testing/selftests/net/ |
A D | test_vxlan_vnifiltering.sh |
    228   for vmap in $(echo $vattrs | cut -d "," -f1- --output-delimiter=' ')
    230   local vid=$(echo $vmap | awk -F'-' '{print ($1)}')
    231   local family=$(echo $vmap | awk -F'-' '{print ($2)}')
    232   local localip=$(echo $vmap | awk -F'-' '{print ($3)}')
    233   local group=$(echo $vmap | awk -F'-' '{print ($4)}')
    234   local vtype=$(echo $vmap | awk -F'-' '{print ($5)}')
    235   local port=$(echo $vmap | awk -F'-' '{print ($6)}')
|
/linux-6.3-rc2/Documentation/translations/zh_CN/mm/ |
A D | vmalloced-kernel-stacks.rst |
    48   - Stacks in vmalloc space need to work reliably. For example, if the vmap page tables are created on demand, when the stack points to
    91   work. The architecture-specific vmap stack allocator takes care of this detail.
|
A D | highmem.rst | 114   * vmap(). This can be used to make a long-duration mapping of multiple physical pages into a contiguous virtual space. It needs global synchronization to unmap
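    For illustration only (not part of the search results above): a minimal sketch of the vmap()/vunmap() pattern that highmem.rst describes, assuming the caller already holds an array of struct page pointers. The two helper names are hypothetical; vmap(), vunmap(), VM_MAP and PAGE_KERNEL are the real kernel interfaces, as also seen in the driver hits listed elsewhere in these results.

        #include <linux/mm.h>
        #include <linux/vmalloc.h>

        /* Hypothetical helper: map 'count' physical pages into one contiguous
         * kernel virtual range for long-term use. */
        static void *map_pages_long_term(struct page **pages, unsigned int count)
        {
                /* VM_MAP marks this as an ordinary vmap() area; PAGE_KERNEL gives
                 * normal cacheable kernel read/write permissions. */
                return vmap(pages, count, VM_MAP, PAGE_KERNEL);
        }

        /* Hypothetical helper: tear the mapping down again. vunmap() is the step
         * that needs the global synchronization the documentation mentions
         * (a TLB flush across all CPUs). */
        static void unmap_pages_long_term(void *vaddr)
        {
                vunmap(vaddr);
        }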
|
/linux-6.3-rc2/arch/arm/mm/ |
A D | fault-armv.c |
    247   p1 = vmap(&page, 1, VM_IOREMAP, prot);  in check_writebuffer_bugs()
    248   p2 = vmap(&page, 1, VM_IOREMAP, prot);  in check_writebuffer_bugs()
|
/linux-6.3-rc2/drivers/gpu/drm/tiny/ |
A D | st7586.c |
    72   struct iosys_map dst_map, vmap;  in st7586_xrgb8888_to_gray332() local
    79   iosys_map_set_vaddr(&vmap, vaddr);  in st7586_xrgb8888_to_gray332()
    80   drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip);  in st7586_xrgb8888_to_gray332()
|
A D | cirrus.c |
    319   const struct iosys_map *vmap,  in cirrus_fb_blit_rect() argument
    333   drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);  in cirrus_fb_blit_rect()
    337   drm_fb_xrgb8888_to_rgb565(&dst, &cirrus->pitch, vmap, fb, rect, false);  in cirrus_fb_blit_rect()
    341   drm_fb_xrgb8888_to_rgb888(&dst, &cirrus->pitch, vmap, fb, rect);  in cirrus_fb_blit_rect()
|
/linux-6.3-rc2/drivers/gpu/drm/etnaviv/ |
A D | etnaviv_gem.c |
    337   etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);  in etnaviv_gem_vmap()
    353   return vmap(pages, obj->base.size >> PAGE_SHIFT,  in etnaviv_gem_vmap_impl()
    483   .vmap = etnaviv_gem_vmap_impl,
    540   .vmap = etnaviv_gem_prime_vmap,
    705   .vmap = etnaviv_gem_vmap_impl,
|
A D | etnaviv_gem.h | 67 void *(*vmap)(struct etnaviv_gem_object *); member
|
A D | etnaviv_gem_prime.c | 100 .vmap = etnaviv_gem_prime_vmap_impl,
|
/linux-6.3-rc2/Documentation/features/vm/huge-vmap/ |
A D | arch-support.txt | 2 # Feature name: huge-vmap
|
/linux-6.3-rc2/arch/x86/kernel/ |
A D | irq_64.c | 48 va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL); in map_irq_stack()
|
/linux-6.3-rc2/drivers/gpu/drm/xen/ |
A D | xen_drm_front_gem.c |
    107   .vmap = xen_drm_front_gem_prime_vmap,
    291   vaddr = vmap(xen_obj->pages, xen_obj->num_pages,  in xen_drm_front_gem_prime_vmap()
|
/linux-6.3-rc2/drivers/gpu/drm/hyperv/ |
A D | hyperv_drm_modeset.c |
    23   const struct iosys_map *vmap,  in hyperv_blit_to_vram_rect() argument
    34   drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);  in hyperv_blit_to_vram_rect()
|
/linux-6.3-rc2/arch/hexagon/kernel/ |
A D | vdso.c | 28 vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); in vdso_init()
|
/linux-6.3-rc2/drivers/gpu/drm/mediatek/ |
A D | mtk_drm_gem.c |
    27    .vmap = mtk_drm_gem_prime_vmap,
    248   mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,  in mtk_drm_gem_prime_vmap()
|
/linux-6.3-rc2/Documentation/translations/zh_CN/dev-tools/ |
A D | kasan.rst |
    64    Generic KASAN supports bug detection in all of slab, page_alloc, vmap, vmalloc, stack and global memory
    348   This supports such mappings by hooking into vmalloc and vmap and dynamically allocating real shadow memory.
    357   KASAN hooks into the vmap infrastructure to lazily clean up unused shadow memory.
|
/linux-6.3-rc2/drivers/gpu/drm/rockchip/ |
A D | rockchip_drm_gem.c |
    137   rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,  in rockchip_gem_alloc_iommu()
    279   .vmap = rockchip_gem_prime_vmap,
    521   void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,  in rockchip_gem_prime_vmap()
|
/linux-6.3-rc2/Documentation/core-api/ |
A D | cachetlb.rst |
    377   vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
    379   the only aliases. This isn't true for vmap aliases, so anything in
    380   the kernel trying to do I/O to vmap areas must manually manage
    381   coherency. It must do this by flushing the vmap range before doing
    387   the vmap area. This is to make sure that any data the kernel
    388   modified in the vmap range is made visible to the physical
    395   the cache for a given virtual address range in the vmap area
    399   vmap area.
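    For illustration only (not part of the search results): a hedged sketch of the manual coherency management cachetlb.rst describes for I/O through a vmap alias — flush the range before the device reads it, invalidate it after the device has written it. The two wrapper functions are hypothetical and only show where each call sits relative to the I/O; flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the interfaces the document refers to, typically reached via asm/cacheflush.h.

        #include <asm/cacheflush.h>

        /* 'vaddr' is a vmap()ed alias of pages a device will DMA to or from. */

        /* Call before starting I/O in which the device reads the buffer. */
        static void prepare_device_read(void *vaddr, int size)
        {
                /* Write back dirty cache lines of the vmap alias so the device
                 * sees the data the CPU stored through that alias. */
                flush_kernel_vmap_range(vaddr, size);
        }

        /* Call after I/O in which the device wrote the buffer has completed. */
        static void finish_device_write(void *vaddr, int size)
        {
                /* Discard (possibly speculatively filled) cache lines for the
                 * alias so the CPU re-reads the data the device just wrote. */
                invalidate_kernel_vmap_range(vaddr, size);
        }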
|
/linux-6.3-rc2/drivers/gpu/drm/ |
A D | drm_memory.c | 95 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); in agp_remap()
|
/linux-6.3-rc2/drivers/dma-buf/heaps/ |
A D | cma_heap.c |
    201   vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);  in cma_heap_do_vmap()
    274   .vmap = cma_heap_vmap,
|