Home
last modified time | relevance | path

Searched refs:mapped (Results 1 – 5 of 5) sorted by relevance

/mm/kmsan/
shadow.c:222 int nr, mapped, err = 0; in kmsan_vmap_pages_range_noflush() local
248 mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot, in kmsan_vmap_pages_range_noflush()
251 if (mapped) { in kmsan_vmap_pages_range_noflush()
252 err = mapped; in kmsan_vmap_pages_range_noflush()
256 mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot, in kmsan_vmap_pages_range_noflush()
259 if (mapped) { in kmsan_vmap_pages_range_noflush()
260 err = mapped; in kmsan_vmap_pages_range_noflush()
hooks.c:154 int nr, err = 0, clean = 0, mapped; in kmsan_ioremap_page_range() local
168 mapped = __vmap_pages_range_noflush( in kmsan_ioremap_page_range()
172 if (mapped) { in kmsan_ioremap_page_range()
173 err = mapped; in kmsan_ioremap_page_range()
177 mapped = __vmap_pages_range_noflush( in kmsan_ioremap_page_range()
181 if (mapped) { in kmsan_ioremap_page_range()
185 err = mapped; in kmsan_ioremap_page_range()
/mm/
rmap.c:1248 atomic_t *mapped = &folio->_nr_pages_mapped; in __folio_add_rmap() local
1276 atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) in __folio_add_rmap()
1297 nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); in __folio_add_rmap()
1690 atomic_t *mapped = &folio->_nr_pages_mapped; in __folio_remove_rmap() local
1722 atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED) in __folio_remove_rmap()
1725 partially_mapped = nr && atomic_read(mapped); in __folio_remove_rmap()
1748 nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); in __folio_remove_rmap()
Kconfig:311 utilization of a direct-mapped memory-side-cache. See section
446 SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
490 # IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
1255 not mapped to other processes and other kernel page tables.
Kconfig.debug:128 Check that anonymous page is not being mapped twice with read write

Completed in 17 milliseconds