| /arch/arm64/kernel/ |
| A D | efi.c |
      24 return !PAGE_ALIGNED(md->phys_addr) || in region_is_misaligned()
      35 u64 attr = md->attribute; in create_mapping_protection()
      36 u32 type = md->type; in create_mapping_protection()
      49 if (region_is_misaligned(md)) { in create_mapping_protection()
      99 if (region_is_misaligned(md)) in efi_create_mapping()
      102 create_pgd_mapping(mm, md->phys_addr, md->virt_addr, in efi_create_mapping()
      109 const efi_memory_desc_t *md; member
      116 const efi_memory_desc_t *md = spd->md; in set_permissions() local
      119 if (md->attribute & EFI_MEMORY_RO) in set_permissions()
      130 efi_memory_desc_t *md, in efi_set_mapping_permissions() argument
      [all …]
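
The arm64 hits cluster around create_mapping_protection() and set_permissions(), which derive page protections from the descriptor's attribute bits (the same RO/XP/cacheability checks recur in the riscv and arm entries below). The following is a minimal user-space sketch of that attribute-to-protection decision; the struct is a stripped-down stand-in for efi_memory_desc_t, and prot_from_md() plus the PROT_* flags are invented placeholders for the kernel's pgprot machinery, not its API.

```c
#include <stdint.h>
#include <stdio.h>

/* Attribute bits as defined by the UEFI spec (subset). */
#define EFI_MEMORY_WB 0x0000000000000008ULL /* write-back cacheable */
#define EFI_MEMORY_XP 0x0000000000004000ULL /* execute-protect      */
#define EFI_MEMORY_RO 0x0000000000020000ULL /* read-only            */

/* Simplified stand-in for the kernel's efi_memory_desc_t. */
struct efi_md {
	uint32_t type;
	uint64_t phys_addr;
	uint64_t virt_addr;
	uint64_t num_pages;   /* counted in 4 KiB EFI pages */
	uint64_t attribute;
};

/* Hypothetical protection flags, standing in for pgprot_t bits. */
enum { PROT_READONLY = 1 << 0, PROT_NOEXEC = 1 << 1, PROT_UNCACHED = 1 << 2 };

/* Decision logic along the lines the hits suggest: RO and XP in the
 * descriptor tighten the mapping; a missing WB bit forces an uncached map. */
static unsigned int prot_from_md(const struct efi_md *md)
{
	unsigned int prot = 0;

	if (md->attribute & EFI_MEMORY_RO)
		prot |= PROT_READONLY;
	if (md->attribute & EFI_MEMORY_XP)
		prot |= PROT_NOEXEC;
	if (!(md->attribute & EFI_MEMORY_WB))
		prot |= PROT_UNCACHED;
	return prot;
}

int main(void)
{
	struct efi_md md = { .attribute = EFI_MEMORY_WB | EFI_MEMORY_XP };

	printf("prot flags: %#x\n", prot_from_md(&md));
	return 0;
}
```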
|
| A D | acpi.c |
      303 efi_memory_desc_t *md, *region = NULL; in acpi_os_ioremap() local
      309 for_each_efi_memory_desc(md) { in acpi_os_ioremap()
      310 u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); in acpi_os_ioremap()
      312 if (phys < md->phys_addr || phys >= end) in acpi_os_ioremap()
      319 region = md; in acpi_os_ioremap()
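
Both acpi.c entries (here and the riscv one below) implement the same lookup: walk the EFI memory map and return the descriptor whose [phys_addr, end) range contains a given physical address, with end computed as phys_addr + (num_pages << EFI_PAGE_SHIFT). A hedged user-space sketch of that range check over a plain array follows; find_region() is an invented name, and the kernel walks the live map with for_each_efi_memory_desc() rather than an array.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12 /* EFI descriptors count 4 KiB pages */

/* Simplified stand-in for efi_memory_desc_t. */
struct efi_md {
	uint64_t phys_addr;
	uint64_t num_pages;
};

/* Return the descriptor covering @phys, or NULL: the same interval test as
 * the acpi_os_ioremap() hits, i.e. phys_addr <= phys < end. */
static const struct efi_md *find_region(const struct efi_md *map, size_t n,
					uint64_t phys)
{
	for (size_t i = 0; i < n; i++) {
		uint64_t end = map[i].phys_addr +
			       (map[i].num_pages << EFI_PAGE_SHIFT);

		if (phys < map[i].phys_addr || phys >= end)
			continue;
		return &map[i];
	}
	return NULL;
}

int main(void)
{
	const struct efi_md map[] = {
		{ .phys_addr = 0x80000000, .num_pages = 16  }, /* 64 KiB */
		{ .phys_addr = 0x90000000, .num_pages = 256 }, /* 1 MiB  */
	};
	const struct efi_md *md = find_region(map, 2, 0x90001000);

	printf("hit: %s\n", md ? "yes" : "no");
	return 0;
}
```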
|
| /arch/riscv/kernel/ |
| A D | efi.c |
      21 u64 attr = md->attribute; in efimem_to_pgprot_map()
      22 u32 type = md->type; in efimem_to_pgprot_map()
      53 for (i = 0; i < md->num_pages; i++) in efi_create_mapping()
      55 md->phys_addr + i * PAGE_SIZE, in efi_create_mapping()
      62 efi_memory_desc_t *md = data; in set_permissions() local
      66 if (md->attribute & EFI_MEMORY_RO) { in set_permissions()
      71 if (md->attribute & EFI_MEMORY_XP) { in set_permissions()
      81 efi_memory_desc_t *md, in efi_set_mapping_permissions() argument
      94 return apply_to_page_range(mm, md->virt_addr, in efi_set_mapping_permissions()
      95 md->num_pages << EFI_PAGE_SHIFT, in efi_set_mapping_permissions()
      [all …]
|
| A D | acpi.c |
      226 efi_memory_desc_t *md, *region = NULL; in acpi_os_ioremap() local
      232 for_each_efi_memory_desc(md) { in acpi_os_ioremap()
      233 u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); in acpi_os_ioremap()
      235 if (phys < md->phys_addr || phys >= end) in acpi_os_ioremap()
      242 region = md; in acpi_os_ioremap()
|
| /arch/x86/platform/efi/ |
| A D | memmap.c |
      192 md = new; in efi_memmap_insert()
      194 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_insert()
      203 md->num_pages = (m_end - md->phys_addr + 1) >> in efi_memmap_insert()
      208 md = new; in efi_memmap_insert()
      210 md->num_pages = (end - md->phys_addr + 1) >> in efi_memmap_insert()
      216 md->num_pages = (m_start - md->phys_addr) >> in efi_memmap_insert()
      221 md = new; in efi_memmap_insert()
      229 md = new; in efi_memmap_insert()
      238 md->num_pages = (m_start - md->phys_addr) >> in efi_memmap_insert()
      243 md = new; in efi_memmap_insert()
      [all …]
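
The memmap.c hits are the num_pages recalculations efi_memmap_insert() performs when a new range [m_start, m_end] punches into an existing descriptor: the original entry is trimmed and new entries cover the remainder. Below is a small sketch of just the "trim the head, create a tail" arithmetic under simplifying assumptions (page-aligned split point, exclusive end); split_at() is a hypothetical name, and the real function also clones types/attributes and handles every overlap case.

```c
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12

struct efi_md { /* simplified stand-in for efi_memory_desc_t */
	uint64_t phys_addr;
	uint64_t num_pages;
};

/*
 * Split @md at page-aligned @m_start: @md keeps [phys_addr, m_start) and
 * @tail receives [m_start, old end).  This mirrors the
 * "md->num_pages = (m_start - md->phys_addr) >> EFI_PAGE_SHIFT" hits above.
 */
static void split_at(struct efi_md *md, struct efi_md *tail, uint64_t m_start)
{
	uint64_t end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

	tail->phys_addr = m_start;
	tail->num_pages = (end - m_start) >> EFI_PAGE_SHIFT;
	md->num_pages = (m_start - md->phys_addr) >> EFI_PAGE_SHIFT;
}

int main(void)
{
	struct efi_md md = { .phys_addr = 0x100000, .num_pages = 64 }, tail;

	split_at(&md, &tail, 0x110000); /* split a 256 KiB region at +64 KiB */
	printf("head: %llu pages, tail: %llu pages\n",
	       (unsigned long long)md.num_pages,
	       (unsigned long long)tail.num_pages);
	return 0;
}
```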
|
| A D | efi.c |
      117 efi_memory_desc_t *md; in do_add_efi_memmap() local
      127 switch (md->type) { in do_add_efi_memmap()
      177 efi_memory_desc_t *md; in do_efi_soft_reserve() local
      239 u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1; in efi_memmap_entry_valid()
      357 md->phys_addr, in efi_print_memmap()
      358 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1, in efi_print_memmap()
      517 prev_md = md; in efi_merge_regions()
      523 prev_md = md; in efi_merge_regions()
      532 md->attribute = 0; in efi_merge_regions()
      535 prev_md = md; in efi_merge_regions()
      [all …]
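
The efi_merge_regions() hits track a prev_md pointer and clear an entry's attribute once it has been folded into its predecessor; the underlying idea is coalescing descriptors that are physically contiguous and share type and attributes. The sketch below shows that coalescing over a plain array under simplifying assumptions; merge_regions() is a hypothetical name and compacts in place rather than marking merged entries the way the kernel pass does.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12

struct efi_md { /* simplified stand-in for efi_memory_desc_t */
	uint32_t type;
	uint64_t phys_addr;
	uint64_t num_pages;
	uint64_t attribute;
};

/* Fold each entry into its predecessor when type and attribute match and
 * the regions are physically contiguous; return the new entry count. */
static size_t merge_regions(struct efi_md *map, size_t n)
{
	size_t out = 0;

	for (size_t i = 0; i < n; i++) {
		struct efi_md *prev_md = out ? &map[out - 1] : NULL;
		uint64_t prev_end = prev_md ? prev_md->phys_addr +
			(prev_md->num_pages << EFI_PAGE_SHIFT) : 0;

		if (prev_md && prev_md->type == map[i].type &&
		    prev_md->attribute == map[i].attribute &&
		    prev_end == map[i].phys_addr) {
			prev_md->num_pages += map[i].num_pages; /* grow prev */
			continue;
		}
		map[out++] = map[i]; /* start a new run */
	}
	return out;
}

int main(void)
{
	struct efi_md map[] = {
		{ .type = 7, .phys_addr = 0x100000, .num_pages = 16 },
		{ .type = 7, .phys_addr = 0x110000, .num_pages = 16 },
		{ .type = 4, .phys_addr = 0x120000, .num_pages = 16 },
	};

	printf("entries after merge: %zu\n", merge_regions(map, 3));
	return 0;
}
```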
|
| A D | efi_32.c |
      42 start_pfn = PFN_DOWN(md->phys_addr); in efi_map_region()
      43 size = md->num_pages << PAGE_SHIFT; in efi_map_region()
      44 end = md->phys_addr + size; in efi_map_region()
      48 va = __va(md->phys_addr); in efi_map_region()
      50 if (!(md->attribute & EFI_MEMORY_WB)) in efi_map_region()
      53 va = ioremap_cache(md->phys_addr, size); in efi_map_region()
      56 md->virt_addr = (unsigned long)va; in efi_map_region()
      132 efi_memory_desc_t *md; in efi_runtime_update_mappings() local
      135 for_each_efi_memory_desc(md) { in efi_runtime_update_mappings()
      136 if (md->type != EFI_RUNTIME_SERVICES_CODE) in efi_runtime_update_mappings()
      [all …]
|
| A D | quirks.c |
      257 efi_memory_desc_t md; in efi_arch_mem_reserve() local
      267 if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) { in efi_arch_mem_reserve()
      326 efi_memory_desc_t *md; in efi_reserve_boot_services() local
      331 for_each_efi_memory_desc(md) { in efi_reserve_boot_services()
      332 u64 start = md->phys_addr; in efi_reserve_boot_services()
      389 u64 pa = md->phys_addr; in efi_unmap_pages()
      390 u64 va = md->virt_addr; in efi_unmap_pages()
      410 efi_memory_desc_t *md; in efi_free_boot_services() local
      418 for_each_efi_memory_desc(md) { in efi_free_boot_services()
      440 efi_unmap_pages(md); in efi_free_boot_services()
      [all …]
|
| A D | efi_64.c |
      303 md->phys_addr, va); in __map_region()
      309 u64 pa = md->phys_addr; in efi_map_region()
      316 __map_region(md, md->phys_addr); in efi_map_region()
      324 md->virt_addr = md->phys_addr; in efi_map_region()
      351 md->virt_addr = efi_va; in efi_map_region()
      361 __map_region(md, md->phys_addr); in efi_map_region_fixed()
      362 __map_region(md, md->virt_addr); in efi_map_region_fixed()
      378 err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf); in efi_update_mappings()
      381 md->phys_addr, md->virt_addr); in efi_update_mappings()
      384 err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf); in efi_update_mappings()
      [all …]
|
| A D | runtime-map.c |
      17 efi_memory_desc_t md; member
      35 return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); in type_show()
      38 #define EFI_RUNTIME_FIELD(var) entry->md.var
      106 efi_memory_desc_t *md) in add_sysfs_runtime_map_entry() argument
      124 memcpy(&entry->md, md, sizeof(efi_memory_desc_t)); in add_sysfs_runtime_map_entry()
      164 efi_memory_desc_t *md; in efi_runtime_map_init() local
      176 for_each_efi_memory_desc(md) { in efi_runtime_map_init()
      177 entry = add_sysfs_runtime_map_entry(efi_kobj, i, md); in efi_runtime_map_init()
|
| /arch/arm/kernel/ |
| A D | efi.c |
      16 efi_memory_desc_t *md = data; in set_permissions() local
      19 if (md->attribute & EFI_MEMORY_RO) in set_permissions()
      21 if (md->attribute & EFI_MEMORY_XP) in set_permissions()
      28 efi_memory_desc_t *md, in efi_set_mapping_permissions() argument
      33 base = md->virt_addr; in efi_set_mapping_permissions()
      34 size = md->num_pages << EFI_PAGE_SHIFT; in efi_set_mapping_permissions()
      52 .virtual = md->virt_addr, in efi_create_mapping()
      53 .pfn = __phys_to_pfn(md->phys_addr), in efi_create_mapping()
      62 if (md->attribute & EFI_MEMORY_WB) in efi_create_mapping()
      64 else if (md->attribute & EFI_MEMORY_WT) in efi_create_mapping()
      [all …]
|
| /arch/loongarch/kernel/ |
| A D | mem.c |
      17 efi_memory_desc_t *md; in memblock_init() local
      20 for_each_efi_memory_desc(md) { in memblock_init()
      21 mem_type = md->type; in memblock_init()
      22 mem_start = md->phys_addr; in memblock_init()
      23 mem_size = md->num_pages << EFI_PAGE_SHIFT; in memblock_init()
|
| A D | numa.c |
      193 efi_memory_desc_t *md; in init_node_memblock() local
      196 for_each_efi_memory_desc(md) { in init_node_memblock()
      197 mem_type = md->type; in init_node_memblock()
      198 mem_start = md->phys_addr; in init_node_memblock()
      199 mem_size = md->num_pages << EFI_PAGE_SHIFT; in init_node_memblock()
|
| /arch/x86/kernel/ |
| A D | tsc_msr.c |
      171 const struct muldiv *md; in cpu_khz_from_msr() local
      191 md = &freq_desc->muldiv[index]; in cpu_khz_from_msr()
      197 if (md->divider) { in cpu_khz_from_msr()
      198 tscref = TSC_REFERENCE_KHZ * md->multiplier; in cpu_khz_from_msr()
      199 freq = DIV_ROUND_CLOSEST(tscref, md->divider); in cpu_khz_from_msr()
      204 res = DIV_ROUND_CLOSEST(tscref * ratio, md->divider); in cpu_khz_from_msr()
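
Here md is a per-model muldiv table entry rather than an EFI descriptor: when a divider is present, cpu_khz_from_msr() computes the frequency as round-to-nearest of (reference kHz × multiplier) / divider. A small hedged sketch of that arithmetic follows; the reference value and the multiplier/divider below are placeholders, not values from the kernel's frequency tables, and the no-divider fallback is simplified (the kernel consults a per-model frequency table instead).

```c
#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest integer division for non-negative operands, matching
 * what DIV_ROUND_CLOSEST() does in this unsigned case. */
static uint64_t div_round_closest(uint64_t x, uint64_t d)
{
	return (x + d / 2) / d;
}

struct muldiv { /* mirrors the shape of the table entry used above */
	uint32_t multiplier;
	uint32_t divider;
};

int main(void)
{
	/* Illustrative numbers only, not taken from tsc_msr.c. */
	const uint64_t reference_khz = 100000;
	const struct muldiv md = { .multiplier = 4, .divider = 3 };

	uint64_t tscref = reference_khz * md.multiplier;
	uint64_t freq = md.divider ? div_round_closest(tscref, md.divider)
				   : reference_khz; /* simplified fallback */

	printf("cpu_khz ~ %llu\n", (unsigned long long)freq);
	return 0;
}
```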
|
| A D | smpboot.c |
      1253 md->status = CPUDEAD_MWAIT_WAIT; in mwait_play_dead()
      1254 md->control = CPUDEAD_MWAIT_WAIT; in mwait_play_dead()
      1267 clflush(md); in mwait_play_dead()
      1269 __monitor(md, 0, 0); in mwait_play_dead()
      1273 if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) { in mwait_play_dead()
      1286 WRITE_ONCE(md->status, CPUDEAD_MWAIT_KEXEC_HLT); in mwait_play_dead()
      1300 struct mwait_cpu_dead *md; in smp_kick_mwait_play_dead() local
      1304 md = per_cpu_ptr(&mwait_cpu_dead, cpu); in smp_kick_mwait_play_dead()
      1307 if (READ_ONCE(md->status) != CPUDEAD_MWAIT_WAIT) in smp_kick_mwait_play_dead()
      1313 WRITE_ONCE(md->control, newstate); in smp_kick_mwait_play_dead()
      [all …]
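
These hits are two halves of a handshake through a per-CPU mwait_cpu_dead structure: the dying CPU publishes the WAIT state, arms MONITOR/MWAIT on that cache line, and reacts when control changes, while the kicking side only writes a new control value to CPUs whose status shows they reached the wait state. The sketch below illustrates just the control/status protocol with C11 atomics and busy-waiting standing in for MONITOR/MWAIT; the state values, the acknowledgement wait, and the thread setup are illustrative, not the kernel's (build with -pthread).

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative state encodings, not the kernel's constants. */
enum { CPUDEAD_MWAIT_WAIT = 0x1234, CPUDEAD_MWAIT_KEXEC_HLT = 0x4321 };

struct mwait_cpu_dead { /* simplified: one instance instead of per-CPU data */
	_Atomic unsigned int control;
	_Atomic unsigned int status;
};

static struct mwait_cpu_dead md;

/* Dying-CPU side: publish WAIT, wait for the kexec request, acknowledge it.
 * The real code re-arms __monitor()/__mwait() on &md instead of spinning. */
static void *play_dead(void *arg)
{
	(void)arg;
	atomic_store(&md.status, CPUDEAD_MWAIT_WAIT);
	atomic_store(&md.control, CPUDEAD_MWAIT_WAIT);

	while (atomic_load(&md.control) != CPUDEAD_MWAIT_KEXEC_HLT)
		; /* stand-in for MONITOR/MWAIT on the md cache line */

	atomic_store(&md.status, CPUDEAD_MWAIT_KEXEC_HLT); /* acknowledge */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, play_dead, NULL);

	/* Kicking side: only poke a CPU that reached the WAIT state. */
	while (atomic_load(&md.status) != CPUDEAD_MWAIT_WAIT)
		;
	atomic_store(&md.control, CPUDEAD_MWAIT_KEXEC_HLT);

	while (atomic_load(&md.status) != CPUDEAD_MWAIT_KEXEC_HLT)
		;
	printf("CPU acknowledged the halt request\n");

	pthread_join(t, NULL);
	return 0;
}
```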
|
| /arch/arm/mm/ |
| A D | mmu.c |
      874 addr = md->virtual; in create_36bit_mapping()
      950 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in __create_mapping()
      979 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { in create_mapping()
      981 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
      986 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && in create_mapping()
      987 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { in create_mapping()
      989 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
      1002 p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); in create_mapping_late()
      1018 struct map_desc *md; in iotable_init() local
      1027 for (md = io_desc; nr; md++, nr--) { in iotable_init()
      [all …]
|
| /arch/sparc/mm/ |
| A D | init_64.c |
      1123 u64 target = mdesc_arc_target(md, arc); in scan_pio_for_cfg_handle()
      1126 val = mdesc_get_property(md, target, in scan_pio_for_cfg_handle()
      1167 struct mdesc_handle *md; in of_node_to_nid() local
      1185 md = mdesc_grab(); in of_node_to_nid()
      1197 mdesc_release(md); in of_node_to_nid()
      1300 val = mdesc_get_property(md, node, in grab_mblocks()
      1455 struct mdesc_handle *md = mdesc_grab(); in numa_parse_mdesc() local
      1461 mdesc_release(md); in numa_parse_mdesc()
      1465 err = grab_mblocks(md); in numa_parse_mdesc()
      1469 err = grab_mlgroups(md); in numa_parse_mdesc()
      [all …]
|
| /arch/riscv/include/asm/ |
| A D | efi.h |
      21 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
      22 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
|
| /arch/x86/boot/compressed/ |
| A D | kaslr.c |
      663 if (md->type == EFI_CONVENTIONAL_MEMORY) in memory_type_is_free()
      667 md->type == EFI_UNACCEPTED_MEMORY) in memory_type_is_free()
      683 efi_memory_desc_t *md; in process_efi_entries() local
      707 md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i); in process_efi_entries()
      708 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { in process_efi_entries()
      715 md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i); in process_efi_entries()
      717 if (!memory_type_is_free(md)) in process_efi_entries()
      721 (md->attribute & EFI_MEMORY_SP)) in process_efi_entries()
      725 !(md->attribute & EFI_MEMORY_MORE_RELIABLE)) in process_efi_entries()
      728 region.start = md->phys_addr; in process_efi_entries()
      [all …]
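
In kaslr.c, memory_type_is_free() decides whether an EFI descriptor may host the randomized kernel, and process_efi_entries() additionally skips specific-purpose (EFI_MEMORY_SP) regions and, when "more reliable" memory was requested, anything without EFI_MEMORY_MORE_RELIABLE. Below is a hedged sketch of that filtering; the numeric type and attribute values are illustrative rather than the spec encoding, usable_for_kaslr() is an invented helper, and a boolean argument stands in for the unaccepted-memory config check.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Subset of the types/attributes referenced by the hits; illustrative values. */
enum efi_mem_type { EFI_CONVENTIONAL_MEMORY = 1, EFI_UNACCEPTED_MEMORY = 2 };
#define EFI_MEMORY_SP            (1ULL << 0)
#define EFI_MEMORY_MORE_RELIABLE (1ULL << 1)

struct efi_md { /* simplified stand-in for efi_memory_desc_t */
	enum efi_mem_type type;
	uint64_t attribute;
};

/* Free for placement: conventional memory, or unaccepted memory when the
 * kernel is built to accept it (boolean stands in for the config check). */
static bool memory_type_is_free(const struct efi_md *md, bool unaccepted_ok)
{
	if (md->type == EFI_CONVENTIONAL_MEMORY)
		return true;
	return unaccepted_ok && md->type == EFI_UNACCEPTED_MEMORY;
}

/* Region filter along the lines of process_efi_entries(): drop non-free
 * types, specific-purpose memory, and (optionally) regions not flagged
 * as "more reliable". */
static bool usable_for_kaslr(const struct efi_md *md, bool want_more_reliable)
{
	if (!memory_type_is_free(md, true))
		return false;
	if (md->attribute & EFI_MEMORY_SP)
		return false;
	if (want_more_reliable && !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
		return false;
	return true;
}

int main(void)
{
	struct efi_md md = { .type = EFI_CONVENTIONAL_MEMORY, .attribute = 0 };

	printf("usable: %d\n", usable_for_kaslr(&md, false));
	return 0;
}
```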
|
| /arch/arm/include/asm/ |
| A D | efi.h |
      23 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
      24 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
|
| /arch/arm64/include/asm/ |
| A D | efi.h |
      29 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
      30 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md,
|
| /arch/arm/include/asm/mach/ |
| A D | map.h | 43 extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
|
| /arch/m68k/ifpsp060/src/ |
| A D | ilsp.S |
      548 mov.l %d1,%d4 # md in d4
      550 swap %d4 # hi(md) in lo d4
      553 mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
      554 mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
      555 mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
      556 mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
      674 mov.l %d1,%d4 # md in d4
      676 swap %d4 # hi(md) in lo d4
      679 mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
      680 mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
      [all …]
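
Here md is the 32-bit multiplicand in the 68060 software-support multiply routines: it and the multiplier mr are split into 16-bit halves and the 64-bit product is assembled from the four 16×16 partial products the comments label [1] lo×lo, [2] hi(mr)×lo(md), [3] lo(mr)×hi(md), and [4] hi×hi. The following C sketch shows the same decomposition (the assembly additionally has to propagate carries across data registers by hand); mulu32x32() is just an illustrative name.

```c
#include <stdint.h>
#include <stdio.h>

/* 32x32 -> 64-bit unsigned multiply built from four 16x16 partial
 * products, mirroring the [1]..[4] mulu.w steps in ilsp.S. */
static uint64_t mulu32x32(uint32_t mr, uint32_t md)
{
	uint32_t lo_mr = mr & 0xffff, hi_mr = mr >> 16;
	uint32_t lo_md = md & 0xffff, hi_md = md >> 16;

	uint64_t p1 = (uint64_t)lo_mr * lo_md; /* [1] lo(mr) * lo(md) */
	uint64_t p2 = (uint64_t)hi_mr * lo_md; /* [2] hi(mr) * lo(md) */
	uint64_t p3 = (uint64_t)lo_mr * hi_md; /* [3] lo(mr) * hi(md) */
	uint64_t p4 = (uint64_t)hi_mr * hi_md; /* [4] hi(mr) * hi(md) */

	/* Middle terms are shifted by 16 bits, the high term by 32. */
	return p1 + ((p2 + p3) << 16) + (p4 << 32);
}

int main(void)
{
	uint32_t mr = 0x12345678, md = 0x9abcdef0;

	printf("%#llx (check: %#llx)\n",
	       (unsigned long long)mulu32x32(mr, md),
	       (unsigned long long)((uint64_t)mr * md));
	return 0;
}
```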
|
| /arch/powerpc/include/asm/ |
| A D | machdep.h |
      229 static inline bool __machine_is(const struct machdep_calls *md) in __machine_is() argument
      232 return machine_id == md; in __machine_is()
|
| /arch/x86/include/asm/ |
| A D | efi.h |
      130 extern void __init efi_map_region(efi_memory_desc_t *md);
      131 extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
      399 extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
|