| /arch/powerpc/mm/book3s64/ |
| iommu_api.c |
    66   ret = account_locked_vm(mm, entries, true);  in mm_iommu_do_alloc()
    70   locked_entries = entries;  in mm_iommu_do_alloc()
    102  chunk = min(chunk, entries);  in mm_iommu_do_alloc()
    118  if (pinned != entries) {  in mm_iommu_do_alloc()
    128  mem->entries = entries;  in mm_iommu_do_alloc()
    151  for (i = 0; i < entries; ++i) {  in mm_iommu_do_alloc()
    210  for (i = 0; i < mem->entries; ++i) {  in mm_iommu_unpin()
    274  unlock_entries = mem->entries;  in mm_iommu_put()
    297  (mem->entries << PAGE_SHIFT))) {  in mm_iommu_lookup()
    317  if ((mem->ua == ua) && (mem->entries == entries)) {  in mm_iommu_get()
    [all …]
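The mm_iommu_do_alloc() hits above trace a charge-then-unwind discipline: the locked-page budget is charged via account_locked_vm() before any pages are pinned, and the charge is dropped again when pinning comes up short (pinned != entries). A minimal userspace sketch of that discipline follows; account_locked(), pin_all() and the fixed limit are hypothetical stand-ins, not the kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long locked_vm;
    static const unsigned long locked_limit = 1024;  /* pages, illustrative */

    /* charge (inc == true) or uncharge pages against the limit */
    static int account_locked(unsigned long pages, bool inc)
    {
        if (inc) {
            if (locked_vm + pages > locked_limit)
                return -1;                 /* over the locked-memory limit */
            locked_vm += pages;
        } else {
            locked_vm -= pages;
        }
        return 0;
    }

    static int pin_all(unsigned long entries, unsigned long pinnable)
    {
        if (account_locked(entries, true))
            return -1;                     /* charge first, pin second */

        if (pinnable != entries) {         /* short pin: unwind the charge */
            account_locked(entries, false);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("full pin:  %d (locked_vm=%lu)\n", pin_all(512, 512), locked_vm);
        printf("short pin: %d (locked_vm=%lu)\n", pin_all(256, 100), locked_vm);
        return 0;
    }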
|
| /arch/x86/xen/ |
| multicalls.c |
    40   struct multicall_entry entries[MC_BATCH];  member
    49   struct multicall_entry entries[MC_BATCH];  member
    125  mcdb->entries[idx].op, b->entries[idx].result,  in print_debug_data()
    161  memcpy(mcdb->entries, b->entries,  in xen_mc_flush()
    174  mc = &b->entries[0];  in xen_mc_flush()
    186  if (b->entries[i].result < 0)  in xen_mc_flush()
    199  b->entries[i].op,  in xen_mc_flush()
    200  b->entries[i].args[0],  in xen_mc_flush()
    201  b->entries[i].result);  in xen_mc_flush()
    238  ret.mc = &b->entries[b->mcidx];  in __xen_mc_entry()
    [all …]
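The multicalls.c hits sketch Xen's batching idiom: hypercalls are queued into a fixed entries[MC_BATCH] array, issued in one flush, and each entry's result is checked afterwards. Below is a hedged userspace model of that queue/flush/check cycle; struct mc_entry, MC_BATCH and the stubbed "hypercall" are illustrative, not the Xen interface.

    #include <stdio.h>

    #define MC_BATCH 32

    struct mc_entry { int op; long args[2]; long result; };

    struct mc_buffer {
        struct mc_entry entries[MC_BATCH];
        unsigned mcidx;                     /* entries queued so far */
    };

    /* reserve the next slot; the real code flushes when the batch fills */
    static struct mc_entry *mc_queue(struct mc_buffer *b)
    {
        return b->mcidx < MC_BATCH ? &b->entries[b->mcidx++] : NULL;
    }

    static int mc_flush(struct mc_buffer *b)
    {
        int errors = 0;

        for (unsigned i = 0; i < b->mcidx; i++)
            b->entries[i].result = 0;       /* stand-in for the batched call */

        for (unsigned i = 0; i < b->mcidx; i++)
            if (b->entries[i].result < 0)
                errors++;                   /* real code logs op and result */

        b->mcidx = 0;                       /* batch consumed */
        return errors;
    }

    int main(void)
    {
        struct mc_buffer b = { 0 };
        struct mc_entry *mc = mc_queue(&b);

        if (mc)
            mc->op = 1;
        printf("errors after flush: %d\n", mc_flush(&b));
        return 0;
    }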
|
| setup.c |
    580  entry = xen_e820_table.entries;  in xen_is_e820_reserved()
    641  entry = xen_e820_table.entries;  in xen_e820_swap_entry_with_ram()
    651  entry = xen_e820_table.entries +  in xen_e820_swap_entry_with_ram()
    698  entry = xen_e820_table.entries;  in xen_e820_resolve_conflicts()
    820  xen_e820_table.entries[0].addr = 0ULL;  in xen_memory_setup()
    821  xen_e820_table.entries[0].size = mem_end;  in xen_memory_setup()
    894  addr = xen_e820_table.entries[0].addr;  in xen_memory_setup()
    895  size = xen_e820_table.entries[0].size;  in xen_memory_setup()
    900  type = xen_e820_table.entries[i].type;  in xen_memory_setup()
    927  addr = xen_e820_table.entries[i].addr;  in xen_memory_setup()
    [all …]
|
| /arch/x86/kernel/cpu/ |
| intel.c |
    653  short entries = desc->entries;  in intel_tlb_lookup()  local
    657  tlb_lli_4k = max(tlb_lli_4k, entries);  in intel_tlb_lookup()
    658  tlb_lld_4k = max(tlb_lld_4k, entries);  in intel_tlb_lookup()
    661  tlb_lli_4k = max(tlb_lli_4k, entries);  in intel_tlb_lookup()
    662  tlb_lld_4k = max(tlb_lld_4k, entries);  in intel_tlb_lookup()
    663  tlb_lli_2m = max(tlb_lli_2m, entries);  in intel_tlb_lookup()
    664  tlb_lld_2m = max(tlb_lld_2m, entries);  in intel_tlb_lookup()
    665  tlb_lli_4m = max(tlb_lli_4m, entries);  in intel_tlb_lookup()
    666  tlb_lld_4m = max(tlb_lld_4m, entries);  in intel_tlb_lookup()
    669  tlb_lli_4k = max(tlb_lli_4k, entries);  in intel_tlb_lookup()
    [all …]
|
| /arch/arm64/kvm/vgic/ |
| vgic-irqfd.c |
    138  struct kvm_irq_routing_entry *entries;  in kvm_vgic_setup_default_irq_routing()  local
    143  entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL_ACCOUNT);  in kvm_vgic_setup_default_irq_routing()
    144  if (!entries)  in kvm_vgic_setup_default_irq_routing()
    148  entries[i].gsi = i;  in kvm_vgic_setup_default_irq_routing()
    149  entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;  in kvm_vgic_setup_default_irq_routing()
    150  entries[i].u.irqchip.irqchip = 0;  in kvm_vgic_setup_default_irq_routing()
    151  entries[i].u.irqchip.pin = i;  in kvm_vgic_setup_default_irq_routing()
    153  ret = kvm_set_irq_routing(kvm, entries, nr, 0);  in kvm_vgic_setup_default_irq_routing()
    154  kfree(entries);  in kvm_vgic_setup_default_irq_routing()
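kvm_vgic_setup_default_irq_routing() above (and kvm_setup_default_irq_routing() in pch_pic.c further down) follows one allocate-fill-register-free pattern: kcalloc a routing table, identity-map GSI i to irqchip pin i, hand the table to kvm_set_irq_routing(), then free the local copy. The userspace sketch below models that lifecycle; struct routing_entry and set_irq_routing() are stand-ins for the KVM types, not the real API.

    #include <stdio.h>
    #include <stdlib.h>

    struct routing_entry {
        unsigned gsi, type, irqchip, pin;
    };

    /* stand-in for kvm_set_irq_routing(): the callee copies the table */
    static int set_irq_routing(const struct routing_entry *e, unsigned nr)
    {
        return (e && nr) ? 0 : -1;
    }

    static int setup_default_routing(unsigned nr)
    {
        struct routing_entry *entries = calloc(nr, sizeof(*entries));
        int ret;

        if (!entries)
            return -1;

        for (unsigned i = 0; i < nr; i++) {
            entries[i].gsi = i;             /* identity map: GSI i ... */
            entries[i].type = 1;            /* ~ KVM_IRQ_ROUTING_IRQCHIP */
            entries[i].irqchip = 0;
            entries[i].pin = i;             /* ... to irqchip pin i */
        }

        ret = set_irq_routing(entries, nr);
        free(entries);                      /* safe: callee kept a copy */
        return ret;
    }

    int main(void)
    {
        printf("%d\n", setup_default_routing(32));
        return 0;
    }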
|
| /arch/x86/kernel/ |
| e820.c |
    176  table->entries[x].addr = start;  in __e820__range_add()
    177  table->entries[x].size = size;  in __e820__range_add()
    178  table->entries[x].type = type;  in __e820__range_add()
    209  e820_table->entries[i].addr,  in e820__print_table()
    210  e820_table->entries[i].addr + e820_table->entries[i].size - 1);  in e820__print_table()
    323  struct e820_entry *entries = table->entries;  in e820__update_table()  local
    338  if (entries[i].addr + entries[i].size < entries[i].addr)  in e820__update_table()
    352  if (entries[i].size != 0) {  in e820__update_table()
    355  change_point[chg_idx]->addr = entries[i].addr + entries[i].size;  in e820__update_table()
    415  memcpy(entries, new_entries, new_nr_entries*sizeof(*entries));  in e820__update_table()
    [all …]
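Line 338 of e820.c is the interesting one: entries[i].addr + entries[i].size < entries[i].addr can only be true when the unsigned addition wraps, so it cheaply rejects a corrupt range whose end overflows the address space. A small sketch of that guard, with a hypothetical struct range in place of struct e820_entry:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t addr, size; };

    static bool range_valid(const struct range *r)
    {
        /* addr + size < addr  <=>  the unsigned sum wrapped past 2^64 */
        return r->size != 0 && r->addr + r->size >= r->addr;
    }

    int main(void)
    {
        struct range ok  = { 0x100000, 0x1000 };
        struct range bad = { UINT64_MAX - 4, 0x1000 };   /* wraps */

        printf("ok=%d bad=%d\n", range_valid(&ok), range_valid(&bad));
        return 0;
    }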
|
| ldt.c |
    83   set_ldt(ldt->entries, ldt->nr_entries);  in load_mm_ldt()
    175  if (!new_ldt->entries) {  in alloc_ldt_struct()
    307  is_vmalloc = is_vmalloc_addr(ldt->entries);  in map_ldt_struct()
    313  const void *src = (char *)ldt->entries + offset;  in map_ldt_struct()
    418  paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);  in finalize_ldt_struct()
    439  paravirt_free_ldt(ldt->entries, ldt->nr_entries);  in free_ldt_struct()
    441  vfree_atomic(ldt->entries);  in free_ldt_struct()
    443  free_page((unsigned long)ldt->entries);  in free_ldt_struct()
    469  memcpy(new_ldt->entries, old_mm->context.ldt->entries,  in ldt_dup_context()
    632  memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);  in write_ldt()
    [all …]
|
| /arch/powerpc/kvm/ |
| e500_mmu_host.c |
    38   #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
    45   return host_tlb_params[1].entries - tlbcam_index - 1;  in tlb1_max_shadow_size()
    275  sizeof(u64) * vcpu_e500->gtlb_params[1].entries);  in clear_tlb1_bitmap()
    278  sizeof(unsigned int) * host_tlb_params[1].entries);  in clear_tlb1_bitmap()
    724  if (host_tlb_params[0].entries == 0 ||  in e500_mmu_host_init()
    725  host_tlb_params[1].entries == 0) {  in e500_mmu_host_init()
    732  host_tlb_params[1].ways = host_tlb_params[1].entries;  in e500_mmu_host_init()
    734  if (!is_power_of_2(host_tlb_params[0].entries) ||  in e500_mmu_host_init()
    736  host_tlb_params[0].entries < host_tlb_params[0].ways ||  in e500_mmu_host_init()
    739  __func__, host_tlb_params[0].entries,  in e500_mmu_host_init()
    [all …]
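e500_mmu_host_init() above refuses TLB geometry unless the entry counts are nonzero powers of two and each TLB has at least as many entries as ways; a power-of-two count is what makes masks like entries - 1 (see get_tlb_esel() in e500_mmu.c below) work. A hedged sketch of those checks, with is_power_of_2() re-implemented for userspace:

    #include <stdbool.h>
    #include <stdio.h>

    struct tlb_params { unsigned entries, ways; };

    static bool is_power_of_2(unsigned n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    static bool tlb_params_sane(const struct tlb_params *p)
    {
        return p->entries != 0 &&
               is_power_of_2(p->entries) &&
               p->entries >= p->ways;      /* at least one set per way */
    }

    int main(void)
    {
        struct tlb_params good = { 512, 4 }, bad = { 48, 64 };

        printf("good=%d bad=%d\n", tlb_params_sane(&good), tlb_params_sane(&bad));
        return 0;
    }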
|
| e500_mmu.c |
    73   esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;  in get_tlb_esel()
    83   int size = vcpu_e500->gtlb_params[tlbsel].entries;  in kvmppc_e500_tlb_index()
    153  int size = vcpu_e500->gtlb_params[1].entries;  in kvmppc_recalc_tlb1map_range()
    233  for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)  in kvmppc_e500_emul_mt_mmucsr0()
    258  for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;  in kvmppc_e500_emul_tlbivax()
    824  vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];  in kvm_vcpu_ioctl_config_tlb()
    825  vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];  in kvm_vcpu_ioctl_config_tlb()
    875  vcpu->arch.tlbcfg[0] |= params[0].entries;  in vcpu_mmu_init()
    880  vcpu->arch.tlbcfg[1] |= params[1].entries;  in vcpu_mmu_init()
    905  vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;  in kvmppc_e500_tlb_init()
    [all …]
|
| /arch/parisc/kernel/ |
| pdt.c |
    153  unsigned long entries;  in pdc_pdt_init()  local
    177  entries = pdt_status.pdt_entries;  in pdc_pdt_init()
    178  if (WARN_ON(entries > MAX_PDT_ENTRIES))  in pdc_pdt_init()
    179  entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES;  in pdc_pdt_init()
    190  if (entries == 0) {  in pdc_pdt_init()
    200  entries);  in pdc_pdt_init()
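pdc_pdt_init() above treats the firmware-reported pdt_entries as untrusted: the count is clamped to MAX_PDT_ENTRIES, and WARN_ON fires if the clamp was needed. A minimal sketch of that defensive clamp, with WARN_ON approximated by fprintf and an illustrative capacity:

    #include <stdio.h>

    #define MAX_PDT_ENTRIES 64              /* illustrative capacity */

    static unsigned long clamp_pdt_entries(unsigned long reported)
    {
        if (reported > MAX_PDT_ENTRIES) {
            fprintf(stderr, "warning: firmware reported %lu entries, capping at %d\n",
                    reported, MAX_PDT_ENTRIES);
            reported = MAX_PDT_ENTRIES;     /* never index past the table */
        }
        return reported;
    }

    int main(void)
    {
        printf("%lu\n", clamp_pdt_entries(1000));   /* prints 64 */
        return 0;
    }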
|
| inventory.c |
    310  int entries;  in pat_memconfig()  local
    332  entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);  in pat_memconfig()
    334  if (entries > PAT_MAX_RANGES) {  in pat_memconfig()
    347  for (i = 0; i < entries; i++,mtbl_ptr++) {  in pat_memconfig()
    404  int entries;  in sprockets_memconfig()  local
    427  entries = (int)r_addr.entries_returned;  in sprockets_memconfig()
    432  for (i = 0; i < entries; i++,mtbl_ptr++) {  in sprockets_memconfig()
|
| /arch/mips/mti-malta/ |
| malta-dtshim.c |
    74   unsigned entries;  in gen_fdt_mem_array()  local
    76   entries = 1;  in gen_fdt_mem_array()
    108  entries++;  in gen_fdt_mem_array()
    121  entries++;  in gen_fdt_mem_array()
    127  BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES);  in gen_fdt_mem_array()
    128  return entries;  in gen_fdt_mem_array()
|
| /arch/arm/mm/ |
| proc-arm940.S |
    118  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    121  bcs 2b  @ entries 63 to 0
    173  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    176  bcs 2b  @ entries 63 to 0
    196  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    199  bcs 2b  @ entries 63 to 0
    219  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    222  bcs 2b  @ entries 63 to 0
    242  1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26  @ 64 entries
    250  bcs 2b  @ entries 63 to 0
|
| /arch/loongarch/kvm/intc/ |
| pch_pic.c |
    460  struct kvm_irq_routing_entry *entries;  in kvm_setup_default_irq_routing()  local
    462  entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);  in kvm_setup_default_irq_routing()
    463  if (!entries)  in kvm_setup_default_irq_routing()
    467  entries[i].gsi = i;  in kvm_setup_default_irq_routing()
    468  entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;  in kvm_setup_default_irq_routing()
    469  entries[i].u.irqchip.irqchip = 0;  in kvm_setup_default_irq_routing()
    470  entries[i].u.irqchip.pin = i;  in kvm_setup_default_irq_routing()
    472  ret = kvm_set_irq_routing(kvm, entries, nr, 0);  in kvm_setup_default_irq_routing()
    473  kfree(entries);  in kvm_setup_default_irq_routing()
|
| /arch/x86/boot/startup/ |
| sme.c |
    249  unsigned long entries = 0, tables = 0;  in sme_pgtable_calc()  local
    266  entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;  in sme_pgtable_calc()
    267  entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;  in sme_pgtable_calc()
    268  entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;  in sme_pgtable_calc()
    269  entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;  in sme_pgtable_calc()
    277  tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;  in sme_pgtable_calc()
    278  tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;  in sme_pgtable_calc()
    279  tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;  in sme_pgtable_calc()
    281  return entries + tables;  in sme_pgtable_calc()
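sme_pgtable_calc() above upper-bounds the memory needed for new page tables: for each level it divides the mapped length by the coverage of one table page, rounds up, and adds one extra table to allow an unaligned start. The sketch below redoes that arithmetic for just two levels; the PMD_SIZE/PUD_SIZE values are the usual x86-64 ones, used here illustratively rather than pulled from kernel headers.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    #define PAGE_SIZE  4096UL
    #define PMD_SIZE   (2UL << 20)   /* one PMD entry maps 2 MiB */
    #define PUD_SIZE   (1UL << 30)   /* one PUD entry maps 1 GiB */

    static unsigned long pgtable_bytes(unsigned long len)
    {
        unsigned long tables = 0;

        /* one page of PMD entries maps PUD_SIZE; +1 for unaligned start */
        tables += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * PAGE_SIZE;
        /* one page of PTE entries maps PMD_SIZE; +1 for unaligned start */
        tables += (DIV_ROUND_UP(len, PMD_SIZE) + 1) * PAGE_SIZE;
        return tables;
    }

    int main(void)
    {
        printf("%lu bytes of tables to map 1 GiB\n", pgtable_bytes(1UL << 30));
        return 0;
    }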
|
| /arch/mips/generic/ |
| yamon-dt.c |
    51   unsigned int entries = 0;  in gen_fdt_mem_array()  local
    54   if (entries >= max_entries) {  in gen_fdt_mem_array()
    67   ++entries;  in gen_fdt_mem_array()
    72   return entries;  in gen_fdt_mem_array()
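Both gen_fdt_mem_array() implementations fill a fixed-size FDT memory array while counting entries: the malta-dtshim.c version above BUG()s on overflow, while this yamon-dt.c version bails out once entries reaches max_entries. A sketch of the gentler bounded-fill variant, with hypothetical base/size input:

    #include <stdio.h>

    static int gen_mem_array(unsigned long *out, unsigned max_entries,
                             const unsigned long *ranges, unsigned nranges)
    {
        unsigned entries = 0;

        for (unsigned i = 0; i < nranges; i++) {
            if (entries >= max_entries)
                return -1;               /* caller's buffer is full */
            out[entries++] = ranges[i];
        }
        return (int)entries;             /* number of entries written */
    }

    int main(void)
    {
        unsigned long out[2], in[3] = { 1, 2, 3 };

        printf("%d\n", gen_mem_array(out, 2, in, 3));   /* -1: overflow */
        return 0;
    }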
|
| /arch/x86/kvm/ |
| cpuid.h |
    14   struct kvm_cpuid_entry2 *kvm_find_cpuid_entry2(struct kvm_cpuid_entry2 *entries,
    43   struct kvm_cpuid_entry2 __user *entries,
    47   struct kvm_cpuid_entry __user *entries);
    50   struct kvm_cpuid_entry2 __user *entries);
    53   struct kvm_cpuid_entry2 __user *entries);
|
| cpuid.c |
    103   e = &entries[i];  in kvm_find_cpuid_entry2()
    563   struct kvm_cpuid_entry __user *entries)  in kvm_vcpu_ioctl_set_cpuid()  argument
    608   struct kvm_cpuid_entry2 __user *entries)  in kvm_vcpu_ioctl_set_cpuid2()  argument
    631   struct kvm_cpuid_entry2 __user *entries)  in kvm_vcpu_ioctl_get_cpuid2()  argument
    1238  struct kvm_cpuid_entry2 *entries;  member
    1248  return &array->entries[array->nent++];  in get_next_cpuid()
    1830  limit = array->entries[array->nent - 1].eax;  in get_cpuid_func()
    1868  struct kvm_cpuid_entry2 __user *entries,  in kvm_dev_ioctl_get_cpuid()  argument
    1889  if (!array.entries)  in kvm_dev_ioctl_get_cpuid()
    1901  if (copy_to_user(entries, array.entries,  in kvm_dev_ioctl_get_cpuid()
    [all …]
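kvm_find_cpuid_entry2() (declared in cpuid.h above, with its loop body at line 103 here) is a plain linear scan over the entries array for a function/index match. A trimmed-down sketch of that lookup; struct cpuid_entry is a stand-in carrying only the fields the scan needs:

    #include <stddef.h>
    #include <stdio.h>

    struct cpuid_entry { unsigned function, index, eax; };

    static struct cpuid_entry *find_entry(struct cpuid_entry *entries, int nent,
                                          unsigned function, unsigned index)
    {
        for (int i = 0; i < nent; i++) {
            struct cpuid_entry *e = &entries[i];

            if (e->function == function && e->index == index)
                return e;               /* first match wins */
        }
        return NULL;                    /* leaf not present */
    }

    int main(void)
    {
        struct cpuid_entry table[] = { { 0, 0, 0xd }, { 1, 0, 0x806e9 } };
        struct cpuid_entry *e = find_entry(table, 2, 1, 0);

        printf("eax=%#x\n", e ? e->eax : 0);
        return 0;
    }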
|
| /arch/sh/kernel/ |
| stacktrace.c |
    34   trace->entries[trace->nr_entries++] = addr;  in save_stack_address()
    66   trace->entries[trace->nr_entries++] = addr;  in save_stack_address_nosched()
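This save_stack_address() idiom, appending to trace->entries and bumping nr_entries, recurs nearly verbatim in the openrisc, sparc and mips stacktrace.c files below, so one hedged sketch covers all of them; struct stack_trace here is a simplified stand-in for the old kernel structure, and the capacity check is made explicit:

    #include <stdio.h>

    #define MAX_TRACE 16

    struct stack_trace {
        unsigned long entries[MAX_TRACE];
        unsigned nr_entries, max_entries;
    };

    static int save_address(struct stack_trace *trace, unsigned long addr)
    {
        if (trace->nr_entries >= trace->max_entries)
            return -1;                  /* trace buffer is full */
        trace->entries[trace->nr_entries++] = addr;
        return 0;
    }

    int main(void)
    {
        struct stack_trace t = { .max_entries = MAX_TRACE };

        save_address(&t, 0xc0ffee);
        printf("%u entries\n", t.nr_entries);
        return 0;
    }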
|
| /arch/openrisc/kernel/ |
| stacktrace.c |
    39   trace->entries[trace->nr_entries++] = addr;  in save_stack_address()
    65   trace->entries[trace->nr_entries++] = addr;  in save_stack_address_nosched()
|
| /arch/sparc/kernel/ |
| stacktrace.c |
    58   trace->entries[trace->nr_entries++] = pc;  in __save_stack_trace()
    68   trace->entries[trace->nr_entries++] = pc;  in __save_stack_trace()
|
| /arch/arm/kernel/ |
| unwind.c |
    65   int entries;  /* number of entries left to interpret */  member
    215  if (ctrl->entries <= 0) {  in unwind_get_byte()
    224  ctrl->entries--;  in unwind_get_byte()
    373  ctrl->entries = 0;  in unwind_exec_insn()
    473  ctrl.entries = 1;  in unwind_frame()
    476  ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);  in unwind_frame()
    495  while (ctrl.entries > 0) {  in unwind_frame()
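In unwind.c, ctrl->entries budgets how many 32-bit instruction words remain to be interpreted: unwind_frame() seeds it from the index entry (1 + ((*ctrl.insn & 0x00ff0000) >> 16)), unwind_get_byte() fails once it reaches zero, and unwind_exec_insn() zeroes it to terminate the loop. The sketch below models just the budgeted byte reader; the struct layout is simplified relative to the kernel's.

    #include <stdio.h>

    struct unwind_ctrl {
        const unsigned char *insn;   /* next instruction byte */
        int entries;                 /* 32-bit words left to interpret */
        int byte;                    /* byte offset within the current word */
    };

    static int get_byte(struct unwind_ctrl *ctrl, unsigned char *out)
    {
        if (ctrl->entries <= 0)
            return -1;               /* corrupt or exhausted unwind data */

        *out = *ctrl->insn++;
        if (++ctrl->byte == 4) {     /* finished the current word */
            ctrl->byte = 0;
            ctrl->entries--;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char word[4] = { 0xb0, 0xb0, 0xb0, 0xb0 };
        struct unwind_ctrl ctrl = { word, 1, 0 };
        unsigned char b;
        int n = 0;

        while (get_byte(&ctrl, &b) == 0)
            n++;
        printf("read %d bytes\n", n);   /* 4: exactly one word's worth */
        return 0;
    }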
|
| /arch/mips/kernel/ |
| stacktrace.c |
    30   trace->entries[trace->nr_entries++] = addr;  in save_raw_context_stack()
    58   trace->entries[trace->nr_entries++] = pc;  in save_context_stack()
|
| /arch/powerpc/sysdev/ |
| fsl_rmu.c |
    720  fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)  in fsl_open_outb_mbox()  argument
    726  if ((entries < RIO_MIN_TX_RING_SIZE) ||  in fsl_open_outb_mbox()
    727  (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {  in fsl_open_outb_mbox()
    734  rmu->msg_tx_ring.size = entries;  in fsl_open_outb_mbox()
    793  ((get_bitmask_order(entries) - 2) << 12));  in fsl_open_outb_mbox()
    852  fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)  in fsl_open_inb_mbox()  argument
    858  if ((entries < RIO_MIN_RX_RING_SIZE) ||  in fsl_open_inb_mbox()
    859  (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {  in fsl_open_inb_mbox()
    866  rmu->msg_rx_ring.size = entries;  in fsl_open_inb_mbox()
    907  setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);  in fsl_open_inb_mbox()
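Both mailbox-open paths above validate the ring the same way: entries must be a power of two between the RIO_MIN/RIO_MAX ring sizes, and the hardware size field is then written as get_bitmask_order(entries) - 2, which for a power of two equals log2(entries) - 1. A sketch of that validate-and-encode step; the limits here are made up and the register write is reduced to a printf:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_RING_SIZE 32             /* illustrative, not the RIO values */
    #define MAX_RING_SIZE 2048

    static bool is_power_of_2(unsigned n) { return n && !(n & (n - 1)); }

    /* fls()-style: 1-based index of the highest set bit (0 for n == 0) */
    static int bitmask_order(unsigned n)
    {
        int order = 0;

        while (n) {
            order++;
            n >>= 1;
        }
        return order;
    }

    static int open_mbox(unsigned entries)
    {
        if (entries < MIN_RING_SIZE || entries > MAX_RING_SIZE ||
            !is_power_of_2(entries))
            return -1;                   /* reject bad ring geometry */

        /* for power-of-two entries this is log2(entries) - 1 */
        printf("entries=%u -> size field %d\n",
               entries, bitmask_order(entries) - 2);
        return 0;
    }

    int main(void)
    {
        open_mbox(256);                  /* size field 7 */
        open_mbox(100);                  /* rejected: not a power of two */
        return 0;
    }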
|
| /arch/powerpc/boot/dts/ |
| ps3.dts |
    33   * dtc expects a clock-frequency and timebase-frequency entries, so
    34   * we'll put a null entries here. These will be initialized after
|