Searched refs:tb (Results 1 – 25 of 29) sorted by relevance

/arch/sparc/mm/
tlb.c
28 if (!tb->tlb_nr) in flush_tlb_pending()
31 flush_tsb_user(tb); in flush_tlb_pending()
38 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, in flush_tlb_pending()
42 tb->tlb_nr, &tb->vaddrs[0]); in flush_tlb_pending()
47 tb->tlb_nr = 0; in flush_tlb_pending()
59 tb->active = 1; in arch_enter_lazy_mmu_mode()
66 if (tb->tlb_nr) in arch_leave_lazy_mmu_mode()
68 tb->active = 0; in arch_leave_lazy_mmu_mode()
82 nr = tb->tlb_nr; in tlb_batch_add_one()
89 if (!tb->active) { in tlb_batch_add_one()
[all …]
tsb.c
88 for (i = 0; i < tb->tlb_nr; i++) in __flush_tsb_one()
113 for (i = 0; i < tb->tlb_nr; i++) in __flush_huge_tsb_one()
114 __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, in __flush_huge_tsb_one()
119 void flush_tsb_user(struct tlb_batch *tb) in flush_tsb_user() argument
121 struct mm_struct *mm = tb->mm; in flush_tsb_user()
126 if (tb->hugepage_shift < REAL_HPAGE_SHIFT) { in flush_tsb_user()
131 if (tb->hugepage_shift == PAGE_SHIFT) in flush_tsb_user()
132 __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); in flush_tsb_user()
135 __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries, in flush_tsb_user()
136 tb->hugepage_shift); in flush_tsb_user()
[all …]
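
The sparc64 hits above are the lazy-MMU TLB batching code: tlb_batch_add_one() queues user virtual addresses in a per-CPU struct tlb_batch while tb->active is set, and flush_tlb_pending() drains the batch (flushing the TSB via flush_tsb_user() and then the TLB) when it fills up or when lazy MMU mode is left. A minimal standalone sketch of that batching shape follows; it is not the kernel code, and the capacity and flush stand-in are invented for illustration:

#include <stdio.h>

#define TLB_BATCH_NR 192                 /* assumed batch capacity */

struct tlb_batch {
    int active;                          /* inside lazy MMU mode?  */
    unsigned int tlb_nr;                 /* entries queued so far  */
    unsigned long vaddrs[TLB_BATCH_NR];
};

/* stand-in for the real per-address TLB/TSB flush */
static void flush_one(unsigned long vaddr) { (void)vaddr; }

static void flush_tlb_pending(struct tlb_batch *tb)
{
    if (!tb->tlb_nr)
        return;                          /* nothing batched */
    for (unsigned int i = 0; i < tb->tlb_nr; i++)
        flush_one(tb->vaddrs[i]);        /* kernel flushes the TSB, then the TLB */
    tb->tlb_nr = 0;
}

static void tlb_batch_add_one(struct tlb_batch *tb, unsigned long vaddr)
{
    if (!tb->active) {                   /* outside lazy mode: flush immediately */
        flush_one(vaddr);
        return;
    }
    tb->vaddrs[tb->tlb_nr++] = vaddr;
    if (tb->tlb_nr == TLB_BATCH_NR)      /* batch full: drain it now */
        flush_tlb_pending(tb);
}

int main(void)
{
    struct tlb_batch tb = { .active = 1 };

    tlb_batch_add_one(&tb, 0x1000);
    tlb_batch_add_one(&tb, 0x2000);
    printf("queued %u addresses\n", tb.tlb_nr);  /* 2: still batched */
    flush_tlb_pending(&tb);                      /* drained when leaving lazy mode */
    return 0;
}
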
/arch/powerpc/kernel/
smp-tbsync.c
25 volatile u64 tb; member
48 u64 tb; in smp_generic_take_timebase() local
63 tb = tbsync->tb; in smp_generic_take_timebase()
72 set_tb(tb >> 32, tb & 0xfffffffful); in smp_generic_take_timebase()
81 u64 tb; in start_contest() local
88 tb = get_tb() + 400; in start_contest()
89 tbsync->tb = tb + offset; in start_contest()
90 tbsync->mark = mark = tb + 400; in start_contest()
98 while (get_tb() <= tb) in start_contest()
watchdog.c
149 u64 tb = get_tb(); in wd_lockup_ipi() local
204 u64 tb, last_reset; in watchdog_smp_panic() local
209 tb = get_tb(); in watchdog_smp_panic()
236 cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000); in watchdog_smp_panic()
348 u64 tb = get_tb(); in watchdog_timer_interrupt() local
350 per_cpu(wd_timer_tb, cpu) = tb; in watchdog_timer_interrupt()
373 u64 tb; in DEFINE_INTERRUPT_HANDLER_NMI() local
383 tb = get_tb(); in DEFINE_INTERRUPT_HANDLER_NMI()
458 u64 tb; in arch_touch_nmi_watchdog() local
463 tb = get_tb(); in arch_touch_nmi_watchdog()
[all …]
time.c
157 static inline unsigned long read_spurr(unsigned long tb) in read_spurr() argument
163 return tb; in read_spurr()
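
In the powerpc hits, tb is a raw timebase value. smp-tbsync.c publishes one through a shared structure and programs it with set_tb(tb >> 32, tb & 0xffffffff); watchdog.c records get_tb() per CPU on each heartbeat and later converts the elapsed ticks with tb_to_ns(tb - last_reset). Below is a small userspace sketch of that delta-and-convert step, assuming a made-up 512 MHz timebase frequency (the kernel gets the real one from firmware):

#include <stdint.h>
#include <stdio.h>

#define TB_FREQ_HZ 512000000ull   /* assumed timebase frequency */

/* convert a timebase tick delta to nanoseconds, like tb_to_ns() */
static uint64_t tb_to_ns(uint64_t ticks)
{
    return ticks * 1000000000ull / TB_FREQ_HZ;
}

int main(void)
{
    uint64_t last_reset = 1000000;           /* tb recorded at the last heartbeat */
    uint64_t tb = 1000000 + 5 * TB_FREQ_HZ;  /* tb at detection time              */

    printf("CPU stuck for %llu ms\n",
           (unsigned long long)(tb_to_ns(tb - last_reset) / 1000000));
    return 0;
}
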
/arch/powerpc/include/asm/
cputime.h
47 unsigned long tb = mftb(); in account_cpu_user_entry() local
50 acct->utime += (tb - acct->starttime_user); in account_cpu_user_entry()
51 acct->starttime = tb; in account_cpu_user_entry()
56 unsigned long tb = mftb(); in account_cpu_user_exit() local
59 acct->stime += (tb - acct->starttime); in account_cpu_user_exit()
60 acct->starttime_user = tb; in account_cpu_user_exit()
kvm_book3s_64.h
145 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
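
cputime.h accounts user and system time purely from timebase stamps: on entry from user space the interval since starttime_user is added to utime and starttime is restarted; on return to user space the interval since starttime is added to stime and starttime_user is restarted. A self-contained sketch of that bookkeeping follows; the field names mirror the excerpt, while the struct name and tick values are invented:

#include <stdint.h>
#include <stdio.h>

struct cputime_acct {                 /* struct name invented for the sketch */
    uint64_t utime;                   /* ticks spent in user space      */
    uint64_t stime;                   /* ticks spent in the kernel      */
    uint64_t starttime;               /* when the kernel stretch began  */
    uint64_t starttime_user;          /* when the user stretch began    */
};

static void account_cpu_user_entry(struct cputime_acct *acct, uint64_t tb)
{
    acct->utime += tb - acct->starttime_user;  /* close the user interval   */
    acct->starttime = tb;                      /* open the kernel interval  */
}

static void account_cpu_user_exit(struct cputime_acct *acct, uint64_t tb)
{
    acct->stime += tb - acct->starttime;       /* close the kernel interval */
    acct->starttime_user = tb;                 /* open the user interval    */
}

int main(void)
{
    struct cputime_acct acct = { .starttime_user = 100 };

    account_cpu_user_entry(&acct, 150);        /* syscall entry: 50 user ticks  */
    account_cpu_user_exit(&acct, 170);         /* syscall exit: 20 kernel ticks */
    printf("utime=%llu stime=%llu\n",
           (unsigned long long)acct.utime, (unsigned long long)acct.stime);
    return 0;
}
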
/arch/m68k/coldfire/
intc-simr.c
132 u16 pa, tb; in intc_irq_set_type() local
136 tb = 0x1; in intc_irq_set_type()
139 tb = 0x2; in intc_irq_set_type()
142 tb = 0x3; in intc_irq_set_type()
146 tb = 0; in intc_irq_set_type()
150 if (tb) in intc_irq_set_type()
155 pa = (pa & ~(0x3 << ebit)) | (tb << ebit); in intc_irq_set_type()
intc-2.c
148 u16 pa, tb; in intc_irq_set_type() local
152 tb = 0x1; in intc_irq_set_type()
155 tb = 0x2; in intc_irq_set_type()
158 tb = 0x3; in intc_irq_set_type()
162 tb = 0; in intc_irq_set_type()
166 if (tb) in intc_irq_set_type()
171 pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2)); in intc_irq_set_type()
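
Both ColdFire interrupt controllers use tb as a 2-bit trigger code: intc_irq_set_type() maps the requested trigger type to 0x1, 0x2 or 0x3 (and 0 when there is nothing to program), then read-modify-writes the IRQ's 2-bit field, pa = (pa & ~(0x3 << ebit)) | (tb << ebit). Below is a sketch of just that field update, with the register modelled as a plain variable and an assumed bit position:

#include <stdint.h>
#include <stdio.h>

/* Update one IRQ's 2-bit trigger field inside a packed register image.
 * 'ebit' is the bit offset of the field, 'tb' the 2-bit code
 * (0x1/0x2/0x3 for the edge variants, 0 when nothing is programmed). */
static uint16_t set_trigger_bits(uint16_t pa, unsigned int ebit, uint16_t tb)
{
    pa &= ~(0x3u << ebit);           /* clear the old 2-bit code */
    pa |= tb << ebit;                /* insert the new one       */
    return pa;
}

int main(void)
{
    uint16_t pa = 0xffff;                    /* pretend register contents               */
    pa = set_trigger_bits(pa, 4, 0x2);       /* program the IRQ whose field sits at bit 4 */
    printf("pa=0x%04x\n", pa);               /* 0xffef: bits 5:4 now hold 0x2           */
    return 0;
}
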
/arch/sparc/kernel/
irq_64.c
1003 tb->cpu_mondo_qmask); in sun4v_register_mondo_queues()
1005 tb->dev_mondo_qmask); in sun4v_register_mondo_queues()
1007 tb->resum_qmask); in sun4v_register_mondo_queues()
1009 tb->nonresum_qmask); in sun4v_register_mondo_queues()
1054 tb->cpu_list_pa = __pa(page); in init_cpu_send_mondo_info()
1066 alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask); in sun4v_init_mondo_queues()
1067 alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask); in sun4v_init_mondo_queues()
1068 alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask); in sun4v_init_mondo_queues()
1069 alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask); in sun4v_init_mondo_queues()
1070 alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask); in sun4v_init_mondo_queues()
[all …]
smp_64.c
295 struct trap_per_cpu *tb; in ldom_startcpu_cpuid() local
312 tb = &trap_block[cpu]; in ldom_startcpu_cpuid()
668 tb->cpu_list_pa, in hypervisor_xcall_deliver()
772 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
786 struct trap_per_cpu *tb; in xcall_deliver() local
805 tb = &trap_block[this_cpu]; in xcall_deliver()
824 xcall_deliver_impl(tb, cnt); in xcall_deliver()
1300 tb->cpu_mondo_pa, 0); in cpu_play_dead()
1302 tb->dev_mondo_pa, 0); in cpu_play_dead()
1304 tb->resum_mondo_pa, 0); in cpu_play_dead()
[all …]
mdesc.c
1081 struct trap_per_cpu *tb) in get_mondo_data() argument
1090 get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8); in get_mondo_data()
1093 get_one_mondo_bits(val, &tb->resum_qmask, 6, 7); in get_mondo_data()
1096 get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2); in get_mondo_data()
1100 tb->cpu_mondo_qmask + 1, in get_mondo_data()
1101 tb->dev_mondo_qmask + 1, in get_mondo_data()
1102 tb->resum_qmask + 1, in get_mondo_data()
1103 tb->nonresum_qmask + 1); in get_mondo_data()
1184 struct trap_per_cpu *tb; in fill_in_one_cpu() local
1201 tb = &trap_block[cpuid]; in fill_in_one_cpu()
[all …]
traps_64.c
2082 struct trap_per_cpu *tb; in sun4v_resum_error() local
2088 tb = &trap_block[cpu]; in sun4v_resum_error()
2089 paddr = tb->resum_kernel_buf_pa + offset; in sun4v_resum_error()
2200 struct trap_per_cpu *tb; in sun4v_nonresum_error() local
2206 tb = &trap_block[cpu]; in sun4v_nonresum_error()
2207 paddr = tb->nonresum_kernel_buf_pa + offset; in sun4v_nonresum_error()
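
In the sparc64 kernel hits, tb points into trap_block[], the per-CPU array of struct trap_per_cpu that holds the physical addresses and size masks of each CPU's sun4v mondo and error queues; the error handlers find their payload as a base physical address plus an offset (tb->resum_kernel_buf_pa + offset in traps_64.c). A toy sketch of that per-CPU lookup, with invented field values:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* trimmed-down stand-in for sparc64's struct trap_per_cpu */
struct trap_per_cpu {
    uint64_t resum_kernel_buf_pa;    /* phys. base of resumable-error buffers */
    uint64_t resum_qmask;            /* queue size mask                       */
};

/* per-CPU table, indexed by CPU id like trap_block[] */
static struct trap_per_cpu trap_block[NR_CPUS] = {
    [0] = { .resum_kernel_buf_pa = 0x10000000, .resum_qmask = 0x3f },
    [1] = { .resum_kernel_buf_pa = 0x10010000, .resum_qmask = 0x3f },
};

/* locate one CPU's error record, as the paddr computation above does */
static uint64_t resum_record_pa(int cpu, uint64_t offset)
{
    struct trap_per_cpu *tb = &trap_block[cpu];
    return tb->resum_kernel_buf_pa + offset;
}

int main(void)
{
    printf("cpu1 record at pa 0x%llx\n",
           (unsigned long long)resum_record_pa(1, 0x80));
    return 0;
}
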
/arch/powerpc/boot/
4xx.c
322 u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; in ibm440gp_fixup_clocks() local
346 tb = sys_clk; in ibm440gp_fixup_clocks()
349 tb = cpu; in ibm440gp_fixup_clocks()
368 dt_fixup_cpu_clocks(cpu, tb, 0); in ibm440gp_fixup_clocks()
409 u32 ccr1, tb = tmr_clk; in __ibm440eplike_fixup_clocks() local
452 if (tb == 0) { in __ibm440eplike_fixup_clocks()
457 tb = cpu; in __ibm440eplike_fixup_clocks()
459 dt_fixup_cpu_clocks(cpu, tb, 0); in __ibm440eplike_fixup_clocks()
devtree.c
63 void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus) in dt_fixup_cpu_clocks() argument
68 printf("CPU timebase-frequency <- 0x%x (%dMHz)\n\r", tb, MHZ(tb)); in dt_fixup_cpu_clocks()
74 setprop_val(devp, "timebase-frequency", cpu_to_be32(tb)); in dt_fixup_cpu_clocks()
79 timebase_period_ns = 1000000000 / tb; in dt_fixup_cpu_clocks()
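
In the boot wrapper, tb is the timebase frequency in Hz: the 4xx fixups derive it from the system or CPU clock (falling back to tb = cpu), and dt_fixup_cpu_clocks() prints it, stores it in the device tree's timebase-frequency property, and caches timebase_period_ns = 1000000000 / tb. A sketch of those derived values follows; the MHZ() rounding and the function here are stand-ins, not the wrapper's code:

#include <stdint.h>
#include <stdio.h>

#define MHZ(x) (((x) + 500000) / 1000000)   /* assumed rounding helper */

static uint32_t timebase_period_ns;

static void fixup_timebase(uint32_t tb)     /* illustrative stand-in */
{
    printf("CPU timebase-frequency <- 0x%x (%uMHz)\n", tb, MHZ(tb));
    /* the real dt_fixup_cpu_clocks() also writes the value into the
     * device tree: setprop "timebase-frequency" = cpu_to_be32(tb)   */
    timebase_period_ns = 1000000000u / tb;
}

int main(void)
{
    fixup_timebase(333333333u);             /* e.g. a 333 MHz timebase */
    printf("timebase_period_ns = %u\n", timebase_period_ns);
    return 0;
}
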
/arch/powerpc/kvm/
book3s_hv_p9_entry.c
224 u64 tb = mftb() - vc->tb_offset_applied; in accumulate_time() local
232 vcpu->arch.cur_tb_start = tb; in accumulate_time()
237 delta = tb - prev_tb; in accumulate_time()
554 hdec = time_limit - *tb; in kvmhv_vcpu_entry_p9()
597 u64 new_tb = *tb + vc->tb_offset; in kvmhv_vcpu_entry_p9()
603 *tb = new_tb; in kvmhv_vcpu_entry_p9()
848 *tb = mftb(); in kvmhv_vcpu_entry_p9()
849 vcpu->arch.dec_expires = dec + *tb; in kvmhv_vcpu_entry_p9()
852 u64 new_tb = *tb - vc->tb_offset_applied; in kvmhv_vcpu_entry_p9()
858 *tb = new_tb; in kvmhv_vcpu_entry_p9()
[all …]
book3s_64_mmu.c
41 if (vcpu->arch.slb[i].tb) in kvmppc_mmu_book3s_64_find_slbe()
55 vcpu->arch.slb[i].tb ? 't' : ' ', in kvmppc_mmu_book3s_64_find_slbe()
65 return slbe->tb ? SID_SHIFT_1T : SID_SHIFT; in kvmppc_slb_sid_shift()
129 ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M; in kvmppc_mmu_book3s_64_get_pteg()
233 if (slbe->tb) in kvmppc_mmu_book3s_64_xlate()
381 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; in kvmppc_mmu_book3s_64_slbmte()
382 slbe->esid = slbe->tb ? esid_1t : esid; in kvmppc_mmu_book3s_64_slbmte()
587 if (slb->tb) { in kvmppc_mmu_book3s_64_esid_to_vsid()
emulate.c
63 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) in kvmppc_get_dec() argument
65 u64 jd = tb - vcpu->arch.dec_jiffies; in kvmppc_get_dec()
book3s_hv.c
300 vc->preempt_tb = tb; in kvmppc_core_start_stolen()
2891 tb = *acc; in debugfs_timings_read()
2907 tb_to_ns(tb.tb_total), in debugfs_timings_read()
2908 tb_to_ns(tb.tb_min), in debugfs_timings_read()
2909 tb_to_ns(tb.tb_max)); in debugfs_timings_read()
3183 now = tb; in kvmppc_remove_runnable()
4291 *tb = mftb(); in kvmhv_vcpu_entry_nestedv2()
4399 *tb = mftb(); in kvmhv_vcpu_entry_p9_nested()
4423 if (*tb >= next_timer) in kvmhv_p9_guest_entry()
4879 u64 tb; in kvmhv_run_single_vcpu() local
[all …]
booke.c
580 u64 tb, wdt_tb, wdt_ticks = 0; in watchdog_next_timeout() local
585 tb = get_tb(); in watchdog_next_timeout()
590 if (tb & wdt_tb) in watchdog_next_timeout()
593 wdt_ticks += wdt_tb - (tb & (wdt_tb - 1)); in watchdog_next_timeout()
1512 u64 tb = get_tb(); in get_sregs_base() local
1523 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); in get_sregs_base()
1524 sregs->u.e.tb = tb; in get_sregs_base()
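
Among the KVM hits, booke.c's watchdog_next_timeout() computes how many timebase ticks remain until the watchdog bit next flips from 0 to 1: wdt_tb is the weight of that single bit, the low bits tb & (wdt_tb - 1) say how far into the current half-cycle we are, and an extra wdt_tb is added when the bit is currently set. A standalone sketch of that computation, assuming bit 20 of a free-running counter as the watchdog bit:

#include <stdint.h>
#include <stdio.h>

/* ticks until the counter bit with weight wdt_tb next flips 0 -> 1 */
static uint64_t ticks_to_next_watchdog(uint64_t tb, uint64_t wdt_tb)
{
    uint64_t wdt_ticks = 0;

    if (tb & wdt_tb)                  /* bit is currently 1: it must fall */
        wdt_ticks += wdt_tb;          /* before it can rise again         */
    wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));   /* ticks until the low bits wrap */
    return wdt_ticks;
}

int main(void)
{
    uint64_t wdt_tb = 1ull << 20;     /* assumed watchdog bit weight */
    uint64_t tb = 0x123456;           /* current counter value       */

    printf("next timeout in %llu ticks\n",
           (unsigned long long)ticks_to_next_watchdog(tb, wdt_tb));
    return 0;
}
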
/arch/um/kernel/
dyn.lds.S
47 .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
48 .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
125 .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
uml.lds.S
98 .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
/arch/sparc/include/asm/
tlbflush_64.h
20 void flush_tsb_user(struct tlb_batch *tb);
/arch/x86/kernel/cpu/mce/
amd.c
1140 static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb, in allocate_threshold_blocks() argument
1187 if (tb->blocks) in allocate_threshold_blocks()
1188 list_add(&b->miscj, &tb->blocks->miscj); in allocate_threshold_blocks()
1190 tb->blocks = b; in allocate_threshold_blocks()
1192 err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b)); in allocate_threshold_blocks()
1200 err = allocate_threshold_blocks(cpu, tb, bank, block, address); in allocate_threshold_blocks()
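
In the x86 MCE code, tb is a struct threshold_bank: allocate_threshold_blocks() either makes the block it just created the bank's first block (tb->blocks = b) or adds it to that first block's miscj list. A minimal sketch of that "first block anchors the list" shape, using a plain singly linked list instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct threshold_block {
    unsigned int block;               /* block index within the bank   */
    struct threshold_block *miscj;    /* next block on the bank's list */
};

struct threshold_bank {
    struct threshold_block *blocks;   /* first block, anchor of the list */
};

static void add_block(struct threshold_bank *tb, unsigned int idx)
{
    struct threshold_block *b = calloc(1, sizeof(*b));

    if (!b)
        return;
    b->block = idx;
    if (tb->blocks) {                 /* not the first: chain it onto the anchor */
        b->miscj = tb->blocks->miscj;
        tb->blocks->miscj = b;
    } else {
        tb->blocks = b;               /* first block becomes the anchor */
    }
}

int main(void)
{
    struct threshold_bank tb = { 0 };

    for (unsigned int i = 0; i < 3; i++)
        add_block(&tb, i);
    for (struct threshold_block *b = tb.blocks; b; b = b->miscj)
        printf("block %u\n", b->block);
    return 0;
}
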
/arch/powerpc/include/uapi/asm/
kvm.h
222 __u64 tb; member

Completed in 67 milliseconds
