| /arch/x86/kernel/ |
| irq_32.c |
     69  static inline bool execute_on_irq_stack(bool overflow, struct irq_desc *desc)  in execute_on_irq_stack() argument
     92  if (unlikely(overflow))  in execute_on_irq_stack()
    150  bool overflow = check_stack_overflow();  in __handle_irq() local
    152  if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {  in __handle_irq()
    153  if (unlikely(overflow))  in __handle_irq()
|
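The irq_32.c hits above are the 32-bit x86 interrupt entry path: it computes `overflow = check_stack_overflow()` and, unless the interrupt arrived from user mode, runs the handler on a dedicated IRQ stack. As a rough, user-space-only sketch of the underlying idea (estimate how much stack is left and flag an impending overflow), something like the following works; `STACK_SIZE`, `STACK_MARGIN` and the probe trick are assumptions for illustration, not the kernel's implementation.

```c
#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE   (8UL * 1024 * 1024)  /* assumed main-thread stack size */
#define STACK_MARGIN (64UL * 1024)        /* warn when this little is left  */

static uintptr_t stack_base;              /* rough top of stack, set in main() */

/* Rough analogue of check_stack_overflow(): estimate the stack pointer
 * from a local variable's address and compare against the assumed bounds. */
static int check_stack_overflow(void)
{
    char probe;
    uintptr_t used = stack_base - (uintptr_t)&probe;

    return used > STACK_SIZE - STACK_MARGIN;
}

static void recurse(int depth)
{
    volatile char pad[4096];              /* burn some stack per call */

    pad[0] = (char)depth;
    if (check_stack_overflow()) {
        printf("stack nearly exhausted at depth %d\n", depth);
        return;
    }
    recurse(depth + 1);
    pad[0] = 0;                           /* keep the frame live across the call */
}

int main(void)
{
    char anchor;

    stack_base = (uintptr_t)&anchor;      /* stack grows down on x86 */
    recurse(0);
    return 0;
}
```
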
| module.c |
    126  goto overflow;  in __write_relocate_add()
    131  goto overflow;  in __write_relocate_add()
    181  overflow:  in __write_relocate_add()
|
| machine_kexec_64.c |
    561  goto overflow;  in arch_kexec_apply_relocations_add()
    566  goto overflow;  in arch_kexec_apply_relocations_add()
    581  overflow:  in arch_kexec_apply_relocations_add()
|
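The module.c and machine_kexec_64.c hits show the usual relocation shape: compute the value, range-check it against the field being patched, and bail out through a shared `overflow:` label. A stand-alone sketch of that shape for a hypothetical 32-bit PC-relative field (the field type and helper name here are made up, not the kernel's) might look like:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical: patch a 32-bit PC-relative field at `loc` so it reaches
 * `sym`.  The 64-bit distance must fit in a signed 32 bits, otherwise we
 * bail out through a shared overflow: label, mirroring the goto overflow
 * pattern in the relocation code above. */
static int apply_rel32(int32_t *loc, uint64_t sym)
{
    int64_t val = (int64_t)sym - (int64_t)(uintptr_t)loc;

    if (val != (int64_t)(int32_t)val)
        goto overflow;

    *loc = (int32_t)val;
    return 0;

overflow:
    fprintf(stderr, "relocation out of range: %+lld\n", (long long)val);
    return -1;
}

int main(void)
{
    static int32_t field;

    /* A target close to `field` fits; one ~8 GiB away does not. */
    if (apply_rel32(&field, (uint64_t)(uintptr_t)&field + 0x1000) == 0)
        printf("near target patched: 0x%x\n", (unsigned)field);
    apply_rel32(&field, (uint64_t)(uintptr_t)&field + (1ULL << 33));
    return 0;
}
```
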
| /arch/m68k/fpsp040/ |
| x_ovfl.S |
      4  | fpsp_ovfl --- FPSP handler for overflow exception
     23  | RM For pos overflow, the largest pos number. For neg overflow,
     25  | RP For pos overflow, +infinity. For neg overflow, the largest
|
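The x_ovfl.S comments spell out the IEEE default overflow results per rounding mode: round-to-minus-infinity (RM) turns a positive overflow into the largest finite positive number, round-to-plus-infinity (RP) turns it into +infinity, and the rules mirror for negative overflows. A small C rendering of that table for double precision (standard IEEE 754 behaviour, not the FPSP code itself):

```c
#include <fenv.h>
#include <float.h>
#include <math.h>
#include <stdio.h>

/* build: cc -std=c11 ovf.c -lm   (GCC also wants -frounding-math) */

/* Default double-precision overflow result as a function of the result's
 * sign and the rounding mode -- the rule the FPSP comments describe as
 * RN/RZ/RM/RP for extended precision. */
static double overflow_result(int negative, int rounding_mode)
{
    switch (rounding_mode) {
    case FE_TONEAREST:  /* RN: infinity, with the sign of the result          */
        return negative ? -INFINITY : INFINITY;
    case FE_TOWARDZERO: /* RZ: largest finite magnitude, with the sign        */
        return negative ? -DBL_MAX : DBL_MAX;
    case FE_DOWNWARD:   /* RM: pos -> largest finite number, neg -> -infinity */
        return negative ? -INFINITY : DBL_MAX;
    case FE_UPWARD:     /* RP: pos -> +infinity, neg -> most negative finite  */
        return negative ? -DBL_MAX : INFINITY;
    }
    return negative ? -INFINITY : INFINITY;
}

int main(void)
{
    volatile double big = DBL_MAX;

    /* Hardware check of the RM row: DBL_MAX * 2 rounded toward minus
     * infinity stays at DBL_MAX instead of becoming +infinity. */
    fesetround(FE_DOWNWARD);
    printf("hardware: %g\n", big * 2.0);
    printf("table   : %g\n", overflow_result(0, FE_DOWNWARD));
    fesetround(FE_TONEAREST);
    return 0;
}
```
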
| util.S |
      6  | ovf_res: used by overflow to force the correct
    157  | ovf_r_x3 --- overflow result calculation
    173  | ovf_res --- overflow result calculation
    199  tstb LOCAL_SGN(%a0) |if negative overflow
    209  tstb LOCAL_SGN(%a0) |if negative overflow
    230  tstb LOCAL_SGN(%a0) |if negative overflow
    240  tstb LOCAL_SGN(%a0) |if negative overflow
    261  tstb LOCAL_SGN(%a0) |if negative overflow
    271  tstb LOCAL_SGN(%a0) |if negative overflow
    283  tstb LOCAL_SGN(%a0) |if negative overflow
    [all …]
|
| scosh.S |
     34  | However, invoking exp(|X|) may cause premature overflow.
     42  | 5. (|X| > 16480 log2) sinh(X) must overflow. Return
     43  | Huge*Huge to generate overflow and an infinity with
|
| ssinh.S |
     34  | However, invoking exp(|X|) may cause premature overflow.
     43  | 5. (|X| > 16480 log2) sinh(X) must overflow. Return
     44  | sign(X)*Huge*Huge to generate overflow and an infinity with
|
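The scosh.S/ssinh.S comments describe two tricks for large |X|: fold the /2 of exp(|X|)/2 into the exponent so exp() itself does not overflow prematurely, and, once the result must overflow anyway, return (sign-adjusted) Huge*Huge so the overflow exception is raised and an infinity of the right sign comes back. A double-precision analogue of the cosh case (thresholds are for double, not the 68881 extended format; sinh would additionally apply sign(X); link with -lm):

```c
#include <math.h>
#include <stdio.h>

/* Double-precision analogue of the large-|X| path: cosh(x) ~= exp(|x|)/2,
 * but exp(|x|) already overflows for |x| > ~709.78 even though cosh(x)
 * stays finite up to |x| > ~710.47.  Folding the /2 into the exponent as
 * exp(|x| - ln 2) avoids the premature overflow; once the result must
 * overflow anyway, huge*huge raises the overflow exception and returns
 * infinity, as the assembly comments describe. */
static double big_cosh(double x)
{
    const double ln2  = 0.69314718055994530942;
    const double huge = 1e300;
    double ax = fabs(x);

    if (ax > 711.0)             /* result must overflow */
        return huge * huge;     /* +infinity, overflow raised */
    return exp(ax - ln2);       /* exp(|x|)/2 without overflowing exp() */
}

int main(void)
{
    double x = 710.0;

    printf("naive  : %g\n", exp(fabs(x)) / 2.0);  /* inf: premature overflow */
    printf("careful: %g\n", big_cosh(x));         /* ~1.11e308: still finite */
    return 0;
}
```
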
| scale.S |
      6  | the source operand is (>= 2^14) an overflow or underflow
     94  | The result can be denormalized, if src = 0, or overflow,
    102  cmpil #0x7fff,%d1 |test for overflow
|
| /arch/m68k/mvme147/ |
| config.c |
    165  u8 overflow, tmp;  in mvme147_read_clk() local
    172  overflow = m147_pcc->t1_cntrl >> 4;  in mvme147_read_clk()
    173  if (overflow != tmp)  in mvme147_read_clk()
    176  ticks = count + overflow * PCC_TIMER_CYCLES;  in mvme147_read_clk()
|
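mvme147_read_clk() (and mvme16x_read_clk() further down) read a free-running timer whose overflow counter can tick between reading the overflow register and reading the count, so they sample the overflow counter on both sides of the count read and retry if it changed. A self-contained sketch with a simulated timer; `TIMER_CYCLES`, `read_overflow()` and `read_count()` are stand-ins, not the PCC hardware interface:

```c
#include <stdint.h>
#include <stdio.h>

#define TIMER_CYCLES 16000u   /* counter range before it wraps (assumed) */

/* Simulated free-running hardware timer: a count register plus a small
 * overflow counter that ticks every time the count wraps. */
static unsigned long fake_clock;

static uint8_t      read_overflow(void) { return (fake_clock / TIMER_CYCLES) & 0xf; }
static unsigned int read_count(void)    { return fake_clock % TIMER_CYCLES; }

/* Same shape as mvme147_read_clk()/mvme16x_read_clk(): sample the
 * overflow counter on both sides of the count read and retry if it
 * changed, so overflow and count form a consistent pair. */
static unsigned long read_clock(void)
{
    uint8_t overflow, tmp;
    unsigned int count;

    do {
        tmp = read_overflow();
        count = read_count();
        fake_clock++;             /* pretend time passes between reads */
        overflow = read_overflow();
    } while (overflow != tmp);

    return count + (unsigned long)overflow * TIMER_CYCLES;
}

int main(void)
{
    fake_clock = 3 * TIMER_CYCLES - 1;   /* just about to wrap */
    printf("ticks = %lu\n", read_clock());
    return 0;
}
```
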
| /arch/s390/kernel/ |
| perf_cpum_sf.c |
   1017  int overflow;  in perf_push_sample() local
   1071  overflow = 0;  in perf_push_sample()
   1077  return overflow;  in perf_push_sample()
   1127  if (!*overflow) {  in hw_collect_samples()
   1136  *overflow += 1;  in hw_collect_samples()
   1200  if (te->header.overflow)  in hw_perf_event_update()
   1220  new.overflow = 0;  in hw_perf_event_update()
   1394  *overflow = prev.overflow;  in aux_set_alert()
   1404  new.overflow = 0;  in aux_set_alert()
   1469  new.overflow = 0;  in aux_reset_buffer()
    [all …]
|
| perf_pai_ext.c |
    485  int overflow;  in paiext_push_sample() local
    508  overflow = perf_event_overflow(event, &data, &regs);  in paiext_push_sample()
    513  return overflow;  in paiext_push_sample()
|
| perf_pai_crypto.c |
    458  int overflow;  in paicrypt_push_sample() local
    483  overflow = perf_event_overflow(event, &data, &regs);  in paicrypt_push_sample()
    487  return overflow;  in paicrypt_push_sample()
|
| /arch/loongarch/kernel/ |
| module.c |
    180  goto overflow;  in apply_r_larch_sop_imm_field()
    187  goto overflow;  in apply_r_larch_sop_imm_field()
    193  goto overflow;  in apply_r_larch_sop_imm_field()
    202  goto overflow;  in apply_r_larch_sop_imm_field()
    208  goto overflow;  in apply_r_larch_sop_imm_field()
    217  goto overflow;  in apply_r_larch_sop_imm_field()
    228  goto overflow;  in apply_r_larch_sop_imm_field()
    236  goto overflow;  in apply_r_larch_sop_imm_field()
    246  overflow:  in apply_r_larch_sop_imm_field()
|
| perf_event.c |
    133  u64 overflow;  member
    326  local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);  in loongarch_pmu_event_set_period()
    328  loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);  in loongarch_pmu_event_set_period()
    510  if (counter & loongarch_pmu.overflow) {  in pmu_handle_irq()
    859  loongarch_pmu.overflow = 1ULL << 63;  in init_hw_perf_events()
|
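Here `overflow` is the counter value at which the PMU raises its interrupt (bit 63 on LoongArch; bit 47, 63 or 31 on the MIPS PMUs listed later), and *_event_set_period() starts the counter at `overflow - left` so the interrupt fires after exactly `left` more events. A minimal arithmetic sketch of that programming model; `PMU_OVERFLOW` and the simulated counter are assumptions, not the hardware interface:

```c
#include <stdint.h>
#include <stdio.h>

/* The counter raises its overflow interrupt when this bit is reached.
 * The width is hardware-specific: bit 63 on LoongArch, bit 47/63/31 on
 * the various MIPS PMUs. */
#define PMU_OVERFLOW (1ULL << 47)

static uint64_t counter;          /* stand-in for the hardware counter */

/* Program the counter so it overflows after `left` more events,
 * mirroring *_pmu_event_set_period(): start it at overflow - left. */
static void set_period(uint64_t left)
{
    counter = PMU_OVERFLOW - left;
}

int main(void)
{
    uint64_t left = 100000;       /* sampling period: events until IRQ */

    set_period(left);
    counter += left;              /* simulate `left` events occurring   */

    if (counter & PMU_OVERFLOW)   /* same test as the IRQ handlers use  */
        printf("overflow after %llu events\n",
               (unsigned long long)(counter - (PMU_OVERFLOW - left)));
    return 0;
}
```
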
| /arch/m68k/mvme16x/ |
| config.c |
    430  u8 overflow, tmp;  in mvme16x_read_clk() local
    436  overflow = in_8(PCCTOVR1) >> 4;  in mvme16x_read_clk()
    437  if (overflow != tmp)  in mvme16x_read_clk()
    439  ticks += overflow * PCC_TIMER_CYCLES;  in mvme16x_read_clk()
|
| /arch/arm64/kernel/vdso32/ |
| Makefile |
     72  VDSO_CFLAGS += -fno-strict-overflow
     84  VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
|
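The Makefile hits are compiler flags: -fno-strict-overflow stops the compiler from assuming signed arithmetic never wraps, and the cc32-disable-warning call silences -Wshift-count-overflow for the 32-bit compat objects. What the first flag buys is easy to demonstrate; under strict-overflow assumptions an optimizer may treat the wrap check below as always false (exact behaviour depends on compiler and version):

```c
#include <limits.h>
#include <stdio.h>

/* Signed overflow is undefined behaviour in C, so a compiler that assumes
 * strict overflow may decide `x + 1 < x` can never be true and drop the
 * check entirely at -O2.  Building with -fno-strict-overflow (or -fwrapv)
 * keeps wrap-style checks like this one working. */
static int next_would_wrap(int x)
{
    return x + 1 < x;      /* only "true" if x == INT_MAX and wrap occurs */
}

int main(void)
{
    /* Try: cc -O2 wrap.c    vs.    cc -O2 -fno-strict-overflow wrap.c */
    printf("%d\n", next_would_wrap(INT_MAX));
    return 0;
}
```
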
| /arch/arc/kernel/ |
| perf_event.c |
    411  int overflow = 0;  in arc_pmu_event_set_period() local
    419  overflow = 1;  in arc_pmu_event_set_period()
    425  overflow = 1;  in arc_pmu_event_set_period()
    443  return overflow;  in arc_pmu_event_set_period()
|
| /arch/x86/events/amd/ |
| ibs.c |
    106  int overflow = 0;  in perf_event_set_period() local
    115  overflow = 1;  in perf_event_set_period()
    122  overflow = 1;  in perf_event_set_period()
    141  return overflow;  in perf_event_set_period()
    378  int overflow;  in perf_ibs_set_period() local
    381  overflow = perf_event_set_period(hwc, perf_ibs->min_period,  in perf_ibs_set_period()
    385  return overflow;  in perf_ibs_set_period()
|
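perf_event_set_period() in ibs.c, like arc_pmu_event_set_period() above, reloads the remaining event count when it has run out, clamps it to what the hardware counter can hold, and returns `overflow` so the caller knows a sample period elapsed. Roughly that shape, with made-up types and numbers rather than the kernel's:

```c
#include <stdint.h>
#include <stdio.h>

/* `left` is how many events remain before the next sample.  If it has
 * already run out (or ran past zero), reload it from the period and
 * report overflow = 1 so the caller records a sample; also clamp to what
 * the hardware counter can actually hold. */
struct fake_event {
    int64_t  period;      /* configured sampling period    */
    int64_t  left;        /* events remaining until sample */
    uint64_t max_period;  /* hardware counter capacity     */
};

static int set_period(struct fake_event *ev)
{
    int overflow = 0;

    if (ev->left <= -ev->period) {        /* ran way past: restart a full period */
        ev->left = ev->period;
        overflow = 1;
    } else if (ev->left <= 0) {           /* ran just past: extend by one period */
        ev->left += ev->period;
        overflow = 1;
    }

    if (ev->left > (int64_t)ev->max_period)  /* counter cannot hold more */
        ev->left = ev->max_period;

    return overflow;                      /* caller records a sample if set */
}

int main(void)
{
    struct fake_event ev = { .period = 4000, .left = -120, .max_period = 0xffff };

    printf("overflow=%d, reprogrammed to %lld events\n",
           set_period(&ev), (long long)ev.left);
    return 0;
}
```
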
| /arch/arm/boot/dts/aspeed/ |
| aspeed-bmc-ibm-system1.dts |
    225  * Use small nominator to prevent integer overflow.
    238  * Use small nominator to prevent integer overflow.
    251  * Use small nominator to prevent integer overflow.
    264  * Use small nominator to prevent integer overflow.
    277  * Use small nominator to prevent integer overflow.
    290  * Use small nominator to prevent integer overflow.
    303  * Use small nominator to prevent integer overflow.
|
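The repeated device-tree comment is about fixed ratios that a driver will later multiply with in 32-bit arithmetic: writing the ratio with a small numerator keeps the intermediate product inside 32 bits. The arithmetic is easy to reproduce; the specific numbers below are illustrative, not taken from the DTS:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t in = 4000;

    /* The same scale factor written two ways: 2000000/600 and 10000/3.
     * A driver computing `in * num / den` in 32-bit math overflows with
     * the big numerator (4000 * 2000000 > 2^32) but not with the small one. */
    uint32_t big_num = 2000000, big_den = 600;
    uint32_t small_num = 10000,  small_den = 3;

    printf("big numerator:   %u\n", in * big_num / big_den);     /* wrapped, wrong  */
    printf("small numerator: %u\n", in * small_num / small_den); /* 13333333, right */
    printf("64-bit interim:  %llu\n",
           (unsigned long long)in * big_num / big_den);          /* also right      */
    return 0;
}
```
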
| /arch/arm64/kvm/ |
| pmu-emul.c |
    399  bool overflow;  in kvm_pmu_update_state() local
    401  overflow = kvm_pmu_overflow_status(vcpu);  in kvm_pmu_update_state()
    402  if (pmu->irq_level == overflow)  in kvm_pmu_update_state()
    405  pmu->irq_level = overflow;  in kvm_pmu_update_state()
    409  pmu->irq_num, overflow, pmu);  in kvm_pmu_update_state()
|
| /arch/riscv/ |
| Kconfig.errata |
    114  The T-Head C9xx cores implement a PMU overflow extension very
    117  This will apply the overflow errata to handle the non-standard
|
| /arch/mips/kernel/ |
| perf_event_mipsxx.c |
     79  u64 overflow;  member
    436  local64_set(&hwc->prev_count, mipspmu.overflow - left);  in mipspmu_event_set_period()
    442  mipspmu.write_counter(idx, mipspmu.overflow - left);  in mipspmu_event_set_period()
   1604  if (!(counter & mipspmu.overflow))  in mipsxx_pmu_handle_shared_irq()
   2027  mipspmu.overflow = 1ULL << 47;  in init_hw_perf_events()
   2032  mipspmu.overflow = 1ULL << 63;  in init_hw_perf_events()
   2040  mipspmu.overflow = 1ULL << 31;  in init_hw_perf_events()
|
| /arch/arm64/kernel/ |
| module.c |
    435  goto overflow;  in apply_relocate_add()
    441  overflow:  in apply_relocate_add()
|
| /arch/arm/kernel/ |
| sleep.S |
     71  @ Run the suspend code from the overflow stack so we don't have to rely
|
| /arch/s390/include/asm/ |
| cpu_mf.h |
    143  unsigned long long overflow; /* 64 - Overflow Count */  member
|