/kernel/time/
timer.c
     964  if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))  in __forward_timer_base()
     966  base->clk = base->next_expiry;  in __forward_timer_base()
    1006  return base;  in lock_timer_base()
    1068  clk = base->clk;  in __mod_timer()
    1517  __releases(&base->lock) __releases(&base->expiry_lock)  in timer_sync_wait_running()
    1518  __acquires(&base->expiry_lock) __acquires(&base->lock)  in timer_sync_wait_running()
    1809  unsigned long clk = base->clk = base->next_expiry;  in collect_expired_timers()
    1862  clk = base->clk;  in timer_recalc_next_expiry()
    2368  base->clk++;  in __run_timers()
    2505  base->next_expiry = base->clk + TIMER_NEXT_MAX_DELTA;  in timers_prepare_cpu()
    [all …]
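
The timer.c hits cluster around the wheel's per-CPU base: `clk` is the bucket-scan position, `next_expiry` caches the earliest pending timer, and `__forward_timer_base()` advances `clk` over buckets known to be empty. Below is a minimal userspace sketch of that forwarding pattern; the type and function names are hypothetical, and the kernel additionally WARNs when the cached `next_expiry` has gone stale:

```c
#include <stdio.h>

/* Wraparound-safe "a before b" comparison in the style of time_before():
 * valid while the operands are less than half the counter range apart. */
static int time_before(unsigned long a, unsigned long b)
{
    return (long)(a - b) < 0;
}

struct tbase {                   /* hypothetical mini timer base */
    unsigned long clk;           /* bucket-scan position */
    unsigned long next_expiry;   /* cached earliest pending expiry */
};

static void forward_base(struct tbase *base)
{
    if (time_before(base->next_expiry, base->clk))
        base->next_expiry = base->clk;  /* stale cache: clamp (kernel WARNs) */
    else
        base->clk = base->next_expiry;  /* skip guaranteed-empty buckets */
}

int main(void)
{
    struct tbase base = { .clk = 100, .next_expiry = 150 };

    forward_base(&base);
    printf("clk=%lu next_expiry=%lu\n", base.clk, base.next_expiry);
    return 0;
}
```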
|
hrtimer.c
     169  base = READ_ONCE(timer->base);  in lock_hrtimer_base()
     172  if (likely(base == timer->base))  in lock_hrtimer_base()
     234  return base;  in get_target_base()
     285  WRITE_ONCE(timer->base, base);  in switch_hrtimer_base()
     304  struct hrtimer_clock_base *base = timer->base;  in lock_hrtimer_base() (local)
     813  struct hrtimer_clock_base *base = timer->base;  in hrtimer_reprogram() (local)
    1091  base->cpu_base->active_bases |= 1 << base->index;  in enqueue_hrtimer()
    1597  int base;  in __hrtimer_setup() (local)
    1624  timer->base = &cpu_base->clock_base[base];  in __hrtimer_setup()
    1686  base = READ_ONCE(timer->base);  in hrtimer_active()
    [all …]
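
Several of the hrtimer.c hits (169, 172, 304, 1686) belong to one idiom: a timer's base pointer can change concurrently because timers migrate between CPU bases, so lock_hrtimer_base() reads the pointer, takes that base's lock, then rechecks the pointer before trusting the lock. A compilable sketch of the idiom with hypothetical pthread-based types:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct clock_base {
    pthread_mutex_t lock;
};

struct timer {
    _Atomic(struct clock_base *) base;  /* may be switched concurrently */
};

static struct clock_base *lock_timer_base(struct timer *t)
{
    for (;;) {
        struct clock_base *base = atomic_load(&t->base);

        pthread_mutex_lock(&base->lock);
        if (base == atomic_load(&t->base))
            return base;  /* still current: the lock now protects the timer */
        pthread_mutex_unlock(&base->lock);  /* migrated meanwhile: retry */
    }
}

int main(void)
{
    struct clock_base cb = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct timer t;

    atomic_init(&t.base, &cb);
    pthread_mutex_unlock(&lock_timer_base(&t)->lock);
    printf("locked and rechecked base\n");
    return 0;
}
```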
|
alarmtimer.c
     191  scoped_guard(spinlock_irqsave, &base->lock)  in alarmtimer_fired()
     192  alarmtimer_dequeue(base, alarm);  in alarmtimer_fired()
     346  alarmtimer_enqueue(base, alarm);  in alarm_start()
     372  guard(spinlock_irqsave)(&base->lock);  in alarm_restart()
     375  alarmtimer_enqueue(base, alarm);  in alarm_restart()
     394  alarmtimer_dequeue(base, alarm);  in alarm_try_to_cancel()
     465  struct alarm_base *base;  in alarmtimer_freezerset() (local)
     470  base = &alarm_bases[ALARM_REALTIME];  in alarmtimer_freezerset()
     474  base = &alarm_bases[ALARM_BOOTTIME];  in alarmtimer_freezerset()
     632  base->get_timespec(tp);  in alarm_clock_get_timespec()
    [all …]
|
timekeeping.c
     419  struct tk_read_base *base = tkf->base;  in update_fast_timekeeper() (local)
     425  memcpy(base, tkr, sizeof(*base));  in update_fast_timekeeper()
     431  memcpy(base + 1, base, sizeof(*base));  in update_fast_timekeeper()
     819  base = tk->tkr_mono.base;  in ktime_get()
     932  base = tk->tkr_raw.base;  in ktime_get_raw()
    1211  base = READ_ONCE(cs->base);  in convert_base_to_cs()
    1212  if (!base || base->id != scv->cs_id)  in convert_base_to_cs()
    1234  base = READ_ONCE(cs->base);  in convert_cs_to_base()
    1235  if (!base || base->id != base_id)  in convert_cs_to_base()
    2552  base = tk->tkr_mono.base;  in ktime_get_update_offsets_now()
    [all …]
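
Lines 419-431 are the fast timekeeper's two-copy update: the base array holds two copies of the readout data, and a sequence counter steers a reader racing an update to whichever copy is stable. A userspace sketch of such a latch, with hypothetical names and deliberately simplified memory ordering compared to the kernel's seqcount_latch:

```c
#include <stdatomic.h>
#include <stdio.h>

struct readout { long cycles; long offset; };

struct fast_tk {
    _Atomic unsigned int seq;
    struct readout base[2];      /* two copies, like tkf->base / base + 1 */
};

static void update(struct fast_tk *tkf, const struct readout *tkr)
{
    atomic_fetch_add_explicit(&tkf->seq, 1, memory_order_release);
    tkf->base[0] = *tkr;             /* seq odd: copy 0 is in flux */
    atomic_fetch_add_explicit(&tkf->seq, 1, memory_order_release);
    tkf->base[1] = tkf->base[0];     /* seq even: copy 1 is in flux */
}

static struct readout read_latch(struct fast_tk *tkf)
{
    unsigned int start, end;
    struct readout r;

    do {
        start = atomic_load_explicit(&tkf->seq, memory_order_acquire);
        r = tkf->base[start & 1];    /* odd seq => copy 0 busy, read copy 1 */
        end = atomic_load_explicit(&tkf->seq, memory_order_acquire);
    } while (start != end);          /* a full update raced us: reread */
    return r;
}

int main(void)
{
    static struct fast_tk tkf;
    struct readout now = { .cycles = 42, .offset = 7 };

    update(&tkf, &now);
    printf("cycles=%ld\n", read_latch(&tkf).cycles);
    return 0;
}
```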
|
timer_list.c
      60  print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,  in print_active_timers() (argument)
      73  raw_spin_lock_irqsave(&base->cpu_base->lock, flags);  in print_active_timers()
      75  curr = timerqueue_getnext(&base->active);  in print_active_timers()
      89  raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);  in print_active_timers()
      95  raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);  in print_active_timers()
      99  print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)  in print_base() (argument)
     101  SEQ_printf(m, " .base: %p\n", base);  in print_base()
     102  SEQ_printf(m, " .index: %d\n", base->index);  in print_base()
     106  SEQ_printf(m, " .get_time: %ps\n", base->get_time);  in print_base()
     109  (unsigned long long) ktime_to_ns(base->offset));  in print_base()
    [all …]
|
vsyscall.c
      18  static inline void fill_clock_configuration(struct vdso_clock *vc, const struct tk_read_base *base)  in fill_clock_configuration() (argument)
      20  vc->cycle_last = base->cycle_last;  in fill_clock_configuration()
      22  vc->max_cycles = base->clock->max_cycles;  in fill_clock_configuration()
      24  vc->mask = base->mask;  in fill_clock_configuration()
      25  vc->mult = base->mult;  in fill_clock_configuration()
      26  vc->shift = base->shift;  in fill_clock_configuration()
|
posix-cpu-timers.c
     438  base->nextevt = 0;  in trigger_base_recalc_expires()
     454  struct posix_cputimer_base *base;  in disarm_timer() (local)
     459  base = timer_base(timer, p);  in disarm_timer()
     460  if (cpu_timer_getexpires(ctmr) == base->nextevt)  in disarm_timer()
     567  struct posix_cputimer_base *base = timer_base(timer, p);  in arm_timer() (local)
     572  if (!cpu_timer_enqueue(&base->tqhead, ctmr))  in arm_timer()
     581  if (newexp < base->nextevt)  in arm_timer()
     582  base->nextevt = newexp;  in arm_timer()
     829  struct posix_cputimer_base *base = pct->bases;  in collect_posix_cputimers() (local)
     832  for (i = 0; i < CPUCLOCK_MAX; i++, base++) {  in collect_posix_cputimers()
    [all …]
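
The arm_timer() hits (581, 582) and trigger_base_recalc_expires() (438) show the expiry cache: each base keeps the earliest queued expiry in `nextevt` so the hot path tests one word instead of walking the timer queue, and setting `nextevt` to 0 forces a recalculation pass. A tiny sketch under those assumptions (types hypothetical):

```c
#include <stdio.h>

struct cputimer_base {
    unsigned long long nextevt;  /* earliest expiry of all queued timers */
};

static void arm(struct cputimer_base *base, unsigned long long newexp)
{
    if (newexp < base->nextevt)
        base->nextevt = newexp;  /* new timer expires first: refresh cache */
}

static void trigger_recalc(struct cputimer_base *base)
{
    base->nextevt = 0;  /* 0 reads as "already expired": forces a rescan */
}

int main(void)
{
    struct cputimer_base base = { .nextevt = ~0ULL };  /* ~U64_MAX: empty */

    arm(&base, 500);
    arm(&base, 900);    /* later than the cached value: no change */
    printf("nextevt=%llu\n", base.nextevt);
    trigger_recalc(&base);
    return 0;
}
```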
|
/kernel/trace/
trace_probe_kernel.h
      40  static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base)  in set_data_loc() (argument)
      44  *(u32 *)dest = make_data_loc(ret, __dest - base);  in set_data_loc()
      52  fetch_store_string_user(unsigned long addr, void *dest, void *base)  in fetch_store_string_user() (argument)
      62  __dest = get_loc_data(dest, base);  in fetch_store_string_user()
      65  set_data_loc(ret, dest, __dest, base);  in fetch_store_string_user()
      75  fetch_store_string(unsigned long addr, void *dest, void *base)  in fetch_store_string() (argument)
      83  return fetch_store_string_user(addr, dest, base);  in fetch_store_string()
      89  __dest = get_loc_data(dest, base);  in fetch_store_string()
      96  set_data_loc(ret, dest, __dest, base);  in fetch_store_string()
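
set_data_loc() records a fetched string as a "data_loc": the string bytes land in the dynamic area after an event's fixed-size fields, and the fixed field packs length and offset into a single u32. A hedged sketch of that encoding; the authoritative packing is the kernel's make_data_loc() in trace_probe.h, which the helper below mirrors to the best of my reading:

```c
#include <stdint.h>
#include <stdio.h>

/* Pack a dynamic-data reference: length in the high 16 bits, offset from
 * the record base in the low 16 bits (assumed layout, see lead-in). */
static uint32_t make_data_loc(int len, int offs)
{
    return ((uint32_t)len << 16) | ((uint32_t)offs & 0xffff);
}

int main(void)
{
    uint32_t loc = make_data_loc(5, 40);  /* 5 bytes stored at offset 40 */

    printf("len=%u offs=%u\n", loc >> 16, loc & 0xffff);
    return 0;
}
```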
|
trace_probe_tmpl.h
      58  void *dest, void *base);
      61  fetch_store_string(unsigned long addr, void *dest, void *base);
      88  fetch_store_symstring(unsigned long addr, void *dest, void *base)  in fetch_store_symstring() (argument)
      96  __dest = get_loc_data(dest, base);  in fetch_store_symstring()
     124  void *dest, void *base)  in process_fetch_insn_bottom() (argument)
     183  ret = fetch_store_string(val + code->offset, dest, base);  in process_fetch_insn_bottom()
     187  ret = fetch_store_string_user(val + code->offset, dest, base);  in process_fetch_insn_bottom()
     191  ret = fetch_store_symstring(val + code->offset, dest, base);  in process_fetch_insn_bottom()
     258  void *base = data - header_size;  in store_trace_args() (local)
     268  *dl = make_data_loc(maxlen, dyndata - base);  in store_trace_args()
    [all …]
|
trace_uprobe.c
     147  fetch_store_string(unsigned long addr, void *dest, void *base)  in fetch_store_string() (argument)
     152  u8 *dst = get_loc_data(dest, base);  in fetch_store_string()
     172  *(u32 *)dest = make_data_loc(ret, (void *)dst - base);  in fetch_store_string()
     174  *(u32 *)dest = make_data_loc(0, (void *)dst - base);  in fetch_store_string()
     180  fetch_store_string_user(unsigned long addr, void *dest, void *base)  in fetch_store_string_user() (argument)
     182  return fetch_store_string(addr, dest, base);  in fetch_store_string_user()
     220  void *dest, void *base)  in process_fetch_insn() (argument)
     253  return process_fetch_insn_bottom(code, val, dest, base);  in process_fetch_insn()
|
/kernel/locking/
mutex.c
     297  ww = container_of(lock, struct ww_mutex, base);  in ww_mutex_spin_on_owner()
     554  mutex_unlock(&lock->base);  in ww_mutex_unlock()
     579  ww = container_of(lock, struct ww_mutex, base);  in __mutex_lock_common()
     786  return mutex_trylock(&ww->base);  in ww_mutex_trylock()
     788  MUTEX_WARN_ON(ww->base.magic != &ww->base);  in ww_mutex_trylock()
     798  if (__mutex_trylock(&ww->base)) {  in ww_mutex_trylock()
     885  ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,  in ww_mutex_lock()
     900  ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,  in ww_mutex_lock_interruptible()
    1075  return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,  in __ww_mutex_lock_interruptible_slowpath()
    1123  if (__mutex_trylock_fast(&lock->base)) {  in ww_mutex_lock()
    [all …]
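
Nearly every mutex.c hit goes through one pattern: struct ww_mutex embeds a plain struct mutex named `base`, callers pass `&lock->base` around, and container_of() recovers the enclosing ww_mutex from it. A self-contained sketch of the pointer arithmetic (a simplified container_of without the kernel's type check, and stand-in struct bodies):

```c
#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct mutex { int state; };

struct ww_mutex {
    struct mutex base;   /* embedded plain mutex */
    void *ctx;           /* wound/wait acquire context (stand-in) */
};

int main(void)
{
    struct ww_mutex ww = { .base = { .state = 0 }, .ctx = NULL };
    struct mutex *lock = &ww.base;  /* what the generic code passes around */
    struct ww_mutex *back = container_of(lock, struct ww_mutex, base);

    printf("%s\n", back == &ww ? "recovered enclosing ww_mutex" : "bug");
    return 0;
}
```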
|
ww_mutex.h
     421  if (likely(!__ww_mutex_has_waiters(&lock->base)))  in ww_mutex_set_context_fastpath()
     428  lock_wait_lock(&lock->base, &flags);  in ww_mutex_set_context_fastpath()
     429  __ww_mutex_check_waiters(&lock->base, ctx, &wake_q);  in ww_mutex_set_context_fastpath()
     431  unlock_wait_lock(&lock->base, &flags);  in ww_mutex_set_context_fastpath()
     443  ww = container_of(lock, struct ww_mutex, base);  in __ww_mutex_kill()
     468  struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);  in __ww_mutex_check_kill()
     570  struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);  in __ww_mutex_add_waiter()
|
ww_rt_mutex.c
      14  struct rt_mutex *rtm = &lock->base;  in ww_mutex_trylock()
      42  struct rt_mutex *rtm = &lock->base;  in __ww_rt_mutex_lock()
      94  struct rt_mutex *rtm = &lock->base;  in ww_mutex_unlock()
|
qspinlock.h
      72  struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)  in grab_mcs_node() (argument)
      74  return &((struct qnode *)base + idx)->mcs;  in grab_mcs_node()
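
grab_mcs_node() is a two-line lesson in array stride: the per-CPU nodes are really struct qnode (which embeds the mcs member and may carry extra fields), so the index must be applied after casting back to the outer type, or the pointer arithmetic would step by the wrong size. A sketch with a hypothetical extra field:

```c
#include <stdio.h>

struct mcs_spinlock { int locked; };

struct qnode {
    struct mcs_spinlock mcs;
    long pv_state;  /* hypothetical extra per-node data */
};

static struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
    /* Cast to the real element type before indexing so the stride is
     * sizeof(struct qnode), not sizeof(struct mcs_spinlock). */
    return &((struct qnode *)base + idx)->mcs;
}

int main(void)
{
    static struct qnode nodes[4];

    printf("%s\n", grab_mcs_node(&nodes[0].mcs, 2) == &nodes[2].mcs
                   ? "stride uses qnode size" : "bug");
    return 0;
}
```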
|
/kernel/module/
kdb.c
      44  kdb_printf(" 0x%px", mod->mem[MOD_TEXT].base);  in kdb_lsmod()
      45  kdb_printf("/0x%px", mod->mem[MOD_RODATA].base);  in kdb_lsmod()
      46  kdb_printf("/0x%px", mod->mem[MOD_RO_AFTER_INIT].base);  in kdb_lsmod()
      47  kdb_printf("/0x%px", mod->mem[MOD_DATA].base);  in kdb_lsmod()
|
strict_rwx.c
      20  if (!mod_mem->base)  in module_set_memory()
      23  set_vm_flush_reset_perms(mod_mem->base);  in module_set_memory()
      24  return set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);  in module_set_memory()
      40  ret = execmem_restore_rox(mem->base, mem->size);  in module_enable_text_rox()
|
main.c
     100  unsigned long min = (unsigned long)base;  in __mod_update_bounds()
     332  #define symversion(base, idx) NULL  (argument)
     334  #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)  (argument)
    1346  mod->mem[type].base = ptr;  in module_memory_alloc()
    1357  execmem_restore_rox(mem->base, mem->size);  in module_memory_restore_rox()
    1365  execmem_free(mem->base);  in module_memory_free()
    2732  mod->mem[type].base = NULL;  in move_module()
    2774  dest = mod->mem[type].base + offset;  in move_module()
    2836  flush_icache_range((unsigned long)mod_mem->base,  in flush_module_icache()
    3089  mod->mem[type].base = NULL;  in do_init_module()
    [all …]
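
The two symversion() definitions at 332/334 are the CONFIG_MODVERSIONS off/on variants; the interesting one guards its pointer arithmetic so a module without a CRC table (base == NULL) yields NULL instead of computing an offset from a null pointer. A runnable illustration of that guard (table contents hypothetical):

```c
#include <stdio.h>

/* The CONFIG_MODVERSIONS=y variant from the listing above. */
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)

int main(void)
{
    unsigned int crc_table[] = { 0x12345678, 0x9abcdef0 };
    unsigned int *crcs = crc_table;  /* hypothetical per-module CRC table */
    unsigned int *none = NULL;       /* module built without versions */

    printf("%p %p\n", (void *)symversion(crcs, 1),
                      (void *)symversion(none, 1));
    return 0;
}
```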
|
debug_kmemleak.c
      19  kmemleak_no_scan(mod->mem[type].base);  in kmemleak_load_module()
|
kallsyms.c
     178  void *data_base = mod->mem[MOD_DATA].base;  in add_kallsyms()
     179  void *init_data_base = mod->mem[MOD_INIT_DATA].base;  in add_kallsyms()
     269  nextval = (unsigned long)mod_mem->base + mod_mem->size;  in find_kallsyms_symbol()
|
/kernel/irq/
devres.c
     183  int base;  in __devm_irq_alloc_descs() (local)
     189  base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);  in __devm_irq_alloc_descs()
     190  if (base < 0) {  in __devm_irq_alloc_descs()
     192  return base;  in __devm_irq_alloc_descs()
     195  dr->from = base;  in __devm_irq_alloc_descs()
     199  return base;  in __devm_irq_alloc_descs()
|
ipi.c
     290  unsigned int base = data->irq;  in __ipi_send_mask() (local)
     293  unsigned irq = base + cpu - data->common->ipi_offset;  in __ipi_send_mask()
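
The arithmetic at line 293 maps a CPU to its IPI's IRQ: an IPI is backed by a block of consecutive IRQs, one per covered CPU, so the target IRQ is the base IRQ plus the CPU's distance from the first CPU the block covers. A sketch mirroring the snippet's names:

```c
#include <stdio.h>

/* base: first IRQ of the IPI block; ipi_offset: first CPU it covers. */
static unsigned int ipi_irq(unsigned int base, unsigned int cpu,
                            unsigned int ipi_offset)
{
    return base + cpu - ipi_offset;
}

int main(void)
{
    /* A block starting at IRQ 32 covering CPUs 4..7: CPU 6 -> IRQ 34. */
    printf("irq=%u\n", ipi_irq(32, 6, 4));
    return 0;
}
```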
|
/kernel/dma/
contiguous.c
     255  dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)  in dma_contiguous_early_fixup() (argument)
     276  int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,  in dma_contiguous_reserve_area() (argument)
     282  ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,  in dma_contiguous_reserve_area()
     476  if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {  in rmem_cma_setup()
     481  err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);  in rmem_cma_setup()
     487  dma_contiguous_early_fixup(rmem->base, rmem->size);  in rmem_cma_setup()
     496  &rmem->base, (unsigned long)rmem->size / SZ_1M);  in rmem_cma_setup()
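
The check at line 476 validates base and size in one go: OR-ing them lets a single IS_ALIGNED() test catch a misaligned low bit in either value, since any set bit survives the OR. A small demonstration (the alignment value here is hypothetical; the kernel's CMA_MIN_ALIGNMENT_BYTES is arch-dependent):

```c
#include <stdio.h>

/* Power-of-two alignment test, matching the kernel macro's shape. */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
    unsigned long base = 0x40000000;   /* hypothetical reserved region */
    unsigned long size = 0x00400000;
    unsigned long align = 1UL << 22;   /* e.g. 4 MiB minimum alignment */

    /* One test covers both operands: a stray bit in either fails it. */
    printf("aligned: %d\n", IS_ALIGNED(base | size, align));
    return 0;
}
```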
|
coherent.c
     342  mem = dma_init_coherent_memory(rmem->base, rmem->base,  in rmem_dma_device_init()
     388  dma_reserved_default_memory_base = rmem->base;  in rmem_dma_setup()
     395  &rmem->base, (unsigned long)rmem->size / SZ_1M);  in rmem_dma_setup()
|
/kernel/
resource.c
    1882  return ALIGN(max(base->start, align), align);  in gfr_start()
    1889  return addr > size && addr >= base->start;  in gfr_continue()
    1916  get_free_mem_region(struct device *dev, struct resource *base,  in get_free_mem_region() (argument)
    1944  for (addr = gfr_start(base, size, align, flags);  in get_free_mem_region()
    1945  gfr_continue(base, addr, align, flags);  in get_free_mem_region()
    1979  if (__insert_resource(base, res) || res->child)  in get_free_mem_region()
    2010  struct resource *base, unsigned long size)  in devm_request_free_mem_region() (argument)
    2014  return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,  in devm_request_free_mem_region()
    2020  struct resource *request_free_mem_region(struct resource *base,  in request_free_mem_region() (argument)
    2042  struct resource *alloc_free_mem_region(struct resource *base,  in alloc_free_mem_region() (argument)
    [all …]
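
gfr_start() at 1882 computes the first candidate address when scanning a base resource for a free region: the scan begins at the resource's start, never below the alignment value itself, rounded up to that alignment. A sketch of just that computation (the real function handles more cases, such as flag-dependent scan direction):

```c
#include <stdio.h>

/* Round x up to the next multiple of a power-of-two alignment. */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

static unsigned long gfr_start(unsigned long base_start, unsigned long align)
{
    /* max(base->start, align), then round up, as in the snippet above */
    unsigned long start = base_start > align ? base_start : align;

    return ALIGN(start, align);
}

int main(void)
{
    printf("%#lx\n", gfr_start(0x1234, 0x1000));  /* -> 0x2000 */
    return 0;
}
```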
|
/kernel/gcov/
Makefile
       4  obj-y := base.o fs.o
|