/kernel/bpf/

disasm.c
    143: insn->code, insn->dst_reg,   [in print_bpf_end_insn()]
    159: return (BPF_OP(insn->code) == BPF_DIV || BPF_OP(insn->code) == BPF_MOD) &&   [in is_sdiv_smod()]
    231: insn->code,   [in print_bpf_insn()]
    239: insn->code,   [in print_bpf_insn()]
    258: insn->code,   [in print_bpf_insn()]
    279: insn->code,   [in print_bpf_insn()]
    288: insn->code,   [in print_bpf_insn()]
    298: if (BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) {   [in print_bpf_insn()]
    311: insn->code,   [in print_bpf_insn()]
    316: insn->code,   [in print_bpf_insn()]
    [further matches omitted]
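
The disasm.c hits above all pick apart the single opcode byte of a BPF instruction with the BPF_CLASS/BPF_OP/BPF_SRC/BPF_MODE/BPF_SIZE helpers. A minimal sketch of that decomposition, assuming only the UAPI field macros; it mirrors checks like is_sdiv_smod() only loosely and is not kernel code:

#include <linux/bpf.h>
#include <linux/filter.h>

/*
 * Sketch: pull the standard sub-fields out of insn->code.  For the
 * ALU/JMP classes the upper nibble is the operation (BPF_OP) and bit 3
 * selects immediate vs. register source (BPF_SRC); for the LD/ST
 * classes the same bits split into addressing mode (BPF_MODE) and
 * access size (BPF_SIZE).
 */
static void describe_opcode(const struct bpf_insn *insn)
{
	u8 code  = insn->code;
	u8 class = BPF_CLASS(code);		/* code & 0x07: LD, LDX, ST, STX, ALU, JMP, ... */

	if (class == BPF_ALU || class == BPF_ALU64 ||
	    class == BPF_JMP || class == BPF_JMP32) {
		pr_debug("op=%#x src=%#x\n",
			 BPF_OP(code),		/* ADD, MOV, DIV, JEQ, CALL, EXIT, ... */
			 BPF_SRC(code));	/* BPF_K (immediate) or BPF_X (register) */
	} else {
		pr_debug("mode=%#x size=%#x\n",
			 BPF_MODE(code),	/* IMM, MEM, MEMSX, ATOMIC, ... */
			 BPF_SIZE(code));	/* BPF_B, BPF_H, BPF_W, BPF_DW */
	}
}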

core.c
    327: dst[i].code == 0 &&   [in bpf_prog_calc_tag()]
    424: u8 code;   [in bpf_adj_branches(), local]
    441: code = insn->code;   [in bpf_adj_branches()]
    442: if ((BPF_CLASS(code) != BPF_JMP &&   [in bpf_adj_branches()]
    444: BPF_OP(code) == BPF_EXIT)   [in bpf_adj_branches()]
    447: if (BPF_OP(code) == BPF_CALL) {   [in bpf_adj_branches()]
    1346: switch (from->code) {   [in bpf_jit_blind_insn()]
    1519: insn[1].code == 0)   [in bpf_jit_blind_constants()]
    1727: return public_insntable[code];   [in bpf_opcode_in_insntable()]
    1768: goto *jumptable[insn->code];   [in ___bpf_prog_run()]
    [further matches omitted]
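
The ___bpf_prog_run() hit at line 1768 shows how the interpreter dispatches: the opcode byte indexes a table of label addresses (GCC computed goto and range initializers). A self-contained userspace sketch of that dispatch pattern, with placeholder handlers rather than the kernel's jump table; the two opcode values happen to match BPF_ALU|BPF_ADD|BPF_K and BPF_JMP|BPF_EXIT, but nothing else here is the kernel's code:

#include <stdint.h>
#include <stdio.h>

struct insn { uint8_t code; int32_t imm; };

/* Advance to the next instruction and jump straight to its handler. */
#define CONT ({ insn++; goto *jumptable[insn->code]; })

static int64_t run(const struct insn *insn)
{
	static const void *jumptable[256] = {
		[0 ... 255] = &&unknown,
		[0x04] = &&alu_add_imm,		/* accumulator += imm */
		[0x95] = &&prog_exit,		/* return accumulator */
	};
	int64_t acc = 0;

	goto *jumptable[insn->code];

alu_add_imm:
	acc += insn->imm;
	CONT;
prog_exit:
	return acc;
unknown:
	fprintf(stderr, "unknown opcode %#x\n", insn->code);
	return -1;
}

int main(void)
{
	struct insn prog[] = { { 0x04, 5 }, { 0x04, 7 }, { 0x95, 0 } };

	printf("%lld\n", (long long)run(prog));	/* prints 12 */
	return 0;
}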

verifier.c
    3499: u8 code = insn->code;   [in jmp_offset(), local]
    3517: u8 code = insn[i].code;   [in check_subprogs(), local]
    3530: if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)   [in check_subprogs()]
    3671: code = insn->code;   [in is_reg64()]
    20801: u8 code = insn->code;   [in adjust_jmp_off(), local]
    20807: BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)   [in adjust_jmp_off()]
    21125: code = insn.code;   [in opt_subreg_zext_lo32_rnd_hi32()]
    21406: insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);   [in convert_ctx_accesses()]
    22056: insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);   [in do_misc_fixups()]
    22264: insn->code = insn->code == code_add ?   [in do_misc_fixups()]
    [further matches omitted]
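
The convert_ctx_accesses() and do_misc_fixups() hits rebuild insn->code in place, keeping some sub-fields and replacing others. A hedged sketch of that rewrite style using the same mask helpers; these are simplified helpers for illustration, not the verifier's own logic:

#include <linux/filter.h>

/* Keep class and access size, swap in a different addressing mode. */
static void retag_mode(struct bpf_insn *insn, u8 new_mode)
{
	insn->code = BPF_CLASS(insn->code) | new_mode | BPF_SIZE(insn->code);
}

/* Keep the operation and the K/X source selector, narrow ALU64 to 32-bit ALU. */
static void narrow_to_alu32(struct bpf_insn *insn)
{
	insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);
}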

syscall.c
    4777: u8 code;   [in bpf_insn_prepare_dump(), local]
    4786: code = insns[i].code;   [in bpf_insn_prepare_dump()]
    4789: insns[i].code = BPF_JMP | BPF_CALL;   [in bpf_insn_prepare_dump()]
    4793: if (code == (BPF_JMP | BPF_CALL) ||   [in bpf_insn_prepare_dump()]
    4795: if (code == (BPF_JMP | BPF_CALL_ARGS))   [in bpf_insn_prepare_dump()]
    4796: insns[i].code = BPF_JMP | BPF_CALL;   [in bpf_insn_prepare_dump()]
    4801: if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {   [in bpf_insn_prepare_dump()]
    4802: insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;   [in bpf_insn_prepare_dump()]
    4806: if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||   [in bpf_insn_prepare_dump()]
    4807: BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {   [in bpf_insn_prepare_dump()]
    [further matches omitted]
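
bpf_insn_prepare_dump() maps verifier-internal instruction forms back to ABI opcodes before a program image is handed to userspace. A simplified sketch of one such case, mirroring the BPF_PROBE_MEM hit above; the loop structure and function name are illustrative:

#include <linux/filter.h>

static void sanitize_for_dump(struct bpf_insn *insns, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		u8 code = insns[i].code;

		/* internal probe loads go back to plain BPF_MEM loads */
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM)
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
	}
}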

cgroup.c
    2430: BPF_SIZE(si->code), si->dst_reg, si->src_reg,   [in sysctl_convert_ctx_access()]
    2457: BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32),   [in sysctl_convert_ctx_access()]
    2470: read_size = bpf_size_to_bytes(BPF_SIZE(si->code));   [in sysctl_convert_ctx_access()]
    2472: BPF_SIZE(si->code), si->dst_reg, si->dst_reg,   [in sysctl_convert_ctx_access()]
    2614: BPF_MEM | BPF_CLASS(si->code)), \
    2667: *insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM |   [in cg_sockopt_convert_ctx_access()]

/kernel/trace/

trace_probe_tmpl.h
    9: switch (code->size) {   [in fetch_store_raw()]
    146: code++;   [in process_fetch_insn_bottom()]
    149: s3 = code;   [in process_fetch_insn_bottom()]
    156: code++;   [in process_fetch_insn_bottom()]
    160: code++;   [in process_fetch_insn_bottom()]
    164: code++;   [in process_fetch_insn_bottom()]
    196: code++;   [in process_fetch_insn_bottom()]
    201: code++;   [in process_fetch_insn_bottom()]
    211: code = s3;   [in process_fetch_insn_bottom()]
    218: code--;   [in process_fetch_insn_bottom()]
    [further matches omitted]
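
Throughout the trace-probe files here, "code" is a pointer into an array of fetch_insn micro-ops describing how to fetch one probe argument; process_fetch_insn_bottom() simply steps that pointer forward (code++) until the terminating op. A rough sketch of that walk, assuming the fetch_insn layout and FETCH_OP_* names from kernel/trace/trace_probe.h; the per-op handling is illustrative only:

#include "trace_probe.h"	/* local kernel/trace header: fetch_insn, FETCH_OP_* */

static int count_fetch_ops(struct fetch_insn *code)
{
	int n = 0;

	/* every argument's op array is terminated by FETCH_OP_END */
	while (code->op != FETCH_OP_END) {
		if (code->op == FETCH_OP_DEREF)
			/* deref ops carry an offset applied before the next load */
			pr_debug("op %d: deref at +%d\n", n, code->offset);
		code++;
		n++;
	}
	return n;
}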

trace_probe.c
    746: code++;   [in parse_btf_bitfield()]
    919: struct fetch_insn *code = &earg->code[i];   [in store_trace_entry_data(), local]
    1275: code++;   [in __parse_bitfield_probe_arg()]
    1541: for (; code < tmp + FETCH_INSN_MAX; code++)   [in traceprobe_parse_probe_arg_body()]
    1545: parg->code = kcalloc(code - tmp + 1, sizeof(*code), GFP_KERNEL);   [in traceprobe_parse_probe_arg_body()]
    1549: memcpy(parg->code, tmp, sizeof(*code) * (code - tmp + 1));   [in traceprobe_parse_probe_arg_body()]
    1553: for (code = tmp; code < tmp + FETCH_INSN_MAX; code++)   [in traceprobe_parse_probe_arg_body()]
    1647: struct fetch_insn *code = arg->code;   [in traceprobe_free_probe_arg(), local]
    1649: while (code && code->op != FETCH_OP_END) {   [in traceprobe_free_probe_arg()]
    1862: struct fetch_insn *code = arg->code;   [in traceprobe_update_arg(), local]
    [further matches omitted]

trace_eprobe.c
    363: struct fetch_insn *code;   [in get_eprobe_size(), local]
    372: code = arg->code;   [in get_eprobe_size()]
    374: switch (code->op) {   [in get_eprobe_size()]
    376: val = get_event_field(code, rec);   [in get_eprobe_size()]
    379: code++;   [in get_eprobe_size()]
    385: code++;   [in get_eprobe_size()]
    406: switch (code->op) {   [in process_fetch_insn()]
    408: val = get_event_field(code, rec);   [in process_fetch_insn()]
    411: code++;   [in process_fetch_insn()]
    414: ret = process_common_fetch_insn(code, &val);   [in process_fetch_insn()]
    [further matches omitted]

trace_fprobe.c
    286: switch (code->op) {   [in process_fetch_insn()]
    288: val = ftrace_regs_get_kernel_stack_nth(fregs, code->param);   [in process_fetch_insn()]
    298: val = ftrace_regs_get_argument(fregs, code->param);   [in process_fetch_insn()]
    301: val = *(unsigned long *)((unsigned long)edata + code->offset);   [in process_fetch_insn()]
    305: code++;   [in process_fetch_insn()]
    308: ret = process_common_fetch_insn(code, &val);   [in process_fetch_insn()]
    312: code++;   [in process_fetch_insn()]
    314: return process_fetch_insn_bottom(code, val, dest, base);   [in process_fetch_insn()]
    372: struct fetch_insn *code = &earg->code[i];   [in store_fprobe_entry_data(), local]
    374: switch (code->op) {   [in store_fprobe_entry_data()]
    [further matches omitted]

trace_kprobe.c
    1418: process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,   [in process_fetch_insn(), argument]
    1427: switch (code->op) {   [in process_fetch_insn()]
    1429: val = regs_get_register(regs, code->param);   [in process_fetch_insn()]
    1432: val = regs_get_kernel_stack_nth(regs, code->param);   [in process_fetch_insn()]
    1442: val = regs_get_kernel_argument(regs, code->param);   [in process_fetch_insn()]
    1445: val = *(unsigned long *)((unsigned long)edata + code->offset);   [in process_fetch_insn()]
    1449: code++;   [in process_fetch_insn()]
    1452: ret = process_common_fetch_insn(code, &val);   [in process_fetch_insn()]
    1456: code++;   [in process_fetch_insn()]
    1458: return process_fetch_insn_bottom(code, val, dest, base);   [in process_fetch_insn()]
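
Each probe flavour supplies its own process_fetch_insn() "top half" that decodes the first op to pull a raw value out of the trapped context, then defers to the shared bottom half. A simplified sketch of the kprobe-style shape above; error handling and most op types are omitted, and the function name is made up:

#include <linux/ptrace.h>
#include "trace_probe.h"	/* local kernel/trace header: fetch_insn, FETCH_OP_* */

static int fetch_arg_sketch(struct fetch_insn *code, struct pt_regs *regs,
			    void *dest, void *base)
{
	unsigned long val;

	switch (code->op) {
	case FETCH_OP_REG:		/* read a saved register */
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:		/* read the Nth word on the kernel stack */
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_IMM:		/* literal value */
		val = code->immediate;
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* derefs, string/memory stores etc. are handled by the common path */
	return process_fetch_insn_bottom(code, val, dest, base);
}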

trace_uprobe.c
    219: process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,   [in process_fetch_insn(), argument]
    227: switch (code->op) {   [in process_fetch_insn()]
    229: val = regs_get_register(regs, code->param);   [in process_fetch_insn()]
    232: val = get_user_stack_nth(regs, code->param);   [in process_fetch_insn()]
    244: val = translate_user_vaddr(code->immediate);   [in process_fetch_insn()]
    247: ret = process_common_fetch_insn(code, &val);   [in process_fetch_insn()]
    251: code++;   [in process_fetch_insn()]
    253: return process_fetch_insn_bottom(code, val, dest, base);   [in process_fetch_insn()]

trace_probe.h
    229: struct fetch_insn *code;   [member]
    240: struct fetch_insn *code;   [member]

/kernel/

Kconfig.preempt
    43: "explicit preemption points" to the kernel code. These new
    62: all kernel code (that is not executing in a critical section)
    69: and a slight runtime overhead to kernel code.
    99: low level and critical code paths (entry code, scheduler, low

seccomp.c
    283: u16 code = ftest->code;   [in seccomp_check_filter(), local]
    286: switch (code) {   [in seccomp_check_filter()]
    288: ftest->code = BPF_LDX | BPF_W | BPF_ABS;   [in seccomp_check_filter()]
    294: ftest->code = BPF_LD | BPF_IMM;   [in seccomp_check_filter()]
    298: ftest->code = BPF_LDX | BPF_IMM;   [in seccomp_check_filter()]
    771: u16 code = insn->code;   [in seccomp_is_const_allow(), local]
    774: switch (code) {   [in seccomp_is_const_allow()]
    798: switch (BPF_OP(code)) {   [in seccomp_is_const_allow()]
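
seccomp_check_filter() validates and rewrites the 16-bit classic-BPF opcodes of a user-supplied seccomp filter. A userspace sketch of the kind of program it operates on, built with the UAPI BPF_STMT/BPF_JUMP macros; the allowed syscall and errno value are arbitrary, and a real filter should also check seccomp_data.arch first:

#include <stddef.h>
#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/syscall.h>

static struct sock_filter filter[] = {
	/* load the syscall number: code = BPF_LD | BPF_W | BPF_ABS */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
	/* allow getpid, fail everything else with EPERM */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
};

static struct sock_fprog prog = {
	.len    = sizeof(filter) / sizeof(filter[0]),
	.filter = filter,
};

Installing such a program is typically done with prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) followed by prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog).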

jump_label.c
    70: jea->code = jeb->code - delta;   [in jump_label_swap()]
    74: jeb->code = tmp.code + delta;   [in jump_label_swap()]

exit.c
    863: static void synchronize_group_exit(struct task_struct *tsk, long code)   [in synchronize_group_exit(), argument]
    874: signal->group_exit_code = code;   [in synchronize_group_exit()]
    892: void __noreturn do_exit(long code)   [in do_exit(), argument]
    903: synchronize_group_exit(tsk, code);   [in do_exit()]
    904: ptrace_event(PTRACE_EVENT_EXIT, code);   [in do_exit()]
    921: tsk->signal->group_exit_code ?: (int)code);   [in do_exit()]
    930: acct_collect(code, group_dead);   [in do_exit()]
    935: tsk->exit_code = code;   [in do_exit()]

signal.c
    1697: info.si_code = code;   [in force_sig_fault_to_task()]
    1702: int force_sig_fault(int sig, int code, void __user *addr)   [in force_sig_fault(), argument]
    1704: return force_sig_fault_to_task(sig, code, addr, current);   [in force_sig_fault()]
    1714: info.si_code = code;   [in send_sig_fault()]
    1719: int force_sig_mceerr(int code, void __user *addr, short lsb)   [in force_sig_mceerr(), argument]
    1723: WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));   [in force_sig_mceerr()]
    1727: info.si_code = code;   [in force_sig_mceerr()]
    1737: WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));   [in send_sig_mceerr()]
    1741: info.si_code = code;   [in send_sig_mceerr()]
    1851: info.si_code = code;   [in force_sig_fault_trapno()]
    [further matches omitted]
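
In these signal.c helpers, "code" is the si_code value that ends up in the delivered siginfo. A sketch of the typical caller side of force_sig_fault(), as an architecture fault path might use it; the helper name and the mapped/unmapped distinction are illustrative:

#include <linux/sched/signal.h>

static void report_bad_user_access(void __user *addr, bool mapped)
{
	/* si_code tells userspace why the access faulted */
	int code = mapped ? SEGV_ACCERR : SEGV_MAPERR;

	force_sig_fault(SIGSEGV, code, addr);
}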

auditsc.c
    1870: int success, long code)   [in audit_return_fixup(), argument]
    1879: if (unlikely(code <= -ERESTARTSYS) &&   [in audit_return_fixup()]
    1880: (code >= -ERESTART_RESTARTBLOCK) &&   [in audit_return_fixup()]
    1881: (code != -ENOIOCTLCMD))   [in audit_return_fixup()]
    1884: ctx->return_code = code;   [in audit_return_fixup()]
    1933: void __audit_uring_exit(int success, long code)   [in __audit_uring_exit(), argument]
    1943: audit_return_fixup(ctx, success, code);   [in __audit_uring_exit()]
    2997: void audit_seccomp(unsigned long syscall, long signr, int code)   [in audit_seccomp(), argument]
    3007: in_compat_syscall(), KSTK_EIP(current), code);   [in audit_seccomp()]

Kconfig.kexec
    33: is properly shutdown, so do not be surprised if this code does not
    95: code in physical address mode via KEXEC

/kernel/gcov/

Kconfig
    11: This option enables gcov-based code profiling (e.g. for code coverage

/kernel/power/

Kconfig
    173: This option changes the behavior of various sleep-sensitive code to deal
    176: Saying Y here, disables code paths that most users really should keep
    223: code. This is helpful when debugging and reporting PM bugs, like
    287: This enables code to save the last PM event point across
    291: The architecture specific code must provide the extern
    305: This enables some cheesy code to save the last PM event point in the

/kernel/livepatch/

Kconfig
    20: to new function code contained in the patch module.

/kernel/module/

Kconfig
    7: Kernel modules are small pieces of compiled code which can
    66: Module autoloading allows in-kernel code to request modules through
    181: bool "genksyms (from source code)"
    183: Calculate symbol versions from pre-processed source code using
    425: When kernel code requests a module, it does so by calling
    443: (especially when using LTO) for optimizing the code and reducing

/kernel/rcu/

Kconfig
    87: This option enables generic infrastructure code supporting
    136: CPU hotplug code paths. It can force IPIs on online CPUs,
    148: This option enables RCU CPU stall code that is common between
    171: code paths on small(er) systems.

/kernel/dma/

Kconfig
    15: # IOMMU drivers that can bypass the IOMMU code and optionally use the direct
    152: # Fallback to arch code for DMA allocations. This should eventually go away.