/kernel/livepatch/

patch.c
  27: struct klp_ops *ops;  (in klp_find_ops(), local)
  34: return ops;  (in klp_find_ops())
  45: struct klp_ops *ops;  (in klp_ftrace_handler(), local)
  129: struct klp_ops *ops;  (in klp_unpatch_func(), local)
  137: if (WARN_ON(!ops))  (in klp_unpatch_func())
  152: kfree(ops);  (in klp_unpatch_func())
  162: struct klp_ops *ops;  (in klp_patch_func(), local)
  172: if (!ops) {  (in klp_patch_func())
  182: ops = kzalloc(sizeof(*ops), GFP_KERNEL);  (in klp_patch_func())
  183: if (!ops)  (in klp_patch_func())
  [all …]
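
These hits are livepatch's internal bookkeeping: klp_patch_func() kzalloc()s one struct klp_ops per patched function site and klp_unpatch_func() frees it again. For context, a minimal consumer of that machinery, closely modelled on the in-tree samples/livepatch/livepatch-sample.c (the patched function is the sample's choice, not a requirement):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/livepatch.h>
    #include <linux/seq_file.h>

    /* Replacement body for the function being patched. */
    static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "patched\n");
            return 0;
    }

    static struct klp_func funcs[] = {
            {
                    .old_name = "cmdline_proc_show",
                    .new_func = livepatch_cmdline_proc_show,
            }, { }
    };

    static struct klp_object objs[] = {
            {
                    /* name == NULL means the function lives in vmlinux */
                    .funcs = funcs,
            }, { }
    };

    static struct klp_patch patch = {
            .mod  = THIS_MODULE,
            .objs = objs,
    };

    static int __init livepatch_init(void)
    {
            /* this is what eventually reaches klp_patch_func() above */
            return klp_enable_patch(&patch);
    }
    module_init(livepatch_init);
    MODULE_LICENSE("GPL");
    MODULE_INFO(livepatch, "Y");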
|
/kernel/dma/

mapping.c
  126: if (likely(!ops))  (in dma_go_direct())
  475: else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&  (in dma_setup_need_sync())
  476: !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)  (in dma_setup_need_sync())
  579: if (!ops->mmap)  (in dma_mmap_attrs())
  680: else if (ops->free)  (in dma_free_attrs())
  863: if (WARN_ON(ops))  (in dma_supported())
  872: if (ops) {  (in dma_supported())
  892: return !ops;  (in dma_pci_p2pdma_supported())
  971: else if (ops && ops->max_mapping_size)  (in dma_max_mapping_size())
  985: else if (ops && ops->opt_mapping_size)  (in dma_opt_mapping_size())
  [all …]
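
The pattern running through these hits: a NULL result from get_dma_ops() selects the dma-direct fast path, and every optional method is NULL-checked before dispatch. A condensed sketch of that wrapper shape (not the kernel's exact dma_mmap_attrs(); dma_direct_mmap() is the internal helper declared in kernel/dma/direct.h, and error handling is trimmed):

    #include <linux/dma-map-ops.h>

    /* Sketch of the dispatch shape used throughout kernel/dma/mapping.c:
     * NULL ops means "use dma-direct", a NULL method means "unsupported". */
    static int example_mmap(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, dma_addr_t dma_addr, size_t size,
                            unsigned long attrs)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            if (!ops)               /* dma-direct device, no ops table */
                    return dma_direct_mmap(dev, vma, cpu_addr, dma_addr,
                                           size, attrs);
            if (!ops->mmap)         /* optional method, see hit 579 */
                    return -ENXIO;
            return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
    }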
|
ops_helpers.c
  65: const struct dma_map_ops *ops = get_dma_ops(dev);  (in dma_common_alloc_pages(), local)
  78: *dma_handle = ops->map_page(dev, page, 0, size, dir,  (in dma_common_alloc_pages())
  92: const struct dma_map_ops *ops = get_dma_ops(dev);  (in dma_common_free_pages(), local)
  97: else if (ops->unmap_page)  (in dma_common_free_pages())
  98: ops->unmap_page(dev, dma_handle, size, dir,  (in dma_common_free_pages())
|
/kernel/trace/

ftrace.c
  166: ops->func_hash = &ops->local_hash;  (in ftrace_ops_init())
  361: ops->saved_func = ops->func;  (in __register_ftrace_function())
  389: ops->func = ops->saved_func;  (in __unregister_ftrace_function())
  1692: ops != &ftrace_list_end; ops = ops->next) {  (in test_rec_ops_needs_regs())
  1869: ftrace_ops_get_func(ops) == ops->func)  (in __ftrace_hash_rec_update())
  4472: ops, ops->func);  (in t_show())
  4639: iter->ops = ops;  (in ftrace_regex_open())
  5980: if (ops->func || ops->trampoline)  (in register_ftrace_direct())
  7351: for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {  (in referenced_filters())
  7400: if (!tr->ops || !tr->ops->func_hash)  (in clear_mod_from_hashes())
  [all …]
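
__register_ftrace_function() and __unregister_ftrace_function() above maintain the global ftrace_ops list these loops walk. The external entry point is register_ftrace_function(); a minimal client in the style of Documentation/trace/ftrace-uses.rst (the struct ftrace_regs argument is the modern callback form; older kernels passed struct pt_regs):

    #include <linux/ftrace.h>
    #include <linux/module.h>

    /* Called for every function the filter hash selects; must be
     * reentrancy- and NMI-safe, so no sleeping or heavy work here. */
    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
            /* ... */
    }

    static struct ftrace_ops my_ops = {
            .func = my_callback,
    };

    static int __init my_tracer_init(void)
    {
            /* trace only schedule(); without a filter, every function hits */
            ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
            return register_ftrace_function(&my_ops);
    }

    static void __exit my_tracer_exit(void)
    {
            unregister_ftrace_function(&my_ops);
            ftrace_free_filter(&my_ops);
    }

    module_init(my_tracer_init);
    module_exit(my_tracer_exit);
    MODULE_LICENSE("GPL");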
|
trace_functions.c
  58: struct ftrace_ops *ops;  (in ftrace_allocate_ftrace_ops(), local)
  64: ops = kzalloc(sizeof(*ops), GFP_KERNEL);  (in ftrace_allocate_ftrace_ops())
  65: if (!ops)  (in ftrace_allocate_ftrace_ops())
  72: tr->ops = ops;  (in ftrace_allocate_ftrace_ops())
  73: ops->private = tr;  (in ftrace_allocate_ftrace_ops())
  80: kfree(tr->ops);  (in ftrace_free_ftrace_ops())
  81: tr->ops = NULL;  (in ftrace_free_ftrace_ops())
  95: if (!tr->ops)  (in ftrace_create_function_files())
  100: kfree(tr->ops);  (in ftrace_create_function_files())
  154: if (!tr->ops)  (in function_trace_init())
  [all …]
|
ftrace_internal.h
  5: int __register_ftrace_function(struct ftrace_ops *ops);
  6: int __unregister_ftrace_function(struct ftrace_ops *ops);
  15: int ftrace_startup(struct ftrace_ops *ops, int command);
  16: int ftrace_shutdown(struct ftrace_ops *ops, int command);
  24: # define ftrace_startup(ops, command) \  (argument)
  26: int ___ret = __register_ftrace_function(ops); \
  28: (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
  31: # define ftrace_shutdown(ops, command) \  (argument)
  33: int ___ret = __unregister_ftrace_function(ops); \
  35: (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
  [all …]
|
trace_dynevent.c
  61: if (!ops || !ops->create || !ops->show || !ops->is_busy ||  (in dyn_event_register())
  62: !ops->free || !ops->match)  (in dyn_event_register())
  65: INIT_LIST_HEAD(&ops->list);  (in dyn_event_register())
  104: if (type && type != pos->ops)  (in dyn_event_release())
  110: ret = pos->ops->free(pos);  (in dyn_event_release())
  177: if (ev && ev->ops)  (in dyn_event_seq_show())
  178: return ev->ops->show(m, ev);  (in dyn_event_seq_show())
  208: if (type && ev->ops != type)  (in dyn_events_release_all())
  210: if (ev->ops->is_busy(ev)) {  (in dyn_events_release_all())
  216: if (type && ev->ops != type)  (in dyn_events_release_all())
  [all …]
|
trace_dynevent.h
  51: int dyn_event_register(struct dyn_event_operations *ops);
  63: struct dyn_event_operations *ops;  (member)
  69: int dyn_event_init(struct dyn_event *ev, struct dyn_event_operations *ops)  (in dyn_event_init(), argument)
  71: if (!ev || !ops)  (in dyn_event_init())
  75: ev->ops = ops;  (in dyn_event_init())
  84: if (!ev || !ev->ops)  (in dyn_event_add())
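
dyn_event_register() (hit 61 in trace_dynevent.c) rejects an ops table unless all five callbacks are present. A backend-registration sketch; the callback signatures follow the current trace_dynevent.h (older kernels passed argc/argv to .create instead of the raw command string), and the my_* bodies are placeholders:

    #include <linux/seq_file.h>
    #include "trace_dynevent.h"

    static int my_create(const char *raw_command) { return -ECANCELED; }
    static int my_show(struct seq_file *m, struct dyn_event *ev) { return 0; }
    static bool my_is_busy(struct dyn_event *ev) { return false; }
    static int my_free(struct dyn_event *ev) { return 0; }
    static bool my_match(const char *system, const char *event, int argc,
                         const char **argv, struct dyn_event *ev)
    {
            return false;
    }

    /* All five callbacks are mandatory; dyn_event_register() returns
     * -EINVAL if any is NULL, then INIT_LIST_HEAD()s ops->list (hit 65). */
    static struct dyn_event_operations my_dyn_ops = {
            .create  = my_create,   /* parse the dynamic_events command text */
            .show    = my_show,     /* one line per event in the seq_file */
            .is_busy = my_is_busy,  /* refuse removal while in use (hit 210) */
            .free    = my_free,     /* tear the event down (hit 110) */
            .match   = my_match,    /* match "system/event" for deletion */
    };

    static int __init my_dynevent_init(void)
    {
            return dyn_event_register(&my_dyn_ops);
    }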
|
trace_event_perf.c
  436: struct ftrace_ops *ops, struct ftrace_regs *fregs)  (in perf_ftrace_function_call(), argument)
  452: if ((unsigned long)ops->private != smp_processor_id())  (in perf_ftrace_function_call())
  455: event = container_of(ops, struct perf_event, ftrace_ops);  (in perf_ftrace_function_call())
  489: struct ftrace_ops *ops = &event->ftrace_ops;  (in perf_ftrace_function_register(), local)
  491: ops->func = perf_ftrace_function_call;  (in perf_ftrace_function_register())
  492: ops->private = (void *)(unsigned long)nr_cpu_ids;  (in perf_ftrace_function_register())
  494: return register_ftrace_function(ops);  (in perf_ftrace_function_register())
  499: struct ftrace_ops *ops = &event->ftrace_ops;  (in perf_ftrace_function_unregister(), local)
  500: int ret = unregister_ftrace_function(ops);  (in perf_ftrace_function_unregister())
  501: ftrace_free_filter(ops);  (in perf_ftrace_function_unregister())
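
The idiom worth noting here (hits 452 and 492): each perf event embeds its own ftrace_ops and smuggles the armed CPU through ops->private, pre-set to nr_cpu_ids ("no CPU") at registration. A minimal sketch of the same trick, assuming the modern four-argument callback:

    /* Per-instance data via ftrace_ops->private: the shared callback
     * fires on whichever CPU hits the traced function and bails unless
     * this ops instance was armed for the current CPU. */
    static void cpu_gated_callback(unsigned long ip, unsigned long parent_ip,
                                   struct ftrace_ops *ops,
                                   struct ftrace_regs *fregs)
    {
            if ((unsigned long)ops->private != smp_processor_id())
                    return;
            /* ... handle the hit for this CPU's event ... */
    }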
|
tracing_map.c
  371: if (elt->map->ops && elt->map->ops->elt_clear)  (in tracing_map_elt_clear())
  372: elt->map->ops->elt_clear(elt);  (in tracing_map_elt_clear())
  394: if (elt->map->ops && elt->map->ops->elt_free)  (in tracing_map_elt_free())
  395: elt->map->ops->elt_free(elt);  (in tracing_map_elt_free())
  440: if (map->ops && map->ops->elt_alloc) {  (in tracing_map_elt_alloc())
  441: err = map->ops->elt_alloc(elt);  (in tracing_map_elt_alloc())
  460: if (map->ops && map->ops->elt_init)  (in get_free_elt())
  461: map->ops->elt_init(elt);  (in get_free_elt())
  770: const struct tracing_map_ops *ops,  (in tracing_map_create(), argument)
  789: map->ops = ops;  (in tracing_map_create())
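
Every dispatch above is doubly guarded (map->ops && map->ops->hook), so each element-lifecycle hook is individually optional. A client sketch using tracing_map_create() as at hits 770/789; tracing_map.h is the kernel-internal header next to this file, and the my_elt_* names are illustrative:

    #include "tracing_map.h"

    static int my_elt_alloc(struct tracing_map_elt *elt)
    {
            return 0;   /* attach per-element private data here (hit 441) */
    }

    static void my_elt_free(struct tracing_map_elt *elt)
    {
            /* release whatever my_elt_alloc() attached (hit 395) */
    }

    static const struct tracing_map_ops my_map_ops = {
            .elt_alloc = my_elt_alloc,
            .elt_free  = my_elt_free,
            /* .elt_init / .elt_clear left NULL: each hook is optional */
    };

    static struct tracing_map *my_map_setup(void)
    {
            /* 128 buckets (2^7), u64 keys, no client private data */
            return tracing_map_create(7, sizeof(u64), &my_map_ops, NULL);
    }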
|
trace_events_trigger.c
  194: data->ops->print(m, data);  (in trigger_show())
  495: if (data->ops->free)  (in clear_event_triggers())
  496: data->ops->free(data);  (in clear_event_triggers())
  559: if (data->ops->init) {  (in register_trigger())
  560: ret = data->ops->init(data);  (in register_trigger())
  598: if (data->ops->free)  (in try_unregister_trigger())
  599: data->ops->free(data);  (in try_unregister_trigger())
  1404: return ops;  (in onoff_get_trigger_ops())
  1864: if (data->ops->init) {  (in event_enable_register_trigger())
  1906: data->ops->free(data);  (in event_enable_unregister_trigger())
  [all …]
|
fgraph.c
  295: static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,  (in entry_run(), argument)
  302: static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops,  (in return_run(), argument)
  693: if (ftrace_ops_test(&gops->ops, func, NULL) &&  (in function_graph_enter_regs())
  1191: struct trace_array *tr = gops->ops.private;  (in fgraph_pid_func())
  1216: gops = container_of(op, struct fgraph_ops, ops);  (in fgraph_update_pid_func())
  1328: if (WARN_ONCE(gops->ops.flags & FTRACE_OPS_FL_GRAPH,  (in register_ftrace_graph())
  1389: gops->ops.flags |= FTRACE_OPS_FL_GRAPH;  (in register_ftrace_graph())
  1391: ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);  (in register_ftrace_graph())
  1408: if (WARN_ONCE(!(gops->ops.flags & FTRACE_OPS_FL_GRAPH),  (in unregister_ftrace_graph())
  1431: ftrace_shutdown_subops(&graph_ops, &gops->ops, command);  (in unregister_ftrace_graph())
  [all …]
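
Since the multi-user rework, each graph client owns a struct fgraph_ops with an embedded ftrace_ops (the gops->ops in the hits above); register_ftrace_graph() stamps FTRACE_OPS_FL_GRAPH into it and starts it as a subops of the shared graph_ops. A client sketch using the three-argument callback form visible at hits 295/302 (recent kernels; older ones lack the fregs argument):

    #include <linux/ftrace.h>

    static int my_entry(struct ftrace_graph_ent *trace,
                        struct fgraph_ops *gops, struct ftrace_regs *fregs)
    {
            return 1;   /* non-zero: also hook this function's return */
    }

    static void my_return(struct ftrace_graph_ret *trace,
                          struct fgraph_ops *gops, struct ftrace_regs *fregs)
    {
            /* entry/return timestamps arrive via the trace argument */
    }

    static struct fgraph_ops my_gops = {
            .entryfunc = my_entry,
            .retfunc   = my_return,
    };

    static int __init my_graph_init(void)
    {
            /* pairs with unregister_ftrace_graph(&my_gops) */
            return register_ftrace_graph(&my_gops);
    }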
|
trace.h
  417: struct ftrace_ops *ops;  (member)
  1137: #define init_array_fgraph_ops(tr, ops) do { } while (0)  (argument)
  1138: #define allocate_fgraph_ops(tr, ops) ({ 0; })  (argument)
  1214: struct ftrace_probe_ops *ops,
  1216: int (*init)(struct ftrace_probe_ops *ops,
  1220: void (*free)(struct ftrace_probe_ops *ops,
  1225: struct ftrace_probe_ops *ops,
  1244: struct ftrace_probe_ops *ops, void *data);
  1247: struct ftrace_probe_ops *ops);
  1253: void ftrace_create_filter_files(struct ftrace_ops *ops,
  [all …]
|
/kernel/time/

posix-clock.c
  50: if (clk->ops.read)  (in posix_clock_read())
  67: if (clk->ops.poll)  (in posix_clock_poll())
  85: if (clk->ops.ioctl)  (in posix_clock_ioctl())
  113: if (clk->ops.open) {  (in posix_clock_open())
  139: if (clk->ops.release)  (in posix_clock_release())
  140: err = clk->ops.release(pccontext);  (in posix_clock_release())
  173: clk->cdev.owner = clk->ops.owner;  (in posix_clock_register())
  238: if (cd.clk->ops.clock_adjtime)  (in pc_clock_adjtime())
  257: if (cd.clk->ops.clock_gettime)  (in pc_clock_gettime())
  276: if (cd.clk->ops.clock_getres)  (in pc_clock_getres())
  [all …]
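
struct posix_clock embeds its ops table by value (clk->ops above), and every dispatch is conditional, so a driver implements only what it supports. A provider sketch, modelled loosely on the PTP subsystem (the main mainline user); note that posix_clock_register() takes a struct device * on current kernels, a dev_t on older ones:

    #include <linux/module.h>
    #include <linux/posix-clock.h>
    #include <linux/timekeeping.h>

    static int my_gettime(struct posix_clock *pc, struct timespec64 *ts)
    {
            ktime_get_real_ts64(ts);
            return 0;
    }

    static const struct posix_clock_operations my_clock_ops = {
            .owner         = THIS_MODULE,   /* copied to cdev.owner, hit 173 */
            .clock_gettime = my_gettime,    /* dispatched at hit 257 */
            /* read/poll/ioctl/open/release/clock_adjtime/clock_getres may
             * stay NULL; each caller above checks before dispatching */
    };

    static struct posix_clock my_clock = {
            .ops = my_clock_ops,
    };

    static int my_probe(struct device *dev)
    {
            return posix_clock_register(&my_clock, dev);
    }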
|
/kernel/irq/

msi.c
  705: struct msi_domain_ops *ops = info->ops;  (in msi_domain_alloc(), local)
  721: if (ops->msi_free) {  (in msi_domain_alloc())
  834: struct msi_domain_ops *ops = info->ops;  (in msi_domain_update_dom_ops(), local)
  836: if (ops == NULL) {  (in msi_domain_update_dom_ops())
  1051: bundle->info.ops = &bundle->ops;  (in msi_create_device_irq_domain())
  1163: struct msi_domain_ops *ops = info->ops;  (in msi_domain_prepare_irqs(), local)
  1297: struct msi_domain_ops *ops = info->ops;  (in __msi_domain_alloc_irqs(), local)
  1331: if (ops->prepare_desc)  (in __msi_domain_alloc_irqs())
  1389: ops = info->ops;  (in __msi_domain_alloc_locked())
  1640: ops = info->ops;  (in msi_domain_free_locked())
  [all …]
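
An MSI parent driver hands its msi_domain_ops to the core inside struct msi_domain_info; missing callbacks are either filled with defaults by msi_domain_update_dom_ops() (hit 836) or skipped at the call site (hit 721). A sketch with illustrative names; the callback signatures follow include/linux/msi.h:

    #include <linux/msi.h>
    #include <linux/irq.h>

    static int my_msi_prepare(struct irq_domain *domain, struct device *dev,
                              int nvec, msi_alloc_info_t *arg)
    {
            return 0;   /* bus/arch bookkeeping before allocation */
    }

    static void my_msi_free(struct irq_domain *domain,
                            struct msi_domain_info *info, unsigned int virq)
    {
            /* per-interrupt teardown, dispatched at hit 721 */
    }

    static struct irq_chip my_msi_chip = {
            .name = "my-msi",
    };

    static struct msi_domain_ops my_msi_ops = {
            .msi_prepare = my_msi_prepare,
            .msi_free    = my_msi_free,
    };

    static struct msi_domain_info my_msi_info = {
            .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
            .ops   = &my_msi_ops,
            .chip  = &my_msi_chip,
    };

    /* domain = msi_create_irq_domain(fwnode, &my_msi_info, parent_domain); */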
|
irq_sim.c
  19: struct irq_sim_ops ops;  (member)
  98: if (work_ctx->ops.irq_sim_irq_requested)  (in irq_sim_request_resources())
  99: return work_ctx->ops.irq_sim_irq_requested(work_ctx->domain,  (in irq_sim_request_resources())
  112: if (work_ctx->ops.irq_sim_irq_released)  (in irq_sim_release_resources())
  113: work_ctx->ops.irq_sim_irq_released(work_ctx->domain, hwirq,  (in irq_sim_release_resources())
  201: const struct irq_sim_ops *ops,  (in irq_domain_create_sim_full(), argument)
  225: if (ops)  (in irq_domain_create_sim_full())
  226: memcpy(&work_ctx->ops, ops, sizeof(*ops));  (in irq_domain_create_sim_full())
  281: const struct irq_sim_ops *ops,  (in devm_irq_domain_create_sim_full(), argument)
  287: domain = irq_domain_create_sim_full(fwnode, num_irqs, ops, data);  (in devm_irq_domain_create_sim_full())
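
irq_domain_create_sim_full() copies the caller's irq_sim_ops into its private context (hit 226), and both callbacks are optional, as the guarded calls at hits 98 and 112 show. A consumer sketch with illustrative names:

    #include <linux/err.h>
    #include <linux/irq_sim.h>

    static int my_irq_requested(struct irq_domain *domain,
                                irq_hw_number_t hwirq, void *data)
    {
            /* veto point: a consumer just called request_irq() on hwirq */
            return 0;
    }

    static const struct irq_sim_ops my_sim_ops = {
            .irq_sim_irq_requested = my_irq_requested,
            /* .irq_sim_irq_released left NULL: it is optional (hit 112) */
    };

    static int my_probe(struct device *dev, struct fwnode_handle *fwnode)
    {
            struct irq_domain *domain;

            domain = devm_irq_domain_create_sim_full(dev, fwnode, 8,
                                                     &my_sim_ops, NULL);
            return PTR_ERR_OR_ZERO(domain);
    }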
|
irqdomain.c
  251: domain->ops = info->ops;  (in __irq_domain_create())
  475: .ops = ops,  (in irq_domain_create_simple())
  497: .ops = ops,  (in irq_domain_create_legacy())
  532: else if (h->ops->match)  (in irq_find_matching_fwspec())
  642: if (domain->ops->unmap)  (in irq_domain_disassociate())
  672: if (domain->ops->map) {  (in irq_domain_associate_locked())
  844: if (d->ops->translate)  (in irq_domain_translate())
  847: if (d->ops->xlate)  (in irq_domain_translate())
  1573: if (!domain->ops->free)  (in irq_domain_free_irqs_hierarchy())
  1997: if (domain->ops->alloc)  (in irq_domain_check_hierarchy())
  [all …]
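
The dispatches above split along the two domain flavours: flat domains use .map/.unmap/.xlate, hierarchical ones .alloc/.free/.translate, and irq_domain_check_hierarchy() (hit 1997) keys off the presence of .alloc. A flat-domain sketch; my_chip stands in for the driver's real irq_chip:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static struct irq_chip my_chip = {
            .name = "my-intc",
    };

    /* .map is called once per virq<->hwirq association (hit 672) */
    static int my_map(struct irq_domain *d, unsigned int virq,
                      irq_hw_number_t hwirq)
    {
            irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
            return 0;
    }

    static const struct irq_domain_ops my_domain_ops = {
            .map   = my_map,
            .xlate = irq_domain_xlate_onecell,   /* one interrupt cell */
    };

    /* domain = irq_domain_create_simple(fwnode, 32, 0, &my_domain_ops, NULL); */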
|
/kernel/bpf/

map_in_map.c
  24: if (!inner_map->ops->map_meta_equal)  (in bpf_map_meta_alloc())
  29: if (inner_map->ops == &array_map_ops || inner_map->ops == &percpu_array_map_ops)  (in bpf_map_meta_alloc())
  63: inner_map_meta->ops = inner_map->ops;  (in bpf_map_meta_alloc())
  64: if (inner_map->ops == &array_map_ops || inner_map->ops == &percpu_array_map_ops) {  (in bpf_map_meta_alloc())
  106: if (inner_map_meta->ops->map_meta_equal(inner_map_meta, inner_map))  (in bpf_map_fd_get_ptr())
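
Hit 24 makes map_meta_equal mandatory for any map type that can be nested, and hit 106 uses it to check a newly passed-in fd against the stored meta map. Most map types simply point the hook at the generic helper; the shape (not verbatim) from kernel/bpf/arraymap.c:

    /* bpf_map_meta_equal() compares map type, key/value size and flags;
     * map types with extra invariants supply their own hook instead. */
    const struct bpf_map_ops array_map_ops = {
            .map_meta_equal = bpf_map_meta_equal,
            /* ... map_alloc/map_free/map_lookup_elem/map_update_elem ... */
    };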
|
offload.c
  38: const struct bpf_prog_offload_ops *ops;  (member)
  114: offload->offdev->ops->destroy(prog);  (in __bpf_prog_offload_destroy())
  305: ret = offload->offdev->ops->prepare(prog);  (in bpf_prog_offload_verifier_prep())
  337: if (offload->offdev->ops->finalize)  (in bpf_prog_offload_finalize())
  338: ret = offload->offdev->ops->finalize(env);  (in bpf_prog_offload_finalize())
  351: const struct bpf_prog_offload_ops *ops;  (in bpf_prog_offload_replace_insn(), local)
  358: ops = offload->offdev->ops;  (in bpf_prog_offload_replace_insn())
  360: ret = ops->replace_insn(env, off, insn);  (in bpf_prog_offload_replace_insn())
  794: offdev->ops = ops;  (in bpf_offload_dev_create())
  846: const struct xdp_metadata_ops *ops;  (in bpf_dev_bound_resolve_kfunc(), local)
  [all …]
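
offdev->ops is supplied by the offloading driver at bpf_offload_dev_create() time (hit 794). A driver-side sketch modelled on netdevsim's implementation; only .finalize is guarded above (hit 337), and a real driver also provides .insn_hook/.replace_insn/.remove_insns/.translate per include/linux/bpf.h:

    #include <linux/bpf.h>
    #include <linux/bpf_verifier.h>

    static int my_prepare(struct bpf_prog *prog)
    {
            return 0;   /* allocate per-program verifier state (hit 305) */
    }

    static int my_finalize(struct bpf_verifier_env *env)
    {
            return 0;   /* optional post-verification pass (hit 337) */
    }

    static void my_destroy(struct bpf_prog *prog)
    {
            /* release what my_prepare() set up (hit 114) */
    }

    static const struct bpf_prog_offload_ops my_offload_ops = {
            .prepare  = my_prepare,
            .finalize = my_finalize,
            .destroy  = my_destroy,
    };

    /* bpf_dev = bpf_offload_dev_create(&my_offload_ops, driver_priv); */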
|
syscall.c
  864: map->ops->map_free(map);  (in bpf_map_free())
  1120: if (map->ops->map_poll)  (in bpf_map_poll())
  1392: if (!ops)  (in map_create())
  1485: map->ops = ops;  (in map_create())
  2246: if (!ops)  (in find_prog_type())
  2250: prog->aux->ops = ops;  (in find_prog_type())
  3081: WARN_ON(ops->dealloc && ops->dealloc_deferred);  (in bpf_link_init_sleepable())
  3086: link->ops = ops;  (in bpf_link_init_sleepable())
  3161: const struct bpf_link_ops *ops = link->ops;  (in bpf_link_free(), local)
  3166: ops->release(link);  (in bpf_link_free())
  [all …]
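
Hits 3081-3166 outline the bpf_link contract: a link is initialized against a const bpf_link_ops, .release detaches it from its hook, and at most one of .dealloc/.dealloc_deferred frees it (the WARN at 3081 fires if both are set). A sketch of a custom link type; struct my_link and its callbacks are illustrative names:

    #include <linux/bpf.h>
    #include <linux/slab.h>

    struct my_link {
            struct bpf_link link;
            /* ... hook-specific state ... */
    };

    static void my_link_release(struct bpf_link *link)
    {
            /* detach from the hook; runs before dealloc (hit 3166) */
    }

    static void my_link_dealloc(struct bpf_link *link)
    {
            kfree(container_of(link, struct my_link, link));
    }

    static const struct bpf_link_ops my_link_ops = {
            .release = my_link_release,
            .dealloc = my_link_dealloc,   /* or .dealloc_deferred, not both */
    };

    /* in the attach path:
     *   bpf_link_init(&ml->link, BPF_LINK_TYPE_UNSPEC, &my_link_ops, prog);
     *   then bpf_link_prime()/bpf_link_settle() to expose it as an fd */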
|
/kernel/power/

hibernate.c
  109: void hibernation_set_ops(const struct platform_hibernation_ops *ops)  (in hibernation_set_ops(), argument)
  113: if (ops && !(ops->begin && ops->end && ops->pre_snapshot  (in hibernation_set_ops())
  114: && ops->prepare && ops->finish && ops->enter && ops->pre_restore  (in hibernation_set_ops())
  115: && ops->restore_cleanup && ops->leave)) {  (in hibernation_set_ops())
  122: hibernation_ops = ops;  (in hibernation_set_ops())
  123: if (ops)  (in hibernation_set_ops())
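
The validity check at hits 113-115 is all-or-nothing: a platform must implement every one of the nine callbacks or hibernation_set_ops() refuses the set with a warning. A skeleton with empty bodies; signatures follow include/linux/suspend.h:

    #include <linux/suspend.h>

    static int  my_begin(pm_message_t stage)  { return 0; }
    static void my_end(void)                  { }
    static int  my_pre_snapshot(void)         { return 0; }
    static int  my_prepare(void)              { return 0; }
    static void my_finish(void)               { }
    static int  my_enter(void)                { return 0; }  /* powers down */
    static void my_leave(void)                { }
    static int  my_pre_restore(void)          { return 0; }
    static void my_restore_cleanup(void)      { }

    /* all nine callbacks are mandatory, per the check above */
    static const struct platform_hibernation_ops my_hib_ops = {
            .begin           = my_begin,
            .end             = my_end,
            .pre_snapshot    = my_pre_snapshot,
            .prepare         = my_prepare,
            .finish          = my_finish,
            .enter           = my_enter,
            .pre_restore     = my_pre_restore,
            .restore_cleanup = my_restore_cleanup,
            .leave           = my_leave,
    };

    /* hibernation_set_ops(&my_hib_ops); passing NULL removes the ops */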
|
/kernel/sched/

ext.c
  4610: SCX_ATTR(ops);
  5053: if (sch->ops.exit)  (in scx_disable_workfn())
  5488: sch->ops = *ops;  (in scx_alloc_and_add_sched())
  5489: ops->priv = sch;  (in scx_alloc_and_add_sched())
  5542: if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {  (in validate_ops())
  5552: (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {  (in validate_ops())
  5614: scx_idle_enable(ops);  (in scx_enable())
  5616: if (sch->ops.init) {  (in scx_enable())
  5649: if (ops->timeout_ms)  (in scx_enable())
  5671: if (sch->ops.cpu_acquire || sch->ops.cpu_release)  (in scx_enable())
  [all …]
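
sched_ext_ops is a BPF struct_ops: the scheduler itself is a BPF program, and scx_alloc_and_add_sched() copies the ops table into the kernel-side scx_sched (hit 5488) after validate_ops() sanity-checks it (e.g. SCX_OPS_ENQ_LAST without .enqueue is rejected, hit 5542). A BPF-side sketch modelled on the in-tree tools/sched_ext/scx_simple; scx_bpf_dsq_insert() is the current name of the dispatch kfunc (older trees call it scx_bpf_dispatch):

    #include <scx/common.bpf.h>

    char _license[] SEC("license") = "GPL";

    /* send every runnable task to the shared global dispatch queue */
    void BPF_STRUCT_OPS(minimal_enqueue, struct task_struct *p, u64 enq_flags)
    {
            scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
    }

    SEC(".struct_ops.link")
    struct sched_ext_ops minimal_ops = {
            .enqueue    = (void *)minimal_enqueue,
            .timeout_ms = 5000,        /* watchdog, consumed at hit 5649 */
            .name       = "minimal",
    };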
|
ext_idle.c
  344: void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)  (in scx_idle_update_selcpu_topology(), argument)
  382: if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {  (in scx_idle_update_selcpu_topology())
  775: static void reset_idle_masks(struct sched_ext_ops *ops)  (in reset_idle_masks(), argument)
  783: if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {  (in reset_idle_masks())
  797: void scx_idle_enable(struct sched_ext_ops *ops)  (in scx_idle_enable(), argument)
  799: if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))  (in scx_idle_enable())
  804: if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)  (in scx_idle_enable())
  809: reset_idle_masks(ops);  (in scx_idle_enable())
|
ext_idle.h
  15: void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
  20: void scx_idle_enable(struct sched_ext_ops *ops);
|
/kernel/

params.c
  137: !(params[i].ops->flags & KERNEL_PARAM_OPS_FL_NOARG))  (in parse_one())
  140: params[i].ops->set);  (in parse_one())
  143: err = params[i].ops->set(val, &params[i]);  (in parse_one())
  471: arr->elemsize, arr->ops->set, kp->level,  (in param_array_set())
  487: ret = arr->ops->get(buffer + off, &p);  (in param_array_get())
  501: if (arr->ops->free)  (in param_array_free())
  503: arr->ops->free(arr->elem + arr->elemsize * i);  (in param_array_free())
  566: if (!attribute->param->ops->get)  (in param_attr_show())
  583: if (!attribute->param->ops->set)  (in param_attr_store())
  759: if (params[i].ops->free)  (in destroy_params())
  [all …]
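
parse_one() and the sysfs attribute handlers dispatch through per-parameter kernel_param_ops, so a module can intercept set/get. A standard-shaped example; the wrappers delegate to the stock integer handlers after doing their own checking:

    #include <linux/moduleparam.h>
    #include <linux/module.h>

    static int my_value;

    static int my_set(const char *val, const struct kernel_param *kp)
    {
            /* validate/transform val here, then reuse the stock parser */
            return param_set_int(val, kp);
    }

    static int my_get(char *buffer, const struct kernel_param *kp)
    {
            return param_get_int(buffer, kp);
    }

    static const struct kernel_param_ops my_param_ops = {
            .set = my_set,   /* NULL set: param_attr_store() bails (hit 583) */
            .get = my_get,   /* NULL get: param_attr_show() bails (hit 566) */
    };

    module_param_cb(my_value, &my_param_ops, &my_value, 0644);
    MODULE_PARM_DESC(my_value, "demo parameter with custom ops");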
|