Lines matching refs:ops — each entry is the source line number, the matching line, and either the enclosing function or the kind of declaration (member, argument, local).

854 	struct sched_ext_ops	ops;  member
1297 (sch)->ops.op(args); \
1300 (sch)->ops.op(args); \
1308 __typeof__((sch)->ops.op(args)) __ret; \
1314 __ret = (sch)->ops.op(args); \
1317 __ret = (sch)->ops.op(args); \
1345 __typeof__((sch)->ops.op(task, ##args)) __ret; \
1355 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
2286 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) && in do_enqueue_task()
2293 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) && in do_enqueue_task()
3081 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) && in balance_one()
3172 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) { in balance_one()
3313 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) in switch_class()
3380 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST)); in put_prev_task_scx()
3605 scx_idle_update_selcpu_topology(&sch->ops); in handle_hotplug()
4430 if (!sch->ops.cgroup_exit) in scx_cgroup_exit()
4471 if (!sch->ops.cgroup_init) { in scx_cgroup_init()
4608 return sysfs_emit(buf, "%s\n", scx_root->ops.name); in scx_attr_ops_show()
4610 SCX_ATTR(ops);
4652 return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name); in scx_uevent()
4675 (scx_root->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) || in scx_allow_ttwu_queue()
4747 smp_processor_id(), dur_s, scx_root->ops.name); in scx_softlockup()
5041 sch->ops.name, ei->reason); in scx_disable_workfn()
5044 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg); in scx_disable_workfn()
5050 sch->ops.name, ei->reason); in scx_disable_workfn()
5053 if (sch->ops.exit) in scx_disable_workfn()
5405 scx_dump_state(ei, sch->ops.exit_dump_len); in scx_error_irq_workfn()
5437 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) in scx_alloc_and_add_sched() argument
5446 sch->exit_info = alloc_exit_info(ops->exit_dump_len); in scx_alloc_and_add_sched()
5488 sch->ops = *ops; in scx_alloc_and_add_sched()
5489 ops->priv = sch; in scx_alloc_and_add_sched()
5516 const struct sched_ext_ops *ops) in check_hotplug_seq() argument
5525 if (ops->hotplug_seq) { in check_hotplug_seq()
5527 if (ops->hotplug_seq != global_hotplug_seq) { in check_hotplug_seq()
5531 ops->hotplug_seq, global_hotplug_seq); in check_hotplug_seq()
5536 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) in validate_ops() argument
5542 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { in validate_ops()
5551 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) && in validate_ops()
5552 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) { in validate_ops()
5557 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT) in validate_ops()
5563 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) in scx_enable() argument
5584 sch = scx_alloc_and_add_sched(ops); in scx_enable()
5614 scx_idle_enable(ops); in scx_enable()
5616 if (sch->ops.init) { in scx_enable()
5627 if (((void (**)(void))ops)[i]) in scx_enable()
5630 check_hotplug_seq(sch, ops); in scx_enable()
5631 scx_idle_update_selcpu_topology(ops); in scx_enable()
5635 ret = validate_ops(sch, ops); in scx_enable()
5640 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH; in scx_enable()
5649 if (ops->timeout_ms) in scx_enable()
5650 timeout = msecs_to_jiffies(ops->timeout_ms); in scx_enable()
5668 if (((void (**)(void))ops)[i]) in scx_enable()
5671 if (sch->ops.cpu_acquire || sch->ops.cpu_release) in scx_enable()
5672 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT; in scx_enable()
5736 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); in scx_enable()
5775 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) in scx_enable()
5779 sch->ops.name, scx_switched_all() ? "" : " (partial)"); in scx_enable()
5869 struct sched_ext_ops *ops = kdata; in bpf_scx_init_member() local
5877 ops->dispatch_max_batch = *(u32 *)(udata + moff); in bpf_scx_init_member()
5882 ops->flags = *(u64 *)(udata + moff); in bpf_scx_init_member()
5885 ret = bpf_obj_name_cpy(ops->name, uops->name, in bpf_scx_init_member()
5886 sizeof(ops->name)); in bpf_scx_init_member()
5896 ops->timeout_ms = *(u32 *)(udata + moff); in bpf_scx_init_member()
5899 ops->exit_dump_len = in bpf_scx_init_member()
5903 ops->hotplug_seq = *(u64 *)(udata + moff); in bpf_scx_init_member()
5943 struct sched_ext_ops *ops = kdata; in bpf_scx_unreg() local
5944 struct scx_sched *sch = ops->priv; in bpf_scx_unreg()
6236 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name, in print_scx_info()
6248 log_lvl, sch->ops.name, scx_enable_state_str[state], all, in print_scx_info()
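
The references above follow one pattern: at load time the kernel copies the BPF scheduler's struct sched_ext_ops into sch->ops (scx_alloc_and_add_sched(), bpf_scx_init_member()), validates it (validate_ops(), check_hotplug_seq()), and thereafter consults sch->ops.flags and the individual callbacks at runtime. Below is a minimal sketch of the BPF-side struct those paths consume, assuming the usual libbpf struct_ops conventions; the example_* names are hypothetical placeholders and not taken from the kernel tree.

	SEC(".struct_ops.link")
	struct sched_ext_ops example_ops = {
		/* callbacks; validate_ops() rejects SCX_OPS_ENQ_LAST without .enqueue */
		.enqueue            = (void *)example_enqueue,
		.init               = (void *)example_init,   /* called from scx_enable() if non-NULL */
		.exit               = (void *)example_exit,    /* called from scx_disable_workfn() if non-NULL */

		/* scalar fields copied by bpf_scx_init_member() */
		.flags              = SCX_OPS_ENQ_LAST | SCX_OPS_SWITCH_PARTIAL,
		.dispatch_max_batch = 32,    /* 0 falls back to SCX_DSP_DFL_MAX_BATCH */
		.timeout_ms         = 5000,  /* 0 keeps the default watchdog timeout */
		.exit_dump_len      = 0,     /* 0 keeps the default exit-dump buffer size */
		.name               = "example",
	};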