Lines matching references to `task` (each entry shows the source line number, the matching code, and the enclosing function):
107 static __always_inline void update_task_info(struct task_struct *task, __u32 cpu) in update_task_info() argument
110 .pid = task->pid, in update_task_info()
116 .tgid = task->tgid, in update_task_info()
117 .is_kthread = task->flags & PF_KTHREAD ? 1 : 0, in update_task_info()
119 BPF_CORE_READ_STR_INTO(&data.comm, task, comm); in update_task_info()
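
The update_task_info() fragments above record a task's identity: pid, tgid, a kernel-thread flag derived from PF_KTHREAD, and the comm string read with the CO-RE helper BPF_CORE_READ_STR_INTO(). A minimal sketch of that pattern follows; the headers, the task_info_map name, and the task_key/task_data types are assumptions for illustration (they are not shown in the listing), and the direct dereferences such as task->pid assume a BTF-enabled attach point (tp_btf/fentry) where the argument pointer is trusted.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#define PF_KTHREAD 0x00200000	/* macro value from include/linux/sched.h; macros are not carried in vmlinux.h */

struct task_key {		/* illustrative key: per-pid, per-cpu */
	__u32 pid;
	__u32 cpu;
};

struct task_data {		/* illustrative value mirroring the listed fields */
	__u32 pid;
	__u32 tgid;
	__u32 is_kthread;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 8192);
	__type(key, struct task_key);
	__type(value, struct task_data);
} task_info_map SEC(".maps");	/* placeholder name; the real map is not shown in the listing */

static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
{
	struct task_key key = {
		.pid = task->pid,
		.cpu = cpu,
	};
	struct task_data data = {
		.pid = task->pid,
		.tgid = task->tgid,
		.is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
	};

	/* CO-RE string read: copies task->comm safely across kernel layout changes */
	BPF_CORE_READ_STR_INTO(&data.comm, task, comm);

	bpf_map_update_elem(&task_info_map, &key, &data, BPF_ANY);
}
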
141 static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu) in on_sched_out() argument
146 pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0); in on_sched_out()
154 .pid = task->pid, in on_sched_out()
155 .task_p = (__u64)task, in on_sched_out()
159 update_task_info(task, cpu); in on_sched_out()
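
on_sched_out() pairs the current timestamp with the value kept in the kwork_top_task_time task-local storage map and then refreshes the task record via update_task_info(). A sketch of that flow, under the same header assumptions as above; the work_key type and the account_runtime() helper are illustrative, since only the .pid/.task_p fields and the bpf_task_storage_get() lookup appear in the listing.

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} kwork_top_task_time SEC(".maps");	/* name taken from the listing; value type assumed */

struct work_key {			/* illustrative; only .pid and .task_p are shown */
	__u32 pid;
	__u64 task_p;
};

static void account_runtime(struct work_key *key, __u64 delta)
{
	/* hypothetical accounting step, e.g. summing delta into a report map */
}

static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
{
	__u64 delta = 0;
	__u64 *pelem;

	/* flags == 0: look up the stored sched-in time without creating a slot */
	pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
	if (pelem)
		delta = ts - *pelem;	/* on-CPU time for this scheduling period */

	struct work_key key = {
		.pid = task->pid,
		.task_p = (__u64)task,	/* raw pointer disambiguates recycled pids */
	};

	account_runtime(&key, delta);
	update_task_info(task, cpu);
}
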
162 static void on_sched_in(struct task_struct *task, __u64 ts) in on_sched_in() argument
166 pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, in on_sched_in()
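
The matching sched-in side fetches the same per-task storage slot and stamps the current time. The flag argument is cut off in the listing; BPF_LOCAL_STORAGE_GET_F_CREATE is assumed in the sketch below so a slot is allocated the first time a task runs.

static void on_sched_in(struct task_struct *task, __u64 ts)
{
	__u64 *pelem;

	/* Fourth argument truncated in the listing; BPF_LOCAL_STORAGE_GET_F_CREATE
	 * is assumed so the per-task slot is created on first use.
	 */
	pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (pelem)
		*pelem = ts;	/* remember when the task went on CPU */
}
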
199 struct task_struct *task; in on_irq_handler_entry() local
211 task = (struct task_struct *)bpf_get_current_task(); in on_irq_handler_entry()
212 if (!task) in on_irq_handler_entry()
217 .pid = BPF_CORE_READ(task, pid), in on_irq_handler_entry()
218 .task_p = (__u64)task, in on_irq_handler_entry()
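
on_irq_handler_entry() and the remaining handlers below all follow one pattern: fetch the interrupted task with bpf_get_current_task(), bail out on NULL, read the pid through BPF_CORE_READ() (the returned pointer is not a trusted BTF argument, so it is not dereferenced directly), and fold the raw task pointer into the key. A sketch of the entry side; the irq_entry_ts map and the timestamping step are assumptions, only the key construction is shown in the listing.

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 8192);
	__type(key, struct work_key);
	__type(value, __u64);
} irq_entry_ts SEC(".maps");	/* hypothetical map holding the entry timestamp */

static int on_irq_handler_entry(void)
{
	struct task_struct *task;
	__u64 ts = bpf_ktime_get_ns();

	task = (struct task_struct *)bpf_get_current_task();
	if (!task)
		return 0;

	struct work_key key = {
		.pid = BPF_CORE_READ(task, pid),	/* CO-RE read of an untrusted pointer */
		.task_p = (__u64)task,
	};

	bpf_map_update_elem(&irq_entry_ts, &key, &ts, BPF_ANY);
	return 0;
}
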
234 struct task_struct *task; in on_irq_handler_exit() local
247 task = (struct task_struct *)bpf_get_current_task(); in on_irq_handler_exit()
248 if (!task) in on_irq_handler_exit()
253 .pid = BPF_CORE_READ(task, pid), in on_irq_handler_exit()
254 .task_p = (__u64)task, in on_irq_handler_exit()
271 struct task_struct *task; in on_softirq_entry() local
283 task = (struct task_struct *)bpf_get_current_task(); in on_softirq_entry()
284 if (!task) in on_softirq_entry()
289 .pid = BPF_CORE_READ(task, pid), in on_softirq_entry()
290 .task_p = (__u64)task, in on_softirq_entry()
306 struct task_struct *task; in on_softirq_exit() local
319 task = (struct task_struct *)bpf_get_current_task(); in on_softirq_exit()
320 if (!task) in on_softirq_exit()
325 .pid = BPF_CORE_READ(task, pid), in on_softirq_exit()
326 .task_p = (__u64)task, in on_softirq_exit()
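
The exit handlers (on_irq_handler_exit, on_softirq_exit) rebuild exactly the same key from the current task. The fragments stop at the key construction, but the natural pairing step is to look up the timestamp stored at entry and account the elapsed time; a hypothetical sketch, reusing the illustrative irq_entry_ts map and an account_irq_time() placeholder:

static void account_irq_time(struct work_key *key, __u64 delta)
{
	/* hypothetical: fold delta into a per-work report map */
}

static int on_irq_handler_exit(void)
{
	struct task_struct *task;
	__u64 now = bpf_ktime_get_ns();
	__u64 *entry_ts;

	task = (struct task_struct *)bpf_get_current_task();
	if (!task)
		return 0;

	struct work_key key = {
		.pid = BPF_CORE_READ(task, pid),
		.task_p = (__u64)task,
	};

	/* Pair with the timestamp recorded by the entry handler. */
	entry_ts = bpf_map_lookup_elem(&irq_entry_ts, &key);
	if (!entry_ts)
		return 0;

	account_irq_time(&key, now - *entry_ts);
	bpf_map_delete_elem(&irq_entry_ts, &key);
	return 0;
}
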