| /include/trace/events/ |
| workqueue.h
|    31  __field( void *, work )
|    39  __entry->work = work;
|    63  TP_ARGS(work),
|    66  __field( void *, work )
|    71  __entry->work = work;
|    88  TP_ARGS(work),
|    91  __field( void *, work )
|    96  __entry->work = work;
|   114  TP_ARGS(work, function),
|   117  __field( void *, work )
|        [all …]
|
| napi.h
|    16  TP_PROTO(struct napi_struct *napi, int work, int budget),
|    18  TP_ARGS(napi, work, budget),
|    23  __field( int, work)
|    30  __entry->work = work;
|    36  __entry->work, __entry->budget)
|
| sched.h
|    67  struct kthread_work *work),
|    69  TP_ARGS(worker, work),
|    72  __field( void *, work )
|    78  __entry->work = work;
|    79  __entry->function = work->func;
|    97  TP_ARGS(work),
|   100  __field( void *, work )
|   105  __entry->work = work;
|   123  TP_ARGS(work, function),
|   126  __field( void *, work )
|        [all …]
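The __field()/__entry-> fragments in the trace/events entries above are pieces of TRACE_EVENT() definitions. A minimal sketch of how such pieces fit together, using a hypothetical event name (the TRACE_SYSTEM/define_trace.h boilerplate that surrounds real event headers is omitted):

    #include <linux/tracepoint.h>
    #include <linux/workqueue.h>

    /* Hypothetical event; the real workqueue/napi/sched events differ in detail. */
    TRACE_EVENT(demo_work_queued,

        TP_PROTO(struct work_struct *work),

        TP_ARGS(work),

        TP_STRUCT__entry(
            __field(void *, work)       /* reserves a slot in the trace record */
        ),

        TP_fast_assign(
            __entry->work = work;       /* copies the argument into that slot */
        ),

        TP_printk("work struct %p", __entry->work)
    );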
|
| /include/linux/ |
| completion.h
|    35  #define COMPLETION_INITIALIZER(work) \   [argument]
|    39  (*({ init_completion_map(&(work), &(map)); &(work); }))
|    41  #define COMPLETION_INITIALIZER_ONSTACK(work) \   [argument]
|    42  (*({ init_completion(&work); &work; }))
|    52  #define DECLARE_COMPLETION(work) \   [argument]
|    53  struct completion work = COMPLETION_INITIALIZER(work)
|    68  # define DECLARE_COMPLETION_ONSTACK(work) \   [argument]
|    69  struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
|    71  struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
|    73  # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)   [argument]
|        [all …]
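A minimal sketch of how these completion declarations are typically used: one thread blocks in wait_for_completion() while another signals with complete(). All names below are hypothetical, not taken from completion.h:

    #include <linux/completion.h>
    #include <linux/kthread.h>
    #include <linux/err.h>

    static DECLARE_COMPLETION(demo_setup_done);    /* file-scope completion */

    static int demo_thread_fn(void *data)
    {
        /* ... perform setup ... */
        complete(&demo_setup_done);    /* wake up anyone waiting */
        return 0;
    }

    static int demo_init(void)
    {
        struct task_struct *t;

        t = kthread_run(demo_thread_fn, NULL, "demo-setup");
        if (IS_ERR(t))
            return PTR_ERR(t);

        /* Sleeps until demo_thread_fn() calls complete(). */
        wait_for_completion(&demo_setup_done);
        return 0;
    }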
|
| irq_work.h
|    37  void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))   [in init_irq_work(), argument]
|    39  *work = IRQ_WORK_INIT(func);   [in init_irq_work()]
|    42  static inline bool irq_work_is_pending(struct irq_work *work)   [in irq_work_is_pending(), argument]
|    44  return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;   [in irq_work_is_pending()]
|    47  static inline bool irq_work_is_busy(struct irq_work *work)   [in irq_work_is_busy(), argument]
|    49  return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;   [in irq_work_is_busy()]
|    52  static inline bool irq_work_is_hard(struct irq_work *work)   [in irq_work_is_hard(), argument]
|    54  return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;   [in irq_work_is_hard()]
|    57  bool irq_work_queue(struct irq_work *work);
|    58  bool irq_work_queue_on(struct irq_work *work, int cpu);
|        [all …]
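A small usage sketch, assuming the common pattern: initialize the irq_work once, queue it from a context that cannot sleep, and let the callback run in hard-IRQ context shortly afterwards. Function and variable names are hypothetical:

    #include <linux/irq_work.h>
    #include <linux/printk.h>

    static void demo_irq_work_fn(struct irq_work *work)
    {
        /* Runs in hard interrupt context shortly after being queued. */
        pr_info("demo irq_work executed\n");
    }

    static struct irq_work demo_irq_work = IRQ_WORK_INIT(demo_irq_work_fn);

    static void demo_kick(void)
    {
        /* Safe from NMI/scheduler paths; returns false if already pending. */
        if (!irq_work_queue(&demo_irq_work))
            pr_debug("demo irq_work was already queued\n");
    }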
|
| jump_label_ratelimit.h
|    12  struct delayed_work work;   [member]
|    18  struct delayed_work work;   [member]
|    24  struct delayed_work work;   [member]
|    28  __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
|    30  __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
|    33  __static_key_deferred_flush((x), &(x)->work)
|    37  struct delayed_work *work,
|    39  extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
|    43  extern void jump_label_update_timeout(struct work_struct *work);
|    49  .work = __DELAYED_WORK_INITIALIZER((name).work, \
|        [all …]
|
| workqueue.h
|    24  #define work_data_bits(work) ((unsigned long *)(&(work)->data))   [argument]
|   115  struct work_struct work;   [member]
|   124  struct work_struct work;   [member]
|   214  return container_of(work, struct delayed_work, work);   [in to_delayed_work()]
|   219  return container_of(work, struct rcu_work, work);   [in to_rcu_work()]
|   223  struct work_struct work;   [member]
|   246  .work = __WORK_INITIALIZER((n).work, (f)), \
|   355  #define work_pending(work) \   [argument]
|   364  work_pending(&(w)->work)
|   752  if (enable_work(work)) {   [in enable_and_queue_work()]
|        [all …]
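A minimal sketch of the pattern these workqueue definitions support: embed a work_struct in your own object (the "member" hits above), initialize and queue it, then recover the object with container_of() in the handler. Names are hypothetical:

    #include <linux/workqueue.h>
    #include <linux/container_of.h>
    #include <linux/printk.h>

    struct demo_ctx {
        struct work_struct work;    /* embedded work item */
        int payload;
    };

    static void demo_work_fn(struct work_struct *work)
    {
        /* Recover the containing object from the embedded work_struct. */
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

        pr_info("demo work ran, payload=%d\n", ctx->payload);
    }

    static void demo_submit(struct demo_ctx *ctx)
    {
        INIT_WORK(&ctx->work, demo_work_fn);
        schedule_work(&ctx->work);    /* executes later in process context */
    }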
|
| entry-common.h
|    65  unsigned long work);
|    92  unsigned long work = READ_ONCE(current_thread_info()->syscall_work);   [in syscall_enter_from_user_mode_work(), local]
|    94  if (work & SYSCALL_WORK_ENTER)   [in syscall_enter_from_user_mode_work()]
|    95  syscall = syscall_trace_enter(regs, syscall, work);   [in syscall_enter_from_user_mode_work()]
|   138  void syscall_exit_work(struct pt_regs *regs, unsigned long work);
|   155  unsigned long work = READ_ONCE(current_thread_info()->syscall_work);   [in syscall_exit_to_user_mode_work(), local]
|   172  if (unlikely(work & SYSCALL_WORK_EXIT))   [in syscall_exit_to_user_mode_work()]
|   173  syscall_exit_work(regs, work);   [in syscall_exit_to_user_mode_work()]
|
| kthread.h
|   142  struct kthread_work work;   [member]
|   146  #define KTHREAD_WORK_INIT(work, fn) { \   [argument]
|   147  .node = LIST_HEAD_INIT((work).node), \
|   152  .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
|   157  #define DEFINE_KTHREAD_WORK(work, fn) \   [argument]
|   158  struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
|   173  #define kthread_init_work(work, fn) \   [argument]
|   176  INIT_LIST_HEAD(&(work)->node); \
|   177  (work)->func = (fn); \
|   182  kthread_init_work(&(dwork)->work, (fn)); \
|        [all …]
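A small sketch of the usual kthread_work flow, assuming kthread_create_worker() as the worker constructor: create a dedicated worker thread, then queue kthread_work items to it. Names are hypothetical:

    #include <linux/kthread.h>
    #include <linux/printk.h>
    #include <linux/err.h>

    static void demo_kwork_fn(struct kthread_work *work)
    {
        pr_info("kthread_work executed on the dedicated worker\n");
    }

    static DEFINE_KTHREAD_WORK(demo_kwork, demo_kwork_fn);

    static int demo_start(void)
    {
        struct kthread_worker *worker;

        /* Spawns a kernel thread that processes queued kthread_work items. */
        worker = kthread_create_worker(0, "demo-kworker");
        if (IS_ERR(worker))
            return PTR_ERR(worker);

        kthread_queue_work(worker, &demo_kwork);
        return 0;
    }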
|
| unwind_deferred.h
|    11  typedef void (*unwind_callback_t)(struct unwind_work *work, struct unwind_stacktrace *trace, u64 co…
|    38  int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func);
|    39  int unwind_deferred_request(struct unwind_work *work, u64 *cookie);
|    40  void unwind_deferred_cancel(struct unwind_work *work);
|    72  static inline int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func) { return -…   [in unwind_deferred_init(), argument]
|    73  static inline int unwind_deferred_request(struct unwind_work *work, u64 *timestamp) { return -ENOSY…   [in unwind_deferred_request(), argument]
|    74  static inline void unwind_deferred_cancel(struct unwind_work *work) {}   [in unwind_deferred_cancel(), argument]
|
| stop_machine.h
|    47  struct work_struct work;   [member]
|    62  static void stop_one_cpu_nowait_workfn(struct work_struct *work)   [in stop_one_cpu_nowait_workfn(), argument]
|    65  container_of(work, struct cpu_stop_work, work);   [in stop_one_cpu_nowait_workfn()]
|    76  INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);   [in stop_one_cpu_nowait()]
|    79  schedule_work(&work_buf->work);   [in stop_one_cpu_nowait()]
|
| dim.h
|    87  void (*rx_dim_work)(struct work_struct *work);
|    88  void (*tx_dim_work)(struct work_struct *work);
|   149  struct work_struct work;   [member]
|   250  void (*rx_dim_work)(struct work_struct *work),
|   251  void (*tx_dim_work)(struct work_struct *work));
|
| closure.h
|   151  struct work_struct work;   [member]
|   267  INIT_WORK(&cl->work, cl->work.func);   [in closure_queue()]
|   268  BUG_ON(!queue_work(wq, &cl->work));   [in closure_queue()]
|   270  cl->fn(&cl->work);   [in closure_queue()]
|   354  struct closure *cl = container_of(ws, struct closure, work); \
|
| workqueue_types.h
|    13  typedef void (*work_func_t)(struct work_struct *work);
|
| page_reporting.h
|    17  struct delayed_work work;   [member]
|
| unwind_deferred_types.h
|    35  struct callback_head work;   [member]
|
| srcutree.h
|    43  struct work_struct work; /* Context for CB invoking. */   [member]
|    95  struct delayed_work work;   [member]
|   155  .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
|
| pci-pwrctrl.h
|    45  struct work_struct work;   [member]
|
| /include/drm/ |
| drm_flip_work.h
|    51  typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
|    71  void drm_flip_work_queue(struct drm_flip_work *work, void *val);
|    72  void drm_flip_work_commit(struct drm_flip_work *work,
|    74  void drm_flip_work_init(struct drm_flip_work *work,
|    76  void drm_flip_work_cleanup(struct drm_flip_work *work);
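A usage sketch for the flip-work helpers above, assuming the common case of deferring framebuffer unreferences out of the flip/vblank path; the per-CRTC structure and function names are hypothetical:

    #include <drm/drm_flip_work.h>
    #include <drm/drm_framebuffer.h>
    #include <linux/workqueue.h>

    struct demo_crtc {
        struct drm_flip_work unref_work;    /* hypothetical per-CRTC state */
    };

    /* Called later on a workqueue, once per queued value. */
    static void demo_unref_fb(struct drm_flip_work *work, void *val)
    {
        drm_framebuffer_put(val);
    }

    static void demo_crtc_setup(struct demo_crtc *crtc)
    {
        drm_flip_work_init(&crtc->unref_work, "fb unref", demo_unref_fb);
    }

    /* Flip-done path: defer dropping the old framebuffer reference. */
    static void demo_flip_done(struct demo_crtc *crtc, struct drm_framebuffer *old_fb)
    {
        drm_flip_work_queue(&crtc->unref_work, old_fb);
        drm_flip_work_commit(&crtc->unref_work, system_unbound_wq);
    }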
|
| drm_vblank_work.h
|    65  int drm_vblank_work_schedule(struct drm_vblank_work *work,
|    67  void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
|    68  void (*func)(struct kthread_work *work));
|    69  bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
|    70  void drm_vblank_work_flush(struct drm_vblank_work *work);
|
| /include/net/bluetooth/ |
| coredump.h
|    64  void hci_devcd_rx(struct work_struct *work);
|    65  void hci_devcd_timeout(struct work_struct *work);
|    78  static inline void hci_devcd_rx(struct work_struct *work) {}   [in hci_devcd_rx(), argument]
|    79  static inline void hci_devcd_timeout(struct work_struct *work) {}   [in hci_devcd_timeout(), argument]
|
| /include/cxl/ |
| event.h
|   289  int cxl_cper_register_work(struct work_struct *work);
|   290  int cxl_cper_unregister_work(struct work_struct *work);
|   292  int cxl_cper_register_prot_err_work(struct work_struct *work);
|   293  int cxl_cper_unregister_prot_err_work(struct work_struct *work);
|   296  static inline int cxl_cper_register_work(struct work_struct *work)   [in cxl_cper_register_work(), argument]
|   301  static inline int cxl_cper_unregister_work(struct work_struct *work)   [in cxl_cper_unregister_work(), argument]
|   309  static inline int cxl_cper_register_prot_err_work(struct work_struct *work)   [in cxl_cper_register_prot_err_work(), argument]
|   313  static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)   [in cxl_cper_unregister_prot_err_work(), argument]
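The paired prototypes and static-inline bodies above follow the usual optional-subsystem pattern (also visible in the Bluetooth coredump header): real declarations when the feature is configured in, stub inlines otherwise, so callers compile either way. A generic sketch with a hypothetical header and config symbol:

    /* demo.h — hypothetical header; CONFIG_DEMO_FEATURE is not a real option */
    #include <linux/workqueue.h>
    #include <linux/errno.h>

    #ifdef CONFIG_DEMO_FEATURE
    int demo_register_work(struct work_struct *work);
    void demo_unregister_work(struct work_struct *work);
    #else
    static inline int demo_register_work(struct work_struct *work)
    {
        return -EOPNOTSUPP;    /* callers get a clean error, not a link failure */
    }
    static inline void demo_unregister_work(struct work_struct *work) {}
    #endif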
|
| /include/linux/dsa/ |
| ksz_common.h
|    29  struct kthread_work work;   [member]
|    33  void (*xmit_work_fn)(struct kthread_work *work);
|
| sja1105.h
|    46  struct kthread_work work;   [member]
|    51  void (*xmit_work_fn)(struct kthread_work *work);
|
| /include/scsi/ |
| libsas.h
|   197  struct work_struct work;   [member]
|   208  INIT_WORK(&sw->work, fn);   [in INIT_SAS_WORK()]
|   213  struct sas_work work;   [member]
|   217  static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work)   [in to_sas_discovery_event(), argument]
|   219  struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work);   [in to_sas_discovery_event()]
|   245  struct sas_work work;   [member]
|   271  struct sas_work work;   [member]
|   276  static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)   [in to_asd_sas_event(), argument]
|   278  struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work);   [in to_asd_sas_event()]
|   287  INIT_SAS_WORK(&ev->work, fn);   [in INIT_SAS_EVENT()]
|