| /include/linux/ |
| units.h |
    46  return t + ABSOLUTE_ZERO_MILLICELSIUS;  in milli_kelvin_to_millicelsius()
    51  return t - ABSOLUTE_ZERO_MILLICELSIUS;  in millicelsius_to_milli_kelvin()
    57  static inline long kelvin_to_millicelsius(long t)  in kelvin_to_millicelsius()  argument
    62  static inline long millicelsius_to_kelvin(long t)  in millicelsius_to_kelvin()  argument
    64  t = millicelsius_to_milli_kelvin(t);  in millicelsius_to_kelvin()
    71  t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);  in deci_kelvin_to_celsius()
    78  t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);  in celsius_to_deci_kelvin()
    92  return t * MILLIDEGREE_PER_DECIDEGREE - offset;  in deci_kelvin_to_millicelsius_with_offset()
   102  t = millicelsius_to_milli_kelvin(t);  in millicelsius_to_deci_kelvin()
   107  static inline long kelvin_to_celsius(long t)  in kelvin_to_celsius()  argument
    [all …]
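The units.h hits above are the kernel's temperature-scale helpers (Kelvin, deci-Kelvin, milli-Kelvin, Celsius, millicelsius). A minimal userspace sketch of the same arithmetic, assuming the constant values from include/linux/units.h (ABSOLUTE_ZERO_MILLICELSIUS = -273150, MILLIDEGREE_PER_DEGREE = 1000, MILLIDEGREE_PER_DECIDEGREE = 100):

```c
/* Sketch only: re-implementation of the conversions referenced above,
 * outside the kernel, assuming the constants named in the lead-in. */
#include <stdio.h>

#define ABSOLUTE_ZERO_MILLICELSIUS  (-273150L)
#define MILLIDEGREE_PER_DEGREE      1000L
#define MILLIDEGREE_PER_DECIDEGREE  100L

static long milli_kelvin_to_millicelsius(long t)
{
    return t + ABSOLUTE_ZERO_MILLICELSIUS;  /* 273150 mK -> 0 m°C */
}

static long millicelsius_to_milli_kelvin(long t)
{
    return t - ABSOLUTE_ZERO_MILLICELSIUS;
}

static long deci_kelvin_to_celsius(long t)
{
    t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
    return t / MILLIDEGREE_PER_DEGREE;
}

int main(void)
{
    /* 2982 dK = 298.2 K = 25.05 °C, truncated to 25 by integer division */
    printf("%ld\n", deci_kelvin_to_celsius(2982));
    return 0;
}
```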
|
| rseq.h |
    28  if (t->rseq)  in rseq_set_notify_resume()
    54  rseq_set_notify_resume(t);  in rseq_preempt()
    71  t->rseq = NULL;  in rseq_fork()
    72  t->rseq_len = 0;  in rseq_fork()
    73  t->rseq_sig = 0;  in rseq_fork()
    74  t->rseq_event_mask = 0;  in rseq_fork()
    76  t->rseq = current->rseq;  in rseq_fork()
    85  t->rseq = NULL;  in rseq_execve()
    86  t->rseq_len = 0;  in rseq_execve()
    87  t->rseq_sig = 0;  in rseq_execve()
    [all …]
|
| user_events.h |
    30  extern void user_event_mm_dup(struct task_struct *t,
    33  extern void user_event_mm_remove(struct task_struct *t);
    35  static inline void user_events_fork(struct task_struct *t,  in user_events_fork()  argument
    40  if (!t || !current->user_event_mm)  in user_events_fork()
    46  t->user_event_mm = old_mm;  in user_events_fork()
    51  user_event_mm_dup(t, old_mm);  in user_events_fork()
    56  if (!t || !t->user_event_mm)  in user_events_execve()
    59  user_event_mm_remove(t);  in user_events_execve()
    62  static inline void user_events_exit(struct task_struct *t)  in user_events_exit()  argument
    64  if (!t || !t->user_event_mm)  in user_events_exit()
    [all …]
|
| btf.h |
   269  return btf_type_is_int(t) && t->size <= sizeof(u64);  in btf_type_is_small_int()
   279  return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);  in btf_type_is_signed_int()
   307  return BTF_INFO_KIND(t->info);  in btf_kind()
   327  u16 kind = btf_kind(t);  in btf_is_composite()
   359  return btf_type_is_int(t) || btf_type_is_enum(t);  in btf_type_is_scalar()
   419  return BTF_INFO_VLEN(t->info);  in btf_type_vlen()
   424  return btf_type_vlen(t);  in btf_vlen()
   491  const struct btf_type *t)  in btf_type_var_secinfo()  argument
   600  if (!btf_type_is_ptr(t))  in btf_type_is_struct_ptr()
   603  t = btf_type_skip_modifiers(btf, t->type, NULL);  in btf_type_is_struct_ptr()
    [all …]
|
| rcupdate_trace.h |
    35  void rcu_read_unlock_trace_special(struct task_struct *t);
    51  struct task_struct *t = current;  in rcu_read_lock_trace()  local
    53  WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);  in rcu_read_lock_trace()
    56  t->trc_reader_special.b.need_mb)  in rcu_read_lock_trace()
    73  struct task_struct *t = current;  in rcu_read_unlock_trace()  local
    76  nesting = READ_ONCE(t->trc_reader_nesting) - 1;  in rcu_read_unlock_trace()
    79  WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);  in rcu_read_unlock_trace()
    80  if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {  in rcu_read_unlock_trace()
    81  WRITE_ONCE(t->trc_reader_nesting, nesting);  in rcu_read_unlock_trace()
    85  rcu_read_unlock_trace_special(t);  in rcu_read_unlock_trace()
|
| cb710.h |
    51  #define CB710_PORT_ACCESSORS(t) \  argument
    52  static inline void cb710_write_port_##t(struct cb710_slot *slot, \
    53  unsigned port, u##t value) \
    55  iowrite##t(value, slot->iobase + port); \
    58  static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \
    61  return ioread##t(slot->iobase + port); \
    64  static inline void cb710_modify_port_##t(struct cb710_slot *slot, \
    65  unsigned port, u##t set, u##t clear) \
    67  iowrite##t( \
    68  (ioread##t(slot->iobase + port) & ~clear)|set, \
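CB710_PORT_ACCESSORS(t) pastes a bit width into the function names, the u##t argument types, and the ioread##t/iowrite##t calls, so one invocation per width emits a matching write/read/modify trio. A minimal sketch of the same token-pasting pattern, using a hypothetical in-memory register window instead of MMIO accessors:

```c
/* Sketch only: width-parameterised accessor generation via token pasting,
 * modelled on CB710_PORT_ACCESSORS(); `regs` and the names are hypothetical. */
#include <stdint.h>
#include <string.h>

static uint8_t regs[256];              /* stand-in for slot->iobase */

#define PORT_ACCESSORS(t) \
static inline void write_port_##t(unsigned port, uint##t##_t value) \
{ \
    memcpy(&regs[port], &value, sizeof(value)); \
} \
static inline uint##t##_t read_port_##t(unsigned port) \
{ \
    uint##t##_t value; \
    memcpy(&value, &regs[port], sizeof(value)); \
    return value; \
}

PORT_ACCESSORS(8)   /* emits write_port_8(),  read_port_8()  */
PORT_ACCESSORS(16)  /* emits write_port_16(), read_port_16() */
PORT_ACCESSORS(32)  /* emits write_port_32(), read_port_32() */
```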
|
| kcov.h |
    30  void kcov_task_init(struct task_struct *t);
    31  void kcov_task_exit(struct task_struct *t);
    33  #define kcov_prepare_switch(t) \  argument
    35  (t)->kcov_mode |= KCOV_IN_CTXSW; \
    38  #define kcov_finish_switch(t) \  argument
    40  (t)->kcov_mode &= ~KCOV_IN_CTXSW; \
   122  static inline void kcov_task_init(struct task_struct *t) {}  in kcov_task_init()  argument
   123  static inline void kcov_task_exit(struct task_struct *t) {}  in kcov_task_exit()  argument
   124  static inline void kcov_prepare_switch(struct task_struct *t) {}  in kcov_prepare_switch()  argument
   125  static inline void kcov_finish_switch(struct task_struct *t) {}  in kcov_finish_switch()  argument
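The two groups of hits show the usual config-gated header layout: real hook declarations when CONFIG_KCOV is enabled, empty static inline stubs otherwise, so call sites need no #ifdefs. A generic sketch of that pattern; the config option and function name are hypothetical:

```c
/* Sketch only: config-gated declaration plus no-op stub, mirroring the
 * kcov_task_init()/kcov_task_exit() layout above. */
#ifdef CONFIG_MYFEATURE
void myfeature_task_init(struct task_struct *t);
#else
static inline void myfeature_task_init(struct task_struct *t) { }
#endif
```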
|
| win_minmax.h |
    13  u32 t;  /* time measurement was taken */  member
    27  static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)  in minmax_reset()  argument
    29  struct minmax_sample val = { .t = t, .v = meas };  in minmax_reset()
    35  u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
    36  u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
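win_minmax keeps a windowed running minimum or maximum of timestamped samples (TCP BBR uses it for its bandwidth and RTT estimates). A sketch of the intended call pattern; the caller, clock source and sample are hypothetical, and minmax_get() is the accessor defined in the same header:

```c
/* Sketch only: keep the largest sample seen over the last 10 seconds. */
static u32 bw_estimate_update(struct minmax *bw, u32 now, u32 rate_sample)
{
    minmax_running_max(bw, 10 * HZ, now, rate_sample); /* feed one sample */
    return minmax_get(bw);                             /* current windowed max */
}

/* to start over: minmax_reset(bw, now, 0); */
```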
|
| kstack_erase.h |
    72  static inline void stackleak_task_init(struct task_struct *t)  in stackleak_task_init()  argument
    74  t->lowest_stack = stackleak_task_low_bound(t);  in stackleak_task_init()
    76  t->prev_lowest_stack = t->lowest_stack;  in stackleak_task_init()
    86  static inline void stackleak_task_init(struct task_struct *t) { }  in stackleak_task_init()  argument
|
| damon.h |
   422  struct damon_target *t, struct damon_region *r,
   627  bool (*target_valid)(struct damon_target *t);
   628  void (*cleanup_target)(struct damon_target *t);
   822  #define damon_for_each_region(r, t) \  argument
   823  list_for_each_entry(r, &t->regions_list, list)
   825  #define damon_for_each_region_from(r, t) \  argument
   828  #define damon_for_each_region_safe(r, next, t) \  argument
   831  #define damon_for_each_target(t, ctx) \  argument
   870  struct damon_target *t)  in damon_insert_region()  argument
   873  t->nr_regions++;  in damon_insert_region()
    [all …]
|
| thread_info.h |
   153  #define read_task_thread_flags(t) \  argument
   154  read_ti_thread_flags(task_thread_info(t))
   164  #define set_task_syscall_work(t, fl) \  argument
   166  #define test_task_syscall_work(t, fl) \  argument
   168  #define clear_task_syscall_work(t, fl) \  argument
   180  #define set_task_syscall_work(t, fl) \  argument
   181  set_ti_thread_flag(task_thread_info(t), TIF_##fl)
   182  #define test_task_syscall_work(t, fl) \  argument
   183  test_ti_thread_flag(task_thread_info(t), TIF_##fl)
   184  #define clear_task_syscall_work(t, fl) \  argument
    [all …]
|
| interrupt.h |
   696  void (*callback)(struct tasklet_struct *t);
   742  void tasklet_unlock(struct tasklet_struct *t);
   758  __tasklet_schedule(t);  in tasklet_schedule()
   766  __tasklet_hi_schedule(t);  in tasklet_hi_schedule()
   771  atomic_inc(&t->count);  in tasklet_disable_nosync()
   781  tasklet_disable_nosync(t);  in tasklet_disable_in_atomic()
   782  tasklet_unlock_spin_wait(t);  in tasklet_disable_in_atomic()
   788  tasklet_disable_nosync(t);  in tasklet_disable()
   789  tasklet_unlock_wait(t);  in tasklet_disable()
   796  atomic_dec(&t->count);  in tasklet_enable()
    [all …]
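The tasklet_* helpers schedule deferred softirq work and gate it with an atomic disable count (the listing shows tasklet_disable()/tasklet_enable() bumping and dropping t->count). A sketch of the usual usage, with a hypothetical driver structure; from_tasklet() is the container_of wrapper from the same header:

```c
/* Sketch only: typical tasklet setup/schedule flow; my_dev and the handler
 * names are hypothetical. */
struct my_dev {
    struct tasklet_struct bh;
    /* ... device state ... */
};

static void my_dev_bh(struct tasklet_struct *t)
{
    struct my_dev *dev = from_tasklet(dev, t, bh);

    /* deferred work runs here in softirq context */
}

static irqreturn_t my_dev_irq(int irq, void *data)
{
    struct my_dev *dev = data;

    tasklet_schedule(&dev->bh);  /* hand the heavy lifting to the tasklet */
    return IRQ_HANDLED;
}

/* probe:  tasklet_setup(&dev->bh, my_dev_bh);
 * remove: tasklet_kill(&dev->bh); */
```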
|
| nls.h |
    62  static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)  in nls_tolower()  argument
    64  unsigned char nc = t->charset2lower[c];  in nls_tolower()
    69  static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c)  in nls_toupper()  argument
    71  unsigned char nc = t->charset2upper[c];  in nls_toupper()
    76  static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,  in nls_strnicmp()  argument
    80  if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++))  in nls_strnicmp()
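nls_strnicmp() does a case-insensitive compare by folding each byte through the NLS table's charset2lower array. A standalone sketch of the same table-driven compare, using a plain ASCII lower-case table in place of a struct nls_table:

```c
/* Sketch only: table-driven case-insensitive compare in the style of
 * nls_strnicmp(); the ASCII table stands in for charset2lower. */
#include <stdio.h>

static unsigned char to_lower[256];

static void init_table(void)
{
    for (int i = 0; i < 256; i++)
        to_lower[i] = (i >= 'A' && i <= 'Z') ? i + 32 : i;
}

static int strnicmp_tbl(const unsigned char *s1, const unsigned char *s2, int n)
{
    while (n--)
        if (to_lower[*s1++] != to_lower[*s2++])
            return 1;   /* differ under the table's case folding */
    return 0;           /* equal */
}

int main(void)
{
    init_table();
    printf("%d\n", strnicmp_tbl((unsigned char *)"FOO", (unsigned char *)"foo", 3)); /* 0 */
    return 0;
}
```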
|
| watchdog.h |
   172  static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)  in watchdog_timeout_invalid()  argument
   185  return t > UINT_MAX / 1000 || t < wdd->min_timeout ||  in watchdog_timeout_invalid()
   187  t > wdd->max_timeout);  in watchdog_timeout_invalid()
   192  unsigned int t)  in watchdog_pretimeout_invalid()  argument
   194  return t && wdd->timeout && t >= wdd->timeout;  in watchdog_pretimeout_invalid()
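watchdog_timeout_invalid() guards against a unit-conversion overflow before the range checks: a timeout given in seconds must still fit in a 32-bit value once converted to milliseconds, hence the `t > UINT_MAX / 1000` test. A simplified standalone sketch of just that guard (the real check also consults max_hw_heartbeat_ms):

```c
/* Sketch only: simplified form of the overflow + bounds check above. */
#include <limits.h>
#include <stdbool.h>

static bool timeout_invalid(unsigned int t, unsigned int min_t, unsigned int max_t)
{
    return t > UINT_MAX / 1000 ||   /* t * 1000 ms would wrap a 32-bit value */
           t < min_t ||
           (max_t && t > max_t);
}
```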
|
| /include/asm-generic/ |
| ioctl.h |
     8  #define _IOC_TYPECHECK(t) (sizeof(t))  argument
    12  #define _IOC_TYPECHECK(t) \  argument
    13  ((sizeof(t) == sizeof(t[1]) && \
    14  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
    15  sizeof(t) : __invalid_size_argument_for_IOC)
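_IOC_TYPECHECK() is what makes the kernel-internal _IOR()/_IOW()/_IOWR() insist on a real type as their size argument: sizeof(t[1]) only compiles when t names a type (an integer expression such as sizeof(struct foo) cannot be subscripted), and sizes that do not fit the size field fall through to __invalid_size_argument_for_IOC, which has no definition, so the build fails. A hedged example of a definition the check accepts; the ioctl name and struct are hypothetical:

```c
/* Sketch only: MYDEV_SET_ARGS and struct my_args are hypothetical. */
struct my_args {
    __u32 flags;
    __u64 addr;
};

#define MYDEV_SET_ARGS  _IOW('M', 0x01, struct my_args)  /* ok: a type */
/*
 * _IOW('M', 0x02, sizeof(struct my_args)) would not build: _IOC_TYPECHECK()
 * forms sizeof(t[1]), and an integer expression cannot be subscripted.
 */
```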
|
| /include/scsi/ |
| scsi_transport.h |
    57  scsi_transport_reserve_target(struct scsi_transport_template * t, int space)  in scsi_transport_reserve_target()  argument
    59  BUG_ON(t->target_private_offset != 0);  in scsi_transport_reserve_target()
    60  t->target_private_offset = ALIGN(t->target_size, sizeof(void *));  in scsi_transport_reserve_target()
    61  t->target_size = t->target_private_offset + space;  in scsi_transport_reserve_target()
    64  scsi_transport_reserve_device(struct scsi_transport_template * t, int space)  in scsi_transport_reserve_device()  argument
    66  BUG_ON(t->device_private_offset != 0);  in scsi_transport_reserve_device()
    67  t->device_private_offset = ALIGN(t->device_size, sizeof(void *));  in scsi_transport_reserve_device()
    68  t->device_size = t->device_private_offset + space;  in scsi_transport_reserve_device()
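Both reserve helpers use the same arithmetic: round the current per-object allocation size up to pointer alignment, record that as the private-data offset, then grow the size by the requested space. A standalone sketch of that pattern with hypothetical names:

```c
/* Sketch only: aligned private-area reservation, modelled on
 * scsi_transport_reserve_target()/..._device(). */
#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct template {
    size_t size;            /* total per-object allocation size       */
    size_t private_offset;  /* where the transport's extra data lives */
};

static void reserve_private(struct template *t, size_t space)
{
    t->private_offset = ALIGN_UP(t->size, sizeof(void *));
    t->size = t->private_offset + space;
}

int main(void)
{
    struct template t = { .size = 132, .private_offset = 0 };

    reserve_private(&t, 24);
    printf("offset=%zu size=%zu\n", t.private_offset, t.size); /* 136, 160 */
    return 0;
}
```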
|
| /include/net/netfilter/ |
| nf_conntrack_tuple.h |
    89  t, t->dst.protonum,  in nf_ct_dump_tuple_ip()
    90  &t->src.u3.ip, ntohs(t->src.u.all),  in nf_ct_dump_tuple_ip()
    91  &t->dst.u3.ip, ntohs(t->dst.u.all));  in nf_ct_dump_tuple_ip()
    99  t, t->dst.protonum,  in nf_ct_dump_tuple_ipv6()
   100  t->src.u3.all, ntohs(t->src.u.all),  in nf_ct_dump_tuple_ipv6()
   101  t->dst.u3.all, ntohs(t->dst.u.all));  in nf_ct_dump_tuple_ipv6()
   107  switch (t->src.l3num) {  in nf_ct_dump_tuple()
   109  nf_ct_dump_tuple_ip(t);  in nf_ct_dump_tuple()
   112  nf_ct_dump_tuple_ipv6(t);  in nf_ct_dump_tuple()
   186  return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&  in nf_ct_tuple_mask_cmp()
    [all …]
|
| /include/linux/sunrpc/ |
| sched.h |
   148  #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)  argument
   149  #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)  argument
   150  #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))  argument
   151  #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)  argument
   152  #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)  argument
   153  #define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)  argument
   166  #define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)  argument
   168  #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)  argument
   169  #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)  argument
   170  #define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)  argument
    [all …]
|
| timer.h |
    29  int *t;  in rpc_set_timeo()  local
    32  t = &rt->ntimeouts[timer-1];  in rpc_set_timeo()
    33  if (ntimeo < *t) {  in rpc_set_timeo()
    34  if (*t > 0)  in rpc_set_timeo()
    35  (*t)--;  in rpc_set_timeo()
    39  *t = ntimeo;  in rpc_set_timeo()
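The rpc_set_timeo() hits show a small asymmetric estimator: when the new timeout count is lower than the stored one, the stored value decays by one; otherwise the worse value is adopted immediately. A sketch of just that rule with hypothetical names:

```c
/* Sketch only: improve slowly, degrade immediately, as in the
 * rpc_set_timeo() snippet above. */
static void update_ntimeouts(int *stored, int observed)
{
    if (observed < *stored) {
        if (*stored > 0)
            (*stored)--;        /* improve slowly */
    } else {
        *stored = observed;     /* adopt the worse value at once */
    }
}
```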
|
| /include/linux/sched/ |
| cputime.h |
    12  extern bool task_cputime(struct task_struct *t,
    14  extern u64 task_gtime(struct task_struct *t);
    16  static inline bool task_cputime(struct task_struct *t,  in task_cputime()  argument
    19  *utime = t->utime;  in task_cputime()
    20  *stime = t->stime;  in task_cputime()
    24  static inline u64 task_gtime(struct task_struct *t)  in task_gtime()  argument
    26  return t->gtime;  in task_gtime()
    31  static inline void task_cputime_scaled(struct task_struct *t,  in task_cputime_scaled()  argument
    35  *utimescaled = t->utimescaled;  in task_cputime_scaled()
    36  *stimescaled = t->stimescaled;  in task_cputime_scaled()
    [all …]
|
| task.h |
   116  refcount_inc(&t->usage);  in get_task_struct()
   117  return t;  in get_task_struct()
   122  return refcount_inc_not_zero(&t->usage) ? t : NULL;  in tryget_task_struct()
   125  extern void __put_task_struct(struct task_struct *t);
   128  static inline void put_task_struct(struct task_struct *t)  in put_task_struct()  argument
   130  if (!refcount_dec_and_test(&t->usage))  in put_task_struct()
   159  call_rcu(&t->rcu, __put_task_struct_rcu_cb);  in put_task_struct()
   164  static inline void put_task_struct_many(struct task_struct *t, int nr)  in DEFINE_FREE()
   166  if (refcount_sub_and_test(nr, &t->usage))  in DEFINE_FREE()
   167  __put_task_struct(t);  in DEFINE_FREE()
    [all …]
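get_task_struct()/put_task_struct() are the canonical refcount_t get/put pair: the getter bumps t->usage, the putter frees the object once the count hits zero (the task_struct variant adds an RCU-deferred path and a tryget helper for racy lookups). A generic sketch of the same pattern on a hypothetical object:

```c
/* Sketch only: my_obj, get_my_obj() and put_my_obj() are hypothetical;
 * refcount_inc()/refcount_dec_and_test() are the kernel primitives used above. */
struct my_obj {
    refcount_t usage;
    /* ... payload ... */
};

static inline struct my_obj *get_my_obj(struct my_obj *o)
{
    refcount_inc(&o->usage);    /* caller must already hold a reference */
    return o;
}

static inline void put_my_obj(struct my_obj *o)
{
    if (refcount_dec_and_test(&o->usage))
        kfree(o);               /* last reference gone */
}
```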
|
| /include/net/sctp/ |
| sctp.h |
   150  struct sctp_transport *t);
   561  if (t->dst && !dst_check(t->dst, t->dst_cookie))  in sctp_transport_dst_check()
   564  return t->dst;  in sctp_transport_dst_check()
   578  if (sp->udp_port && (!t || t->encap_port))  in __sctp_mtu_payload()
   606  if (t->pathmtu == pmtu)  in sctp_transport_pmtu_check()
   609  t->pathmtu = pmtu;  in sctp_transport_pmtu_check()
   621  return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -  in sctp_transport_pl_hlen()
   627  if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) &&  in sctp_transport_pl_reset()
   628  (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) {  in sctp_transport_pl_reset()
   638  sctp_transport_put(t);  in sctp_transport_pl_reset()
    [all …]
|
| /include/crypto/ |
| gf128mul.h |
   221  be128 t[256];  member
   225  void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
   227  static inline void gf128mul_free_4k(struct gf128mul_4k *t)  in gf128mul_free_4k()  argument
   229  kfree_sensitive(t);  in gf128mul_free_4k()
   236  struct gf128mul_4k *t[16];  member
   245  void gf128mul_free_64k(struct gf128mul_64k *t);
   246  void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
|
| /include/net/tc_act/ |
| tc_tunnel_key.h |
    30  struct tcf_tunnel_key *t = to_tunnel_key(a);  in is_tcf_tunnel_set()  local
    33  params = rcu_dereference_protected(t->params,  in is_tcf_tunnel_set()
    44  struct tcf_tunnel_key *t = to_tunnel_key(a);  in is_tcf_tunnel_release()  local
    47  params = rcu_dereference_protected(t->params,  in is_tcf_tunnel_release()
    58  struct tcf_tunnel_key *t = to_tunnel_key(a);  in tcf_tunnel_info()  local
    61  params = rcu_dereference_protected(t->params,  in tcf_tunnel_info()
|
| /include/media/ |
| v4l2-dv-timings.h |
    23  struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t);
    38  typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
    52  bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
    73  int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
    96  bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
   110  bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic);
   136  const struct v4l2_dv_timings *t, bool detailed);
   209  struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
|