
Searched refs:cnt (Results 1 – 25 of 58) sorted by relevance


/kernel/
softirq.c
122 int cnt; member
180 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt); in __local_bh_disable_ip()
209 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt); in __local_bh_enable()
237 if (curcnt != cnt) in __local_bh_enable_ip()
257 cnt = SOFTIRQ_OFFSET; in __local_bh_enable_ip()
258 __local_bh_enable(cnt, false); in __local_bh_enable_ip()
342 __preempt_count_add(cnt); in __local_bh_disable_ip()
350 if (preempt_count() == cnt) { in __local_bh_disable_ip()
364 if (preempt_count() == cnt) in __local_bh_enable()
370 __preempt_count_sub(cnt); in __local_bh_enable()
[all …]
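
Every hit in kernel/softirq.c above is the same pattern: a per-CPU nesting counter (softirq_ctrl.cnt, mirrored into the preempt count via __preempt_count_add()/__preempt_count_sub()) that __local_bh_disable_ip() raises and __local_bh_enable() lowers, with deferred work running only once the count drops back to its base. A minimal user-space sketch of that nesting-counter idea; the my_bh_* names are invented and a thread-local int stands in for the per-CPU counter:

#include <stdio.h>

static _Thread_local int bh_cnt;        /* stand-in for softirq_ctrl.cnt */

static void my_bh_disable(int cnt)
{
        bh_cnt += cnt;                  /* mirrors __this_cpu_add_return() */
}

static void my_bh_enable(int cnt)
{
        bh_cnt -= cnt;                  /* mirrors __this_cpu_sub_return() */
        if (bh_cnt == 0)                /* outermost enable only */
                printf("count back to base: run pending work here\n");
}

int main(void)
{
        my_bh_disable(1);
        my_bh_disable(1);               /* nested: inner enable stays quiet */
        my_bh_enable(1);
        my_bh_enable(1);                /* outermost: deferred work runs */
        return 0;
}
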
/kernel/bpf/
memalloc.c
226 if (i >= cnt) in alloc_bulk()
235 if (i >= cnt) in alloc_bulk()
266 int cnt = 0; in free_all() local
270 cnt++; in free_all()
272 return cnt; in free_all()
338 int cnt; in free_bulk() local
349 cnt = 0; in free_bulk()
430 int cnt; in bpf_mem_refill() local
490 int cnt = 1; in prefill_mem_cache() local
498 cnt = 4; in prefill_mem_cache()
[all …]
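
In kernel/bpf/memalloc.c, cnt serves two roles: a loop bound in alloc_bulk() and a running tally in free_all(), which drains a free list and reports how many nodes it released. A sketch of that drain-and-count shape over a plain singly-linked list, with the kernel's llist and per-CPU caches left out:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
};

/* Pop and free every node, returning how many were released,
 * as free_all() does for the allocator's free lists. */
static int free_all(struct node **head)
{
        int cnt = 0;

        while (*head) {
                struct node *n = *head;

                *head = n->next;
                free(n);
                cnt++;
        }
        return cnt;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->next = head;
                head = n;
        }
        printf("freed %d nodes\n", free_all(&head));
        return 0;
}
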
rqspinlock.c
103 int cnt = min(RES_NR_HELD, rqh->cnt); in check_deadlock_AA() local
109 for (int i = 0; i < cnt - 1; i++) { in check_deadlock_AA()
125 int rqh_cnt = min(RES_NR_HELD, rqh->cnt); in check_deadlock_ABBA()
138 int real_cnt = READ_ONCE(rqh_cpu->cnt); in check_deadlock_ABBA()
139 int cnt = min(RES_NR_HELD, real_cnt); in check_deadlock_ABBA() local
165 remote_lock = READ_ONCE(rqh_cpu->locks[cnt - 1]); in check_deadlock_ABBA()
179 for (int i = 0; i < cnt - 1; i++) { in check_deadlock_ABBA()
369 int cnt = _Q_PENDING_LOOPS; in resilient_queued_spin_lock_slowpath() local
371 (VAL != _Q_PENDING_VAL) || !cnt--); in resilient_queued_spin_lock_slowpath()
681 bpf_stream_printk(ss, "Total held locks = %d\n", rqh->cnt); in bpf_prog_report_rqspinlock_violation()
[all …]
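
The rqspinlock.c hits sketch deadlock detection over a small fixed-size table of held locks: the count is clamped with min(RES_NR_HELD, rqh->cnt) because the raw counter keeps growing after the table fills, and the AA scan runs to cnt - 1, presumably because the last slot holds the acquisition currently in flight. A sketch under those assumptions; the RES_NR_HELD value and the struct layout here are guesses for illustration:

#include <stdbool.h>
#include <stdio.h>

#define RES_NR_HELD 16                  /* table capacity (value assumed) */

struct held_locks {
        int cnt;                        /* may exceed the table size */
        void *locks[RES_NR_HELD];
};

/* AA check: are we re-acquiring a lock already in our own table?
 * The entry at cnt - 1 is assumed to be the lock being taken right
 * now, so the scan stops just short of it. */
static bool check_deadlock_AA(struct held_locks *rqh, void *lock)
{
        int cnt = rqh->cnt < RES_NR_HELD ? rqh->cnt : RES_NR_HELD;

        for (int i = 0; i < cnt - 1; i++) {
                if (rqh->locks[i] == lock)
                        return true;
        }
        return false;
}

int main(void)
{
        int a, b;
        struct held_locks h = { .cnt = 2, .locks = { &a, &b } };

        printf("AA on a: %d\n", check_deadlock_AA(&h, &a));  /* 1 */
        printf("AA on b: %d\n", check_deadlock_AA(&h, &b));  /* 0: in flight */
        return 0;
}
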
net_namespace.c
108 int cnt, idx; in bpf_netns_link_release() local
127 cnt = link_count(net, type); in bpf_netns_link_release()
128 if (!cnt) { in bpf_netns_link_release()
135 new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL); in bpf_netns_link_release()
426 int cnt, err; in netns_bpf_link_attach() local
430 cnt = link_count(net, type); in netns_bpf_link_attach()
431 if (cnt >= netns_bpf_max_progs(type)) { in netns_bpf_link_attach()
455 run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL); in netns_bpf_link_attach()
btf.c
243 u32 cnt; member
248 u32 cnt; member
3975 cnt = ret; in btf_parse_fields()
4057 rec->cnt++; in btf_parse_fields()
7590 u16 cnt; member
8637 for (i = set->cnt; i < set->cnt + add_set->cnt; i++) in btf_populate_kfunc_set()
8640 set->cnt += add_set->cnt; in btf_populate_kfunc_set()
9504 u32 cnt; in bpf_struct_ops_find_value() local
9511 cnt = btf->struct_ops_tab->cnt; in bpf_struct_ops_find_value()
9526 u32 cnt; in bpf_struct_ops_find() local
[all …]
verifier.c
3600 cnt++; in mark_reg_read()
3900 int cnt; member
3982 cnt++; in push_jmp_history()
4033 if (cnt && st->jmp_history[cnt - 1].idx == i) { in get_prev_insn_idx()
21222 cnt = 0; in convert_ctx_accesses()
21446 if (cnt == 0 || cnt >= INSN_BUF_SIZE || in convert_ctx_accesses()
21866 *cnt = 4; in __fixup_collection_insert_kfunc()
21879 *cnt = 0; in fixup_kfunc_call()
21996 info[cnt + 1].start = info[cnt].start; in add_hidden_subprog()
22213 if (cnt == 0 || cnt >= INSN_BUF_SIZE) { in do_misc_fixups()
[all …]
cgroup.c
388 u32 cnt = 0; in prog_list_length() local
395 cnt++; in prog_list_length()
397 return cnt; in prog_list_length()
414 u32 cnt; in hierarchy_allows_attach() local
420 if (cnt == 1) in hierarchy_allows_attach()
455 cnt = 0; in compute_effective_progs()
478 cnt++; in compute_effective_progs()
1221 int cnt, ret = 0, i; in __cgroup_bpf_query() local
1294 if (++i == cnt) in __cgroup_bpf_query()
1309 prog_ids += cnt; in __cgroup_bpf_query()
[all …]
kmem_cache_iter.c
117 loff_t cnt = 0; in kmem_cache_iter_seq_start() local
131 if (cnt == *pos) { in kmem_cache_iter_seq_start()
141 cnt++; in kmem_cache_iter_seq_start()
/kernel/trace/
trace_kdb.c
27 int cnt = 0, cpu; in ftrace_dump_buf() local
59 if (!cnt) in ftrace_dump_buf()
61 cnt++; in ftrace_dump_buf()
74 if (!cnt) in ftrace_dump_buf()
98 int cnt; in kdb_ftdump() local
125 cnt = trace_total_entries(NULL); in kdb_ftdump()
127 cnt = trace_total_entries_cpu(NULL, cpu_file); in kdb_ftdump()
128 skip_entries = max(cnt + skip_entries, 0); in kdb_ftdump()
trace.c
854 cnt -= ret; in trace_pid_write()
1822 cnt--; in trace_get_user()
1835 cnt--; in trace_get_user()
1858 cnt--; in trace_get_user()
1888 cnt = len; in trace_seq_to_buffer()
1892 return cnt; in trace_seq_to_buffer()
9234 int cnt; in create_trace_option_files() local
9260 for (cnt = 0; opts[cnt].name; cnt++) in create_trace_option_files()
9279 for (cnt = 0; opts[cnt].name; cnt++) { in create_trace_option_files()
10624 cnt++; in ftrace_dump_one()
[all …]
bpf_trace.c
2465 u32 cnt; member
2925 cnt = attr->link_create.kprobe_multi.cnt; in bpf_kprobe_multi_link_attach()
2926 if (!cnt) in bpf_kprobe_multi_link_attach()
3003 link->cnt = cnt; in bpf_kprobe_multi_link_attach()
3072 u32 cnt; member
3090 if (cnt) in bpf_uprobe_unregister()
3361 cnt = attr->link_create.uprobe_multi.cnt; in bpf_uprobe_multi_link_attach()
3436 link->cnt = cnt; in bpf_uprobe_multi_link_attach()
3556 int cnt, err; in __bpf_dynptr_copy_str() local
3574 if (cnt < 0) in __bpf_dynptr_copy_str()
[all …]
trace_events.c
1503 if (!cnt) in ftrace_event_write()
1836 *ppos += cnt; in event_enable_write()
1838 return cnt; in event_enable_write()
1936 ret = cnt; in system_enable_write()
1939 *ppos += cnt; in system_enable_write()
2144 if (cnt >= PAGE_SIZE) in event_filter_write()
2165 *ppos += cnt; in event_filter_write()
2167 return cnt; in event_filter_write()
2309 *ppos += cnt; in subsystem_filter_write()
2311 return cnt; in subsystem_filter_write()
[all …]
trace_mmiotrace.c
122 unsigned long cnt = atomic_xchg(&dropped_count, 0); in count_overruns() local
126 cnt += over - prev_overruns; in count_overruns()
128 return cnt; in count_overruns()
132 char __user *ubuf, size_t cnt, loff_t *ppos) in mmio_read() argument
161 ret = trace_seq_to_user(s, ubuf, cnt); in mmio_read()
trace_selftest.c
66 unsigned long flags, cnt = 0; in trace_test_buffer() local
73 cnt = ring_buffer_entries(buf->buffer); in trace_test_buffer()
93 *count = cnt; in trace_test_buffer()
183 static int trace_selftest_ops(struct trace_array *tr, int cnt) in trace_selftest_ops() argument
194 pr_info("Testing dynamic ftrace ops #%d: ", cnt); in trace_selftest_ops()
219 if (cnt > 1) { in trace_selftest_ops()
234 if (cnt > 1) { in trace_selftest_ops()
273 if (cnt > 1) { in trace_selftest_ops()
307 if (cnt > 1) { in trace_selftest_ops()
335 if (cnt > 1) in trace_selftest_ops()
trace_sched_switch.c
599 size_t cnt, loff_t *ppos) in tracing_saved_cmdlines_size_read() argument
610 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); in tracing_saved_cmdlines_size_read()
639 size_t cnt, loff_t *ppos) in tracing_saved_cmdlines_size_write() argument
644 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); in tracing_saved_cmdlines_size_write()
656 *ppos += cnt; in tracing_saved_cmdlines_size_write()
658 return cnt; in tracing_saved_cmdlines_size_write()
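
The tracing_saved_cmdlines_size hits show the standard shape of a tracefs control file: the read side formats state into a local buffer and defers to simple_read_from_buffer(), while the write side parses the user string with kstrtoul_from_user() and advances *ppos by cnt on success. The cnt/ppos bookkeeping behind simple_read_from_buffer() is easy to restate in user space; this is my own restatement of its semantics, not kernel code, and it uses memcpy() where the kernel copies to user memory:

#include <stdio.h>
#include <string.h>

/* Copy at most cnt bytes starting at *ppos out of a buffer holding
 * 'available' bytes, then advance *ppos; 0 means end of file. */
static long simple_read(char *to, size_t cnt, long long *ppos,
                        const char *from, size_t available)
{
        long long pos = *ppos;
        size_t ret;

        if (pos < 0)
                return -1;
        if ((size_t)pos >= available || !cnt)
                return 0;
        ret = available - (size_t)pos;
        if (ret > cnt)
                ret = cnt;
        memcpy(to, from + pos, ret);
        *ppos = pos + ret;
        return (long)ret;
}

int main(void)
{
        const char state[] = "128\n";   /* e.g. the saved cmdlines size */
        char out[8];
        long long pos = 0;
        long n;

        while ((n = simple_read(out, 2, &pos, state, strlen(state))) > 0)
                printf("read %ld byte(s): %.*s\n", n, (int)n, out);
        return 0;
}
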
ftrace.c
960 return cnt; in ftrace_profile_write()
3802 int cnt; in ftrace_allocate_records() local
3831 return cnt; in ftrace_allocate_records()
3855 int cnt; in ftrace_allocate_pages() local
5751 if (!cnt) in ftrace_regex_write()
6962 if (!cnt) in ftrace_graph_write()
7357 cnt++; in referenced_filters()
7527 int cnt; in ftrace_module_enable() local
7544 cnt = 0; in ftrace_module_enable()
8444 if (!cnt) in pid_write()
[all …]
ring_buffer_benchmark.c
241 int cnt = 0; in ring_buffer_producer() local
268 cnt++; in ring_buffer_producer()
269 if (consumer && !(cnt % wakeup_interval)) in ring_buffer_producer()
282 if (cnt % wakeup_interval) in ring_buffer_producer()
trace_hwlat.c
678 size_t cnt, loff_t *ppos) in hwlat_mode_write() argument
685 if (cnt >= sizeof(buf)) in hwlat_mode_write()
688 if (copy_from_user(buf, ubuf, cnt)) in hwlat_mode_write()
691 buf[cnt] = 0; in hwlat_mode_write()
710 ret = cnt; in hwlat_mode_write()
720 *ppos += cnt; in hwlat_mode_write()
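
hwlat_mode_write() above is the matching write-side idiom: refuse anything that will not fit the local buffer (cnt >= sizeof(buf)), copy the user bytes, NUL-terminate explicitly with buf[cnt] = 0, and only advance *ppos and return cnt once the input parsed. A user-space restatement of that guard sequence, with memcpy() standing in for copy_from_user() and -1 for the kernel's error codes:

#include <stdio.h>
#include <string.h>

static long mode_write(const char *ubuf, size_t cnt, long long *ppos)
{
        char buf[64];

        if (cnt >= sizeof(buf))         /* leave room for the NUL */
                return -1;

        memcpy(buf, ubuf, cnt);         /* copy_from_user() stand-in */
        buf[cnt] = 0;

        printf("mode string: \"%s\"\n", buf);   /* parsing would go here */

        *ppos += cnt;                   /* consume the whole write */
        return (long)cnt;
}

int main(void)
{
        long long pos = 0;
        long ret = mode_write("round-robin", 11, &pos);

        printf("ret=%ld pos=%lld\n", ret, pos);
        return 0;
}
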
trace_events_inject.c
284 event_inject_write(struct file *filp, const char __user *ubuf, size_t cnt, in event_inject_write() argument
293 if (cnt >= PAGE_SIZE) in event_inject_write()
296 buf = memdup_user_nul(ubuf, cnt); in event_inject_write()
320 return cnt; in event_inject_write()
/kernel/cgroup/
dmem.c
72 struct page_counter cnt; member
115 page_counter_set_min(&pool->cnt, val); in set_resource_min()
121 page_counter_set_low(&pool->cnt, val); in set_resource_low()
127 page_counter_set_max(&pool->cnt, val); in set_resource_max()
212 if (!pool->cnt.parent) in pool_parent()
215 return container_of(pool->cnt.parent, typeof(*pool), cnt); in pool_parent()
227 climit = &limit_pool->cnt; in dmem_cgroup_calculate_protection()
245 climit, &found_pool->cnt, true); in dmem_cgroup_calculate_protection()
297 ctest = &test_pool->cnt; in dmem_cgroup_state_evict_valuable()
341 page_counter_init(&pool->cnt, in alloc_pool_single()
[all …]
cpuset-v1.c
67 fmp->cnt = 0; in fmeter_init()
90 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; in fmeter_update()
91 fmp->cnt = 0; in fmeter_update()
99 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); in fmeter_markevent()
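
The cpuset-v1.c fmeter is a fixed-point low-pass filter: fmeter_markevent() accumulates events into cnt (clamped at FM_MAXCNT), and fmeter_update() decays the running value and folds the pending count in via ((FM_SCALE - FM_COEF) * cnt) / FM_SCALE. The fold-in and clamp lines below come straight from the hits above; the decay step and the constant values are my recollection of the kernel's fmeter, simplified here to one decay per update call:

#include <stdio.h>

#define FM_COEF   933                   /* decay coefficient (assumed) */
#define FM_SCALE  1000
#define FM_MAXCNT 1000000

struct fmeter {
        int val;                        /* filtered events-per-tick value */
        int cnt;                        /* events since the last update */
};

static void fmeter_update(struct fmeter *fmp)
{
        fmp->val = (FM_COEF * fmp->val) / FM_SCALE;             /* decay */
        fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
        fmp->cnt = 0;
}

static void fmeter_markevent(struct fmeter *fmp)
{
        int cnt = fmp->cnt + FM_SCALE;

        fmp->cnt = cnt < FM_MAXCNT ? cnt : FM_MAXCNT;           /* min() */
}

int main(void)
{
        struct fmeter fm = { 0, 0 };

        /* One event per tick: val climbs toward FM_SCALE. */
        for (int tick = 0; tick < 5; tick++) {
                fmeter_markevent(&fm);
                fmeter_update(&fm);
                printf("tick %d: val=%d\n", tick, fm.val);
        }
        return 0;
}
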
/kernel/sched/
debug.c
135 if (cnt > 63) in sched_feat_write()
136 cnt = 63; in sched_feat_write()
141 buf[cnt] = 0; in sched_feat_write()
154 *ppos += cnt; in sched_feat_write()
156 return cnt; in sched_feat_write()
178 if (cnt > 15) in sched_scaling_write()
179 cnt = 15; in sched_scaling_write()
196 return cnt; in sched_scaling_write()
227 cnt = 15; in sched_dynamic_write()
241 return cnt; in sched_dynamic_write()
[all …]
/kernel/unwind/
deferred.c
24 static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt) in try_assign_cnt() argument
28 return try_cmpxchg(&info->id.cnt, &old, cnt); in try_assign_cnt()
33 static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt) in try_assign_cnt() argument
35 info->id.cnt = cnt; in try_assign_cnt()
80 u32 cnt = 1; in get_cookie() local
86 cnt |= __this_cpu_read(unwind_ctx_ctr) + 2; in get_cookie()
87 if (try_assign_cnt(info, cnt)) { in get_cookie()
89 __this_cpu_write(unwind_ctx_ctr, cnt); in get_cookie()
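
In kernel/unwind/deferred.c, cnt carries the per-task cookie, and try_assign_cnt() installs it with a single try_cmpxchg() so concurrent callers race safely: only the caller who moves the field away from its unassigned value wins, and losers observe the winner's cookie. A sketch of that one-shot assignment with C11 atomics standing in for try_cmpxchg(), assuming 0 means "not assigned yet" (the snippet elides old's initialization):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t id_cnt;         /* stand-in for info->id.cnt */

static bool try_assign_cnt(uint32_t cnt)
{
        uint32_t old = 0;               /* succeed only if still unassigned */

        return atomic_compare_exchange_strong(&id_cnt, &old, cnt);
}

int main(void)
{
        printf("first:  %d\n", try_assign_cnt(42));     /* 1: installed 42 */
        printf("second: %d\n", try_assign_cnt(7));      /* 0: lost the race */
        printf("value:  %u\n", atomic_load(&id_cnt));   /* 42 */
        return 0;
}
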
/kernel/irq/
irqdesc.c
178 if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt)) in irq_find_free_area()
505 for (i = 0; i < cnt; i++) { in alloc_descs()
511 for (i = 0; i < cnt; i++) { in alloc_descs()
636 for (i = 0; i < cnt; i++) { in alloc_descs()
789 if (from >= nr_irqs || (from + cnt) > nr_irqs) in irq_free_descs()
793 for (i = 0; i < cnt; i++) in irq_free_descs()
816 if (!cnt) in __irq_alloc_descs()
834 start = irq_find_free_area(from, cnt); in __irq_alloc_descs()
838 if (start + cnt > nr_irqs) { in __irq_alloc_descs()
839 if (!irq_expand_nr_irqs(start + cnt)) in __irq_alloc_descs()
[all …]
devres.c
151 unsigned int cnt; member
158 irq_free_descs(this->from, this->cnt); in devm_irq_desc_release()
179 unsigned int cnt, int node, struct module *owner, in __devm_irq_alloc_descs() argument
189 base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity); in __devm_irq_alloc_descs()
196 dr->cnt = cnt; in __devm_irq_alloc_descs()

Completed in 155 milliseconds
