/drivers/gpu/drm/i915/gt/

selftest_slpc.c
     499  struct slpc_thread *threads;  in live_slpc_tile_interaction() local
     502  threads = kcalloc(I915_MAX_GT, sizeof(*threads), GFP_KERNEL);  in live_slpc_tile_interaction()
     503  if (!threads)  in live_slpc_tile_interaction()
     509  if (IS_ERR(threads[i].worker)) {  in live_slpc_tile_interaction()
     510  ret = PTR_ERR(threads[i].worker);  in live_slpc_tile_interaction()
     514  threads[i].gt = gt;  in live_slpc_tile_interaction()
     516  kthread_queue_work(threads[i].worker, &threads[i].work);  in live_slpc_tile_interaction()
     522  if (IS_ERR_OR_NULL(threads[i].worker))  in live_slpc_tile_interaction()
     525  kthread_flush_work(&threads[i].work);  in live_slpc_tile_interaction()
     526  status = READ_ONCE(threads[i].result);  in live_slpc_tile_interaction()
     [all …]
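
The selftest_slpc.c hits trace a complete kthread_worker lifecycle: kcalloc() an array of per-instance slots, create one worker per slot, queue a work item on each, then flush and reap. A minimal sketch of that pattern follows; the names (my_thread, my_work_fn, run_workers, "my-worker/%u") are hypothetical stand-ins, not the selftest's own code.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/slab.h>

/* Hypothetical sketch of the worker-array pattern, not the selftest itself. */
struct my_thread {
	struct kthread_worker *worker;
	struct kthread_work work;
	int result;
};

static void my_work_fn(struct kthread_work *work)
{
	struct my_thread *t = container_of(work, struct my_thread, work);

	/* ... run one test instance here ... */
	WRITE_ONCE(t->result, 0);
}

static int run_workers(unsigned int count)
{
	struct my_thread *threads;
	unsigned int i;
	int ret = 0;

	threads = kcalloc(count, sizeof(*threads), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		threads[i].worker = kthread_create_worker(0, "my-worker/%u", i);
		if (IS_ERR(threads[i].worker)) {
			ret = PTR_ERR(threads[i].worker);
			break;
		}
		kthread_init_work(&threads[i].work, my_work_fn);
		kthread_queue_work(threads[i].worker, &threads[i].work);
	}

	for (i = 0; i < count; i++) {
		/* Slots past a creation failure are NULL (kcalloc zeroes). */
		if (IS_ERR_OR_NULL(threads[i].worker))
			continue;
		kthread_flush_work(&threads[i].work);	/* wait for completion */
		if (!ret)
			ret = READ_ONCE(threads[i].result);
		kthread_destroy_worker(threads[i].worker);
	}

	kfree(threads);
	return ret;
}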
|
selftest_hangcheck.c
     968  struct active_engine *threads;  in __igt_reset_engines() local
     989  threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);  in __igt_reset_engines()
     990  if (!threads)  in __igt_reset_engines()
    1012  memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);  in __igt_reset_engines()
    1016  threads[tmp].resets =  in __igt_reset_engines()
    1025  threads[tmp].engine = other;  in __igt_reset_engines()
    1026  threads[tmp].flags = flags;  in __igt_reset_engines()
    1041  &threads[tmp].work);  in __igt_reset_engines()
    1191  if (!threads[tmp].worker)  in __igt_reset_engines()
    1209  threads[tmp].resets !=  in __igt_reset_engines()
    [all …]
|
gen7_renderclear.c
     266  u32 threads = bv->max_threads - 1;  in gen7_emit_vfe_state() local
     275  *cs++ = threads << 16 | 1 << 8 | mode << 2;  in gen7_emit_vfe_state()
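
Line 275 packs three fields into a single command-stream dword. As a standalone helper it would look roughly like the sketch below; the field layout (a thread count stored minus one in bits 31:16, a constant in bits 15:8, a mode field from bit 2) is read directly off the snippet, while the field meanings are an assumption, not taken from hardware documentation.

#include <linux/types.h>

/* Hypothetical helper mirroring line 275; field meanings inferred, not authoritative. */
static inline u32 pack_vfe_state_dword(u32 max_threads, u32 mode)
{
	u32 threads = max_threads - 1;	/* the hardware field appears to be N - 1 encoded */

	return threads << 16 | 1 << 8 | mode << 2;
}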
|
/drivers/firmware/psci/

psci_checker.c
     370  struct task_struct **threads;  in suspend_tests() local
     373  threads = kmalloc_array(nb_available_cpus, sizeof(*threads),  in suspend_tests()
     375  if (!threads)  in suspend_tests()
     405  threads[nb_threads++] = thread;  in suspend_tests()
     421  wake_up_process(threads[i]);  in suspend_tests()
     429  err += kthread_park(threads[i]);  in suspend_tests()
     430  err += kthread_stop(threads[i]);  in suspend_tests()
     434  kfree(threads);  in suspend_tests()
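
psci_checker collects plain kthreads in an array, wakes them as a group, and later parks and stops each one. A minimal sketch of that create/wake/park/stop lifecycle, with hypothetical names (my_test_fn, run_tests):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical thread body: parks when asked, exits on kthread_stop(). */
static int my_test_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		/* ... run one test iteration ... */
		cond_resched();
	}
	return 0;
}

static int run_tests(unsigned int nr_cpus)
{
	struct task_struct **threads;
	unsigned int i, nb_threads = 0;
	int err = 0;

	threads = kmalloc_array(nr_cpus, sizeof(*threads), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	for (i = 0; i < nr_cpus; i++) {
		/* Created sleeping: the thread only runs after wake_up_process(). */
		struct task_struct *thread = kthread_create(my_test_fn, NULL,
							    "my-test/%u", i);
		if (IS_ERR(thread))
			continue;	/* tolerate failures, count successes */
		threads[nb_threads++] = thread;
	}

	for (i = 0; i < nb_threads; i++)
		wake_up_process(threads[i]);

	/* ... let the test run ... */

	for (i = 0; i < nb_threads; i++) {
		err += kthread_park(threads[i]);	/* quiesce first */
		err += kthread_stop(threads[i]);	/* then reap the exit code */
	}

	kfree(threads);
	return err;
}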
|
/drivers/gpu/drm/i915/selftests/

i915_request.c
     474  threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);  in mock_breadcrumbs_smoketest()
     475  if (!threads)  in mock_breadcrumbs_smoketest()
     503  threads[n].t = &t;  in mock_breadcrumbs_smoketest()
     505  threads[n].result = 0;  in mock_breadcrumbs_smoketest()
     538  kfree(threads);  in mock_breadcrumbs_smoketest()
    1628  threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);  in live_parallel_engines()
    1629  if (!threads)  in live_parallel_engines()
    1683  kfree(threads);  in live_parallel_engines()
    1762  threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);  in live_breadcrumbs_smoketest()
    1763  if (!threads) {  in live_breadcrumbs_smoketest()
    [all …]
|
/drivers/dma-buf/

st-dma-fence-chain.c
     446  struct task_struct **threads;  in find_race() local
     455  threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);  in find_race()
     456  if (!threads) {  in find_race()
     463  threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);  in find_race()
     464  if (IS_ERR(threads[i])) {  in find_race()
     469  get_task_struct(threads[i]);  in find_race()
     479  ret = kthread_stop_put(threads[i]);  in find_race()
     483  kfree(threads);  in find_race()
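
The dma-fence-chain test shows why get_task_struct() matters here: the caller must hold a reference so the task_struct stays valid until the thread is stopped, and kthread_stop_put() (a relatively recent helper) stops the thread and drops that reference in one call. A sketch with hypothetical names (my_race_fn, run_racers):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/slab.h>

/* Hypothetical racer body; the real test hammers a shared fence chain. */
static int my_race_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... exercise the shared object under test ... */
		cond_resched();
	}
	return 0;
}

static int run_racers(unsigned int ncpus, void *data)
{
	struct task_struct **threads;
	unsigned int i;
	int ret = 0;

	threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	for (i = 0; i < ncpus; i++) {
		threads[i] = kthread_run(my_race_fn, data, "my-racer/%u", i);
		if (IS_ERR(threads[i])) {
			ret = PTR_ERR(threads[i]);
			ncpus = i;	/* only stop the threads that started */
			break;
		}
		/* Hold a reference so the task_struct outlives the thread. */
		get_task_struct(threads[i]);
	}

	for (i = 0; i < ncpus; i++) {
		/* Stops the thread and drops the reference taken above. */
		int err = kthread_stop_put(threads[i]);

		if (err && !ret)
			ret = err;
	}

	kfree(threads);
	return ret;
}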
|
/drivers/acpi/acpica/

dbexec.c
     533  if (info->threads && (info->num_created < info->num_threads)) {  in acpi_db_method_thread()
     534  info->threads[info->num_created++] = acpi_os_get_thread_id();  in acpi_db_method_thread()
     787  acpi_gbl_db_method_info.threads = acpi_os_allocate(size);  in acpi_db_create_execution_threads()
     788  if (acpi_gbl_db_method_info.threads == NULL) {  in acpi_db_create_execution_threads()
     795  memset(acpi_gbl_db_method_info.threads, 0, size);  in acpi_db_create_execution_threads()
     873  acpi_os_free(acpi_gbl_db_method_info.threads);  in acpi_db_create_execution_threads()
     874  acpi_gbl_db_method_info.threads = NULL;  in acpi_db_create_execution_threads()
|
aclocal.h
    1200  acpi_thread_id *threads;  member
|
/drivers/md/dm-vdo/

vdo.c
     412  struct vdo_thread *thread = &vdo->threads[thread_id];  in vdo_make_thread()
     556  struct vdo_thread, __func__, &vdo->threads);  in vdo_make()
     625  if (vdo->threads == NULL)  in finish_vdo()
     632  vdo_finish_work_queue(vdo->threads[i].queue);  in finish_vdo()
     702  if (vdo->threads != NULL) {  in vdo_destroy()
     704  free_listeners(&vdo->threads[i]);  in vdo_destroy()
     707  vdo_free(vdo_forget(vdo->threads));  in vdo_destroy()
     988  struct vdo_thread *thread = &vdo->threads[thread_id];  in vdo_register_read_only_listener()
    1056  vdo->threads[id].is_read_only = is_read_only;  in vdo_enable_read_only_entry()
    1247  thread = &vdo->threads[thread_id];  in vdo_enter_read_only_mode()
    [all …]
|
dump.c
      69  if (((dump_options_requested & FLAG_SHOW_QUEUES) != 0) && (vdo->threads != NULL)) {  in do_dump()
      73  vdo_dump_work_queue(vdo->threads[id].queue);  in do_dump()
|
completion.c
     123  vdo_enqueue_work_queue(vdo->threads[thread_id].queue, completion);  in vdo_enqueue_completion()
|
vdo.h
     168  struct vdo_thread *threads;  member
|
io-submitter.c
     433  bio_queue_data->queue = vdo->threads[vdo->thread_config.bio_threads[i]].queue;  in vdo_make_io_submitter()
|
/drivers/dma/

dmatest.c
     246  struct list_head threads;  member
     259  list_for_each_entry(thread, &dtc->threads, node) {  in is_threaded_test_run()
     275  list_for_each_entry(thread, &dtc->threads, node) {  in is_threaded_test_pending()
     954  list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {  in dmatest_cleanup_channel()
    1013  list_add_tail(&thread->node, &dtc->threads);  in dmatest_add_threads()
    1035  INIT_LIST_HEAD(&dtc->threads);  in dmatest_add_channel()
    1141  list_for_each_entry(thread, &dtc->threads, node) {  in run_pending_tests()
    1329  list_for_each_entry(thread, &dtc->threads, node) {  in dmatest_test_list_get()
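
dmatest keeps its per-channel threads on a plain list_head rather than an array, so threads can be added and removed dynamically: the head is initialized once (line 1035), list_add_tail() links each new thread in, iteration uses list_for_each_entry(), and cleanup uses the _safe variant because entries are deleted mid-walk. The pattern in miniature, with hypothetical my_chan/my_thread types:

#include <linux/list.h>
#include <linux/slab.h>

struct my_thread {
	struct list_head node;		/* links this thread into chan->threads */
	bool done;
};

struct my_chan {
	struct list_head threads;	/* list of struct my_thread, via .node */
};

static void my_chan_init(struct my_chan *chan)
{
	INIT_LIST_HEAD(&chan->threads);
}

static void my_chan_add(struct my_chan *chan, struct my_thread *thread)
{
	list_add_tail(&thread->node, &chan->threads);
}

static bool my_chan_all_done(struct my_chan *chan)
{
	struct my_thread *thread;

	list_for_each_entry(thread, &chan->threads, node) {
		if (!thread->done)
			return false;
	}
	return true;
}

static void my_chan_cleanup(struct my_chan *chan)
{
	struct my_thread *thread, *tmp;

	/* _safe variant: the current entry is freed while walking the list. */
	list_for_each_entry_safe(thread, tmp, &chan->threads, node) {
		list_del(&thread->node);
		kfree(thread);
	}
}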
|
/drivers/ntb/test/

ntb_perf.c
     201  struct perf_thread threads[MAX_THREADS_CNT];  member
    1072  wake_up(&perf->threads[tidx].dma_wait);  in perf_terminate_test()
    1073  cancel_work_sync(&perf->threads[tidx].work);  in perf_terminate_test()
    1094  pthr = &perf->threads[tidx];  in perf_submit_test()
    1128  pthr = &perf->threads[tidx];  in perf_read_stats()
    1160  pthr = &perf->threads[tidx];  in perf_init_threads()
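
The teardown order on lines 1072-1073 is deliberate: a work item that may be sleeping on a waitqueue must be woken before cancel_work_sync() waits for it, or the cancel can block indefinitely. A sketch of the same shutdown pattern; my_thread and the stop flag are hypothetical stand-ins for ntb_perf's own fields:

#include <linux/wait.h>
#include <linux/workqueue.h>

struct my_thread {
	struct work_struct work;
	wait_queue_head_t dma_wait;
	bool stop;			/* checked in the work's wait_event() condition */
};

static void my_terminate(struct my_thread *threads, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		WRITE_ONCE(threads[i].stop, true);
		/* Kick the worker out of any wait_event() sleep first... */
		wake_up(&threads[i].dma_wait);
		/* ...then wait for the work item to drain completely. */
		cancel_work_sync(&threads[i].work);
	}
}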
|
/drivers/android/

binder_internal.h
     419  struct rb_root threads;  member
|
binder.c
    1566  if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&  in binder_proc_dec_tmpref()
    5177  struct rb_node **p = &proc->threads.rb_node;  in binder_get_thread_ilocked()
    5200  rb_insert_color(&thread->rb_node, &proc->threads);  in binder_get_thread_ilocked()
    5284  rb_erase(&thread->rb_node, &proc->threads);  in binder_thread_release()
    5541  for (n = rb_first(&proc->threads); n; n = rb_next(n)) {  in binder_txns_pending_ilocked()
    6216  threads = 0;  in binder_deferred_release()
    6218  while ((n = rb_first(&proc->threads))) {  in binder_deferred_release()
    6223  threads++;  in binder_deferred_release()
    6268  __func__, proc->pid, threads, nodes, incoming_refs,  in binder_deferred_release()
    6560  for (n = rb_first(&proc->threads); n; n = rb_next(n))  in print_binder_proc()
    [all …]
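
binder tracks each process's threads in an rbtree (keyed by thread pid in the real code). The insert walk at lines 5177/5200 is the canonical kernel rbtree idiom: descend to find the link slot, rb_link_node(), then rb_insert_color() to rebalance. A self-contained sketch with hypothetical types:

#include <linux/rbtree.h>

/* Hypothetical node; binder's real struct is binder_thread. */
struct my_thread {
	struct rb_node rb_node;
	int pid;
};

/* Lookup-or-insert into a tree initialized with RB_ROOT. */
static struct my_thread *my_thread_get(struct rb_root *root,
				       struct my_thread *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct my_thread *t = rb_entry(*p, struct my_thread, rb_node);

		parent = *p;
		if (new->pid < t->pid)
			p = &(*p)->rb_left;
		else if (new->pid > t->pid)
			p = &(*p)->rb_right;
		else
			return t;	/* already present */
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return new;
}

/* In-order walk, as in binder_txns_pending_ilocked() above. */
static int my_thread_count(struct rb_root *root)
{
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(root); n; n = rb_next(n))
		count++;
	return count;
}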
|
/drivers/cpufreq/

Kconfig
     239  which gets used as a hint to schedule vCPU threads and select CPU
|
/drivers/s390/net/

qeth_core.h
    1043  void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
|
qeth_core_main.c
     161  void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,  in qeth_set_allowed_threads() argument
     167  card->thread_allowed_mask = threads;  in qeth_set_allowed_threads()
     169  card->thread_start_mask &= threads;  in qeth_set_allowed_threads()
     175  int qeth_threads_running(struct qeth_card *card, unsigned long threads)  in qeth_threads_running() argument
     181  rc = (card->thread_running_mask & threads);  in qeth_threads_running()
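
qeth represents its background threads as bits in unsigned long masks (allowed, started, running) rather than as task pointers, updated under a lock. A minimal sketch of the same mask bookkeeping; my_card and the lock are hypothetical, though the field names mirror the qeth fields visible above:

#include <linux/spinlock.h>

struct my_card {
	spinlock_t thread_mask_lock;
	unsigned long thread_allowed_mask;
	unsigned long thread_start_mask;
	unsigned long thread_running_mask;
};

static void my_set_allowed_threads(struct my_card *card, unsigned long threads,
				   int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;	/* drop now-disallowed starts */
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
}

static int my_threads_running(struct my_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads) != 0;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}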
|
/drivers/char/

Kconfig
     118  of threads across a large system which avoids bouncing a cacheline
|
/drivers/scsi/aic7xxx/

aic79xx.reg
    3724  * The execution head pointer threads the head SCBs for
|