/linux/drivers/gpu/drm/i915/gvt/
scheduler.c
   189  workload->engine->name, workload->ctx_desc.lrca,  in populate_shadow_context()
   497  workload);  in intel_gvt_scan_and_shadow_workload()
   782  ret = workload->prepare(workload);  in prepare_workload()
   804  workload->engine->name, workload);  in dispatch_workload()
   834  workload->engine->name, workload->req);  in dispatch_workload()
  1110  ring_id, workload, workload->status);  in complete_current_workload()
  1133  workload->complete(workload);  in complete_current_workload()
  1167  if (workload)  in workload_thread()
  1211  workload, workload->status);  in workload_thread()
  1546  if (!workload)  in alloc_workload()
  [all …]
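
Read together, these scheduler.c matches outline the GVT-g workload lifecycle:
a workload is prepared, dispatched to its engine, and finally completed with a
status. A minimal userspace sketch of that callback shape, where everything
except the prepare()/complete() hooks and the status field visible above is
illustrative::

  #include <stdio.h>

  struct workload {
      int status;                           /* read back by complete() */
      int (*prepare)(struct workload *w);   /* e.g. shadow/pin resources */
      void (*complete)(struct workload *w); /* teardown, status reporting */
  };

  static int prepare_noop(struct workload *w) { (void)w; return 0; }

  static void complete_print(struct workload *w)
  {
      printf("workload %p completed, status %d\n", (void *)w, w->status);
  }

  /* mirrors the shape of dispatch_workload()/complete_current_workload():
   * prepare first, submit (elided here), then always run the completion */
  static void run_one(struct workload *w)
  {
      w->status = w->prepare(w);
      w->complete(w);
  }

  int main(void)
  {
      struct workload w = { .prepare = prepare_noop,
                            .complete = complete_print };
      run_one(&w);
      return 0;
  }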
|
execlist.c
   372  struct intel_vgpu *vgpu = workload->vgpu;  in prepare_execlist_workload()
   377  if (!workload->emulate_schedule_in)  in prepare_execlist_workload()
   394  struct intel_vgpu *vgpu = workload->vgpu;  in complete_execlist_workload()
   397  &s->execlist[workload->engine->id];  in complete_execlist_workload()
   404  workload, workload->status);  in complete_execlist_workload()
   406  if (workload->status || vgpu->resetting_eng & workload->engine->mask)  in complete_execlist_workload()
   414  this_desc = &workload->ctx_desc;  in complete_execlist_workload()
   436  struct intel_vgpu_workload *workload = NULL;  in submit_context() local
   439  if (IS_ERR(workload))  in submit_context()
   440  return PTR_ERR(workload);  in submit_context()
  [all …]
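
The submit_context() matches at lines 439-440 use the kernel's error-pointer
idiom: a single pointer return value encodes either a valid object or a
negative errno. A userspace re-creation of the relevant helpers from
include/linux/err.h, for illustration only::

  #include <stdio.h>

  #define MAX_ERRNO 4095  /* same bound the kernel uses */

  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
      /* error values occupy the top 4095 addresses */
      return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }

  int main(void)
  {
      void *workload = ERR_PTR(-12);  /* -ENOMEM */

      if (IS_ERR(workload))
          printf("submit failed: %ld\n", PTR_ERR(workload));
      return 0;
  }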
|
cmd_parser.c
  2840  gma_head = workload->rb_start + workload->rb_head;  in scan_workload()
  2841  gma_tail = workload->rb_start + workload->rb_tail;  in scan_workload()
  2853  s.workload = workload;  in scan_workload()
  2863  ret = command_scan(&s, workload->rb_head, workload->rb_tail,  in scan_workload()
  2864  workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));  in scan_workload()
  2901  s.workload = workload;  in scan_wa_ctx()
  2925  workload->rb_len = (workload->rb_tail + guest_rb_size -  in shadow_workload_ring_buffer()
  2928  gma_head = workload->rb_start + workload->rb_head;  in shadow_workload_ring_buffer()
  2929  gma_tail = workload->rb_start + workload->rb_tail;  in shadow_workload_ring_buffer()
  2943  s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;  in shadow_workload_ring_buffer()
  [all …]
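
The match at line 2925 is truncated, but it begins the usual ring-buffer
occupancy arithmetic: adding the buffer size before subtracting the head keeps
the intermediate value non-negative when the tail has wrapped. A standalone
sketch of that computation (names are illustrative, not the GVT code)::

  #include <assert.h>

  static unsigned int ring_used(unsigned int head, unsigned int tail,
                                unsigned int size)
  {
      return (tail + size - head) % size;
  }

  int main(void)
  {
      assert(ring_used(0, 100, 4096) == 100);   /* tail ahead of head */
      assert(ring_used(4000, 50, 4096) == 146); /* tail wrapped around */
      return 0;
  }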
|
trace.h
   231  void *workload, const char *cmd_name),
   234  buf_addr_type, workload, cmd_name),
   243  __field(void*, workload)
   255  __entry->workload = workload;
   271  __entry->workload)
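
These trace.h fragments are pieces of a TRACE_EVENT() definition: a prototype
(line 231), an argument list (234), the record layout (243), assignment into
the record (255), and the printk template (271). A schematic trace header
showing where each fragment sits; the event and system names are hypothetical,
and only the workload pointer field is carried over::

  #undef TRACE_SYSTEM
  #define TRACE_SYSTEM example

  #if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
  #define _TRACE_EXAMPLE_H

  #include <linux/tracepoint.h>

  TRACE_EVENT(example_workload_cmd,
      TP_PROTO(void *workload, const char *cmd_name), /* cf. line 231 */
      TP_ARGS(workload, cmd_name),                    /* cf. line 234 */
      TP_STRUCT__entry(
          __field(void *, workload)                   /* cf. line 243 */
      ),
      TP_fast_assign(
          __entry->workload = workload;               /* cf. line 255 */
      ),
      TP_printk("workload %p", __entry->workload)     /* cf. line 271 */
  );

  #endif /* _TRACE_EXAMPLE_H */

  /* must stay outside the include guard */
  #include <trace/define_trace.h>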
|
cmd_parser.h
    50  int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
    56  int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
|
scheduler.h
   139  void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
   166  void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
|
/linux/tools/perf/tests/
perf-record.c
   114  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);  in test__PERF_RECORD()
   126  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {  in test__PERF_RECORD()
   212  if ((pid_t)sample.pid != evlist->workload.pid) {  in test__PERF_RECORD()
   214  name, evlist->workload.pid, sample.pid);  in test__PERF_RECORD()
   218  if ((pid_t)sample.tid != evlist->workload.pid) {  in test__PERF_RECORD()
   220  name, evlist->workload.pid, sample.tid);  in test__PERF_RECORD()
   229  (pid_t)event->comm.pid != evlist->workload.pid) {  in test__PERF_RECORD()
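
The test pins the forked workload to one CPU so every sample it records can be
checked against a single, known CPU. A self-contained sketch of that
sched_setaffinity() call (pinning the calling process rather than a child)::

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
      cpu_set_t mask;

      CPU_ZERO(&mask);
      CPU_SET(0, &mask);  /* allow CPU 0 only */
      /* pid 0 = the caller; the test passes evlist->workload.pid instead */
      if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
          perror("sched_setaffinity");
          return 1;
      }
      return 0;
  }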
|
/linux/tools/perf/Documentation/
perf-sched.txt
    18  of an arbitrary workload.
    21  and other scheduling properties of the workload.
    23  'perf sched script' to see a detailed trace of the workload that
    26  'perf sched replay' to simulate the workload that was recorded
    28  that mimic the workload based on the events in the trace. These
    30  of the workload as it occurred when it was recorded - and can repeat
    34  workload captured via perf sched record. Columns stand for
|
perf-timechart.txt
     6  perf-timechart - Tool to visualize total system behavior during a workload
    18  of an arbitrary workload. By default timechart records only scheduler
|
/linux/Documentation/admin-guide/mm/
idle_page_tracking.rst
    11  accessed by a workload and which are idle. This information can be useful for
    12  estimating the workload's working set size, which, in turn, can be taken into
    13  account when configuring the workload parameters, setting memory cgroup limits,
    14  or deciding where to place the workload within a compute cluster.
    53  workload one should:
    55  1. Mark all the workload's pages as idle by setting corresponding bits in
    57  ``/proc/pid/pagemap`` if the workload is represented by a process, or by
    58  filtering out alien pages using ``/proc/kpagecgroup`` in case the workload
    61  2. Wait until the workload accesses its working set.
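
The two numbered steps above translate directly into reads and writes of the
``/sys/kernel/mm/page_idle/bitmap`` file, which must be accessed in 8-byte,
8-byte-aligned chunks, one bit per page frame. A minimal sketch, assuming root
and CONFIG_IDLE_PAGE_TRACKING, that marks one 64-frame chunk idle and re-reads
it later (a real tool would first resolve the workload's PFNs via
``/proc/pid/pagemap``)::

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      uint64_t word = ~0ULL;      /* one bit per PFN; set = mark idle */
      off_t off = 4 * 8;          /* word 4 covers PFNs 256..319 */
      int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);

      if (fd < 0) { perror("open"); return 1; }
      pwrite(fd, &word, sizeof(word), off);  /* step 1: mark idle */
      sleep(60);                             /* step 2: let it run */
      pread(fd, &word, sizeof(word), off);   /* still-set bits stayed idle */
      printf("idle mask for PFNs 256-319: %016llx\n",
             (unsigned long long)word);
      close(fd);
      return 0;
  }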
|
/linux/tools/perf/bench/
find-bit-bench.c
    34  static noinline void workload(int val)  in workload() function
    80  workload(bit);  in do_for_each_set_bit()
    93  workload(bit);  in do_for_each_set_bit()
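
The noinline workload() at line 34 is the benchmark's trick for keeping the
compiler honest: the measured for_each_set_bit() loop must call a function the
optimizer cannot fold away. A userspace sketch of the same pattern, with an
illustrative accumulator standing in for the bench's global state::

  #include <stdio.h>

  static volatile unsigned long accumulator;

  __attribute__((noinline)) static void workload(int val)
  {
      accumulator += val;  /* cheap but externally visible side effect */
  }

  int main(void)
  {
      unsigned long bits = 0xA5A5A5A5UL;

      for (int bit = 0; bit < 32; bit++)  /* stand-in for for_each_set_bit() */
          if (bits & (1UL << bit))
              workload(bit);
      printf("%lu\n", accumulator);
      return 0;
  }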
|
/linux/Documentation/filesystems/nfs/
knfsd-stats.rst
    54  Depending on the NFS workload patterns and various network stack
    58  However this is a more accurate and less workload-dependent measure
    74  pool for the NFS workload (the workload is thread-limited), in which
    76  performance of the NFS workload.
    93  threads configured than can be used by the NFS workload. This is
    99  slow; the idle timeout is 60 minutes. Unless the NFS workload
|
/linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
pp_psm.c
   270  long workload;  in psm_adjust_power_state_dynamic() local
   295  workload = hwmgr->workload_setting[index];  in psm_adjust_power_state_dynamic()
   297  if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)  in psm_adjust_power_state_dynamic()
   298  hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);  in psm_adjust_power_state_dynamic()
|
/linux/Documentation/scheduler/
sched-capacity.rst
    72  With a workload that periodically does a fixed amount of work, you will get an
   103  Executing the same workload as described in 1.3.1, which each CPU running at its
   111  workload on CPU1
   151  One issue that needs to be taken into account is that a workload's duty cycle is
   153  periodic workload at a given frequency F::
   162  Now, consider running the *same* workload at frequency F/2::
   184  identical workload on CPUs of different capacity values will yield different
   192  Executing a given periodic workload on each CPU at their maximum frequency would
   383  workload on CPU0
   390  workload on CPU1
  [all …]
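
The duty-cycle point at lines 151-162 is simple arithmetic: a fixed amount of
work takes twice as long at half the clock, so its share of a fixed period
doubles. A worked example with illustrative numbers::

  #include <stdio.h>

  int main(void)
  {
      double work   = 1e6;   /* cycles of work per period */
      double period = 4e-3;  /* seconds */
      double f      = 1e9;   /* Hz */

      /* busy time is work/frequency; duty cycle is busy time / period */
      printf("duty cycle at F:   %.0f%%\n", 100 * (work / f) / period);
      printf("duty cycle at F/2: %.0f%%\n", 100 * (work / (f / 2)) / period);
      return 0;  /* prints 25% at F, 50% at F/2 */
  }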
|
/linux/Documentation/admin-guide/mm/damon/
start.rst
    57  with your real workload. The last line asks ``damo`` to record the access
   130  accessed for >=60 seconds in your workload to be swapped out. ::
   134  $ damo schemes -c test_scheme <pid of your workload>
|
/linux/Documentation/admin-guide/pm/
intel-speed-select.rst
    10  variety of diverse workload requirements.
    82  This feature allows configuration of a server dynamically based on workload
   216  workload, disable turbo::
   220  Then runs a busy workload on all CPUs, for example::
   518  the user control base frequency. If some critical workload threads demand
   546  Before enabling Intel(R) SST-BF and measuring its impact on a workload
   547  performance, execute some workload and measure performance and get a baseline
   566  Below, the workload is measuring average scheduler wakeup latency, so a lower
   666  With this configuration, if the same workload is executed by pinning the
   667  workload to high priority CPUs (CPU 5 and 6 in this case)::
  [all …]
|
/linux/Documentation/timers/
no_hz.rst
    52  However, if you are instead running a light workload with long idle
    59  In addition, if you are running either a real-time workload or an HPC
    60  workload with short iterations, the scheduling-clock interrupts can
    61  degrade your applications performance. If this describes your workload,
   210  but do not see any change in your workload's behavior. Is this because
   211  your workload isn't affected that much by OS jitter, or is it because
   222  possible, then you can conclude that your workload is not all that
   298  constraining the workload. For example, the only way to eliminate
|
/linux/tools/perf/util/
evlist.c
    69  evlist->workload.pid = -1;  in evlist__init()
  1377  evlist->workload.pid = fork();  in evlist__prepare_workload()
  1378  if (evlist->workload.pid < 0) {  in evlist__prepare_workload()
  1383  if (!evlist->workload.pid) {  in evlist__prepare_workload()
  1454  perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);  in evlist__prepare_workload()
  1468  evlist->workload.cork_fd = go_pipe[1];  in evlist__prepare_workload()
  1483  if (evlist->workload.cork_fd > 0) {  in evlist__start_workload()
  1489  ret = write(evlist->workload.cork_fd, &bf, 1);  in evlist__start_workload()
  1493  close(evlist->workload.cork_fd);  in evlist__start_workload()
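
Lines 1377-1493 implement perf's "corked workload" pattern: the forked child
blocks on a pipe until the parent has finished setting up its events, then a
single write releases it. A trimmed-down sketch of the same mechanism (evlist
keeps the write end as workload.cork_fd; error handling is reduced here)::

  #include <stdio.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int main(void)
  {
      int go_pipe[2];
      char bf = 0;

      if (pipe(go_pipe))
          return 1;

      pid_t pid = fork();
      if (pid == 0) {                    /* child: the workload */
          close(go_pipe[1]);
          read(go_pipe[0], &bf, 1);      /* block until "uncorked" */
          execlp("true", "true", (char *)NULL);
          _exit(127);
      }

      close(go_pipe[0]);
      /* ... parent would open/enable counters on `pid` here ... */
      write(go_pipe[1], &bf, 1);         /* start the workload */
      close(go_pipe[1]);
      waitpid(pid, NULL, 0);
      return 0;
  }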
|
/linux/Documentation/accounting/
psi.rst
    24  hardware according to workload demand.
    32  workload health or risking major disruptions such as OOM kills.
    54  actual CPU cycles are going to waste, and a workload that spends
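
The pressure data psi.rst describes is exported through /proc/pressure/cpu,
/proc/pressure/memory and /proc/pressure/io, which can simply be read as
text. A small sketch, assuming a kernel built with CONFIG_PSI=y::

  #include <stdio.h>

  int main(void)
  {
      char line[256];
      FILE *f = fopen("/proc/pressure/cpu", "r");

      if (!f) { perror("fopen"); return 1; }
      /* "some avg10=... avg60=... avg300=... total=..." plus, on newer
       * kernels, a "full" line */
      while (fgets(line, sizeof(line), f))
          fputs(line, stdout);
      fclose(f);
      return 0;
  }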
|
/linux/tools/lib/perf/Documentation/
libperf-counting.txt
    35  * does some workload
   158  From this moment events are counting and we can do our workload.
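
The "does some workload" step sits between opening and reading the event. A
condensed counting sketch along the lines of libperf's documented example
(build with -lperf; the spin loop stands in for the real workload)::

  #include <linux/perf_event.h>
  #include <perf/evsel.h>
  #include <perf/threadmap.h>
  #include <stdio.h>

  int main(void)
  {
      struct perf_event_attr attr = {
          .type   = PERF_TYPE_SOFTWARE,
          .config = PERF_COUNT_SW_CPU_CLOCK,
      };
      struct perf_thread_map *threads = perf_thread_map__new_dummy();
      struct perf_evsel *evsel = perf_evsel__new(&attr);
      struct perf_counts_values counts;

      perf_thread_map__set_pid(threads, 0, 0);  /* measure this process */
      if (perf_evsel__open(evsel, NULL, threads))
          return 1;

      for (volatile int i = 0; i < 10000000; i++)  /* the workload */
          ;

      perf_evsel__read(evsel, 0, 0, &counts);
      printf("cpu-clock: %llu\n", (unsigned long long)counts.val);
      perf_evsel__close(evsel);
      perf_evsel__delete(evsel);
      perf_thread_map__put(threads);
      return 0;
  }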
|
/linux/security/
Kconfig.hardening
    42  on the function calling complexity of a given workload's
   170  are advised to test this feature on your expected workload before
   218  workload, but most cases see <1% impact. Some synthetic
   233  The performance impact varies by workload, but is more expensive
|
/linux/Documentation/admin-guide/
kernel-per-CPU-kthreads.rst
    31  # run workload
   230  1. Run your workload at a real-time priority, which will allow
   270  slowly. Of course, you can also run your workload at
   272  but if your workload is CPU-bound, this is a bad idea.
   308  is feasible only if your workload never requires RCU priority
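
Line 230's advice, "run your workload at a real-time priority", is one
sched_setscheduler() call; the excerpt's own caveat applies, since a CPU-bound
SCHED_FIFO task can starve housekeeping work. A minimal sketch (needs root or
CAP_SYS_NICE)::

  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
      struct sched_param sp = { .sched_priority = 10 };

      if (sched_setscheduler(0, SCHED_FIFO, &sp)) {  /* 0 = this process */
          perror("sched_setscheduler");
          return 1;
      }
      /* ... run the latency-sensitive workload here ... */
      return 0;
  }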
|
/linux/Documentation/block/
bfq-iosched.rst
    84  Regardless of the actual background workload, BFQ guarantees that, for
   100  until the background workload terminates (also on SSDs).
   106  of the background I/O workload. As a consequence, these applications
   107  do not suffer from almost any glitch due to the background workload.
   112  If some additional workload happens to be executed in parallel, then
   131  workload and regardless of the device parameters. From these bandwidth
   248  workload and the budgets assigned to the queue.
   347  So depending on storage and workload, it might be useful to set
   370  throughput. One important case is random workload. Because of this
|
/linux/Documentation/driver-api/
dma-buf.rst
   286  means any workload using recoverable page faults cannot use DMA fences for
   300  job with a DMA fence and a compute workload using recoverable page faults are
   303  - The 3D workload might need to wait for the compute job to finish and release
   306  - The compute workload might be stuck in a page fault, because the memory
   307  allocation is waiting for the DMA fence of the 3D workload to complete.
|
/linux/block/
Kconfig.iosched
    24  regardless of the device parameters and with any workload. It
|