/linux-6.3-rc2/kernel/irq/
cpuhotplug.c
     58  const struct cpumask *affinity;  in migrate_one_irq() local
    105  affinity = irq_desc_get_pending_mask(desc);  in migrate_one_irq()
    107  affinity = irq_data_get_affinity_mask(d);  in migrate_one_irq()
    113  if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {  in migrate_one_irq()
    123  affinity = cpu_online_mask;  in migrate_one_irq()
    132  err = irq_do_set_affinity(d, affinity, false);  in migrate_one_irq()
    192  const struct cpumask *affinity = irq_data_get_affinity_mask(data);  in irq_restore_affinity_of_irq() local
    195  !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))  in irq_restore_affinity_of_irq()
    211  irq_set_affinity_locked(data, affinity, false);  in irq_restore_affinity_of_irq()
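The migrate_one_irq() hits above trace the affinity-breaking path taken when a CPU goes offline: use the pending mask if a move was queued, otherwise the current affinity mask, and if no CPU in that mask is still online, fall back to cpu_online_mask before reprogramming the line. A condensed sketch of that pattern follows; it is not the actual function body (the managed-interrupt and error paths are omitted and the pending-mask check is simplified), and migrate_one_irq_sketch is a made-up name.

#include <linux/cpumask.h>
#include <linux/irq.h>

#include "internals.h"   /* kernel/irq-internal helpers used below */

/* Sketch of the pattern in the cpuhotplug.c hits above, heavily simplified. */
static int migrate_one_irq_sketch(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity;

        affinity = irq_desc_get_pending_mask(desc);        /* cf. line 105 */
        if (cpumask_empty(affinity))
                affinity = irq_data_get_affinity_mask(d);  /* cf. line 107 */

        /* No online CPU left in the mask: break affinity (lines 113, 123). */
        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
                affinity = cpu_online_mask;

        return irq_do_set_affinity(d, affinity, false);    /* cf. line 132 */
}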
irqdesc.c
     82  const struct cpumask *affinity)  in desc_smp_init() argument
     84  if (!affinity)  in desc_smp_init()
     85  affinity = irq_default_affinity;  in desc_smp_init()
     86  cpumask_copy(desc->irq_common_data.affinity, affinity);  in desc_smp_init()
    480  if (affinity) {  in alloc_descs()
    491  if (affinity) {  in alloc_descs()
    492  if (affinity->is_managed) {  in alloc_descs()
    496  mask = &affinity->mask;  in alloc_descs()
    498  affinity++;  in alloc_descs()
    890  if (affinity)  in irq_set_percpu_devid_partition()
[all …]
irqdomain.c
     30  bool realloc, const struct irq_affinity_desc *affinity);
    694  const struct irq_affinity_desc *affinity)  in irq_create_mapping_affinity_locked() argument
    703  affinity);  in irq_create_mapping_affinity_locked()
    733  const struct irq_affinity_desc *affinity)  in irq_create_mapping_affinity() argument
   1085  int node, const struct irq_affinity_desc *affinity)  in irq_domain_alloc_descs() argument
   1091  affinity);  in irq_domain_alloc_descs()
   1097  affinity);  in irq_domain_alloc_descs()
   1100  affinity);  in irq_domain_alloc_descs()
   1472  bool realloc, const struct irq_affinity_desc *affinity)  in irq_domain_alloc_irqs_locked() argument
   1480  affinity);  in irq_domain_alloc_irqs_locked()
[all …]
/linux-6.3-rc2/tools/testing/selftests/rseq/
basic_test.c
     18  cpu_set_t affinity, test_affinity;  in test_cpu_pointer() local
     21  sched_getaffinity(0, sizeof(affinity), &affinity);  in test_cpu_pointer()
     24  if (CPU_ISSET(i, &affinity)) {  in test_cpu_pointer()
     39  sched_setaffinity(0, sizeof(affinity), &affinity);  in test_cpu_pointer()
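The rseq selftest excerpt shows the usual userspace pattern: read the current mask with sched_getaffinity(), walk the CPUs set in it with CPU_ISSET(), pin to each one with sched_setaffinity(), and finally restore the original mask. A small, self-contained variant of that loop (illustrative only, not the selftest itself):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Pin the calling thread to each CPU in its current affinity mask in turn,
 * then restore the original mask, using the same libc calls as the
 * basic_test.c hits above. */
int main(void)
{
        cpu_set_t affinity, test_affinity;
        int i;

        if (sched_getaffinity(0, sizeof(affinity), &affinity))
                return 1;
        for (i = 0; i < CPU_SETSIZE; i++) {
                if (!CPU_ISSET(i, &affinity))
                        continue;
                CPU_ZERO(&test_affinity);
                CPU_SET(i, &test_affinity);
                if (sched_setaffinity(0, sizeof(test_affinity), &test_affinity))
                        return 1;
                printf("now running on CPU %d (sched_getcpu() = %d)\n",
                       i, sched_getcpu());
        }
        /* Restore the original mask, as the test does at its line 39. */
        return sched_setaffinity(0, sizeof(affinity), &affinity) ? 1 : 0;
}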
/linux-6.3-rc2/tools/perf/util/
affinity.h
      7  struct affinity {  struct
     13  void affinity__cleanup(struct affinity *a);  argument
     14  void affinity__set(struct affinity *a, int cpu);
     15  int affinity__setup(struct affinity *a);
affinity.c
     24  int affinity__setup(struct affinity *a)  in affinity__setup()
     48  void affinity__set(struct affinity *a, int cpu)  in affinity__set()
     71  static void __affinity__cleanup(struct affinity *a)  in __affinity__cleanup()
     81  void affinity__cleanup(struct affinity *a)  in affinity__cleanup()
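affinity.h and affinity.c define perf's small helper for temporarily migrating the current thread: affinity__setup() saves the original thread mask, affinity__set() moves the thread so it runs on (or near) the target CPU before per-CPU work is done, and affinity__cleanup() restores the saved mask, which is the pattern the evlist.c hits below follow. A hedged usage sketch; run_per_cpu() and do_work_on() are placeholders, not perf functions:

#include "util/affinity.h"      /* the perf-internal header listed above */

/* Sketch of the setup/set/cleanup lifecycle; error handling kept minimal. */
static int run_per_cpu(const int *cpus, int ncpus, int (*do_work_on)(int cpu))
{
        struct affinity affinity;
        int i, err = 0;

        if (affinity__setup(&affinity) < 0)
                return -1;
        for (i = 0; i < ncpus && !err; i++) {
                affinity__set(&affinity, cpus[i]);  /* hop near cpus[i] first */
                err = do_work_on(cpus[i]);
        }
        affinity__cleanup(&affinity);               /* restore original mask */
        return err;
}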
evlist.h
    205  int affinity, int flush, int comp_level);
    365  struct affinity *affinity;  member
    378  #define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \  argument
    379  for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
    384  struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
mmap.c
     97  static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)  in perf_mmap__aio_bind() argument
    105  if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {  in perf_mmap__aio_bind()
    141  struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)  in perf_mmap__aio_bind()
    175  ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);  in perf_mmap__aio_mmap()
    270  if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)  in perf_mmap__setup_affinity_mask()
    272  else if (mp->affinity == PERF_AFFINITY_CPU)  in perf_mmap__setup_affinity_mask()
    286  if (mp->affinity != PERF_AFFINITY_SYS &&  in mmap__mmap()
evlist.c
    404  .affinity = affinity,  in evlist__cpu_begin()
    412  if (itr.affinity) {  in evlist__cpu_begin()
    443  if (evlist_cpu_itr->affinity)  in evlist_cpu_iterator__next()
    489  struct affinity saved_affinity, *affinity = NULL;  in __evlist__disable() local
    496  affinity = &saved_affinity;  in __evlist__disable()
    519  affinity__cleanup(affinity);  in __evlist__disable()
    559  struct affinity saved_affinity, *affinity = NULL;  in __evlist__enable() local
    565  affinity = &saved_affinity;  in __evlist__enable()
    578  affinity__cleanup(affinity);  in __evlist__enable()
   1006  .affinity = affinity,  in evlist__mmap_ex()
[all …]
/linux-6.3-rc2/Documentation/arm64/
asymmetric-32bit.rst
     51  CPU affinity.
     68  On a homogeneous system, the CPU affinity of a task is preserved across
     71  affinity mask contains 64-bit-only CPUs. In this situation, the kernel
     72  determines the new affinity mask as follows:
     74  1. If the 32-bit-capable subset of the affinity mask is not empty,
     75  then the affinity is restricted to that subset and the old affinity
     84  affinity of the task is then changed to match the 32-bit-capable
     92  affinity of the task using the saved mask if it was previously valid.
     95  with the affinity unchanged.
     99  affinity for the task is updated and any saved mask from a prior
[all …]
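Lines 74-75 describe the first rule applied when a 64-bit-only affinity mask meets a 32-bit task: keep the 32-bit-capable subset if it is non-empty, remembering the old mask so it can be restored later (line 92), otherwise override the mask entirely (line 84). A paraphrase of that policy in cpumask terms; this is not the kernel's actual implementation, and restrict_to_32bit_sketch and aarch32_el0_mask are placeholder names:

#include <linux/cpumask.h>

/* Paraphrase of the documented policy, not arch/arm64 code. */
static void restrict_to_32bit_sketch(struct cpumask *affinity,
                                     struct cpumask *saved,
                                     const struct cpumask *aarch32_el0_mask)
{
        cpumask_t subset;

        cpumask_and(&subset, affinity, aarch32_el0_mask);
        if (!cpumask_empty(&subset)) {
                cpumask_copy(saved, affinity);    /* old affinity, kept for restore */
                cpumask_copy(affinity, &subset);  /* rule 1: restrict to the subset */
        } else {
                /* Otherwise the mask is forcibly changed to the 32-bit-capable CPUs. */
                cpumask_copy(affinity, aarch32_el0_mask);
        }
}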
/linux-6.3-rc2/tools/virtio/ringtest/
run-on-all.sh
     20  "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
     24  "$@" --host-affinity $HOST_AFFINITY
/linux-6.3-rc2/Documentation/devicetree/bindings/interrupt-controller/
apple,aic.yaml
     21  - Per-IRQ affinity setting
     77  FIQ affinity can be expressed as a single "affinities" node,
     79  affinity.
     81  "^.+-affinity$":
     88  the affinity is not the default.
apple,aic2.yaml
     76  FIQ affinity can be expressed as a single "affinities" node,
     78  affinity.
     80  "^.+-affinity$":
     87  the affinity is not the default.
arm,gic-v3.yaml
     43  If the system requires describing PPI affinity, then the value must
    137  PPI affinity can be expressed as a single "ppi-partitions" node,
    143  affinity:
    152  - affinity
    281  affinity = <&cpu0>, <&cpu2>;
    285  affinity = <&cpu1>, <&cpu3>;
/linux-6.3-rc2/drivers/infiniband/hw/hfi1/
affinity.c
    966  struct hfi1_affinity_node_list *affinity)  in find_hw_thread_mask() argument
    970  affinity->num_core_siblings /  in find_hw_thread_mask()
    973  cpumask_copy(hw_thread_mask, &affinity->proc.mask);  in find_hw_thread_mask()
    974  if (affinity->num_core_siblings > 0) {  in find_hw_thread_mask()
   1004  struct cpu_mask_set *set = &affinity->proc;  in hfi1_get_proc_affinity()
   1062  mutex_lock(&affinity->lock);  in hfi1_get_proc_affinity()
   1092  if (affinity->num_core_siblings > 0) {  in hfi1_get_proc_affinity()
   1168  mutex_unlock(&affinity->lock);  in hfi1_get_proc_affinity()
   1185  struct cpu_mask_set *set = &affinity->proc;  in hfi1_put_proc_affinity()
   1190  mutex_lock(&affinity->lock);  in hfi1_put_proc_affinity()
[all …]
/linux-6.3-rc2/Documentation/core-api/irq/
irq-affinity.rst
      2  SMP IRQ affinity
     14  IRQ affinity then the value will not change from the default of all cpus.
     16  /proc/irq/default_smp_affinity specifies default affinity mask that applies
     17  to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
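The irq-affinity.rst excerpt points at the two procfs knobs: /proc/irq/default_smp_affinity holds the mask that newly allocated (not-yet-active) IRQs inherit, and each active IRQ exposes its own bitmask under /proc/irq/<N>/smp_affinity. A minimal userspace sketch of reading one and writing the other; IRQ 44 and the mask value "f" are arbitrary examples, the write needs root, and the underlying irqchip must support changing affinity:

#include <stdio.h>

int main(void)
{
        char mask[256];
        FILE *f;

        /* Default mask applied to newly allocated (non-active) IRQs. */
        f = fopen("/proc/irq/default_smp_affinity", "r");
        if (f && fgets(mask, sizeof(mask), f))
                printf("default_smp_affinity: %s", mask);
        if (f)
                fclose(f);

        /* Restrict a specific IRQ (44 is just an example) to CPUs 0-3. */
        f = fopen("/proc/irq/44/smp_affinity", "w");
        if (!f) {
                perror("smp_affinity");  /* needs root; the IRQ may not exist */
                return 1;
        }
        fputs("f\n", f);                 /* hex bitmask covering CPUs 0-3 */
        return fclose(f) ? 1 : 0;
}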
/linux-6.3-rc2/Documentation/translations/zh_CN/core-api/irq/
irq-affinity.rst
      3  :Original: Documentation/core-api/irq/irq-affinity.rst
      9  .. _cn_irq-affinity.rst:
     23  (IRQ affinity), then the default for all CPUs will remain unchanged (i.e. the IRQ stays affine to all CPUs).
/linux-6.3-rc2/arch/arm64/kernel/
setup.c
    112  u32 i, affinity, fs[4], bits[4], ls;  in smp_build_mpidr_hash() local
    126  affinity = MPIDR_AFFINITY_LEVEL(mask, i);  in smp_build_mpidr_hash()
    132  ls = fls(affinity);  in smp_build_mpidr_hash()
    133  fs[i] = affinity ? ffs(affinity) - 1 : 0;  in smp_build_mpidr_hash()
/linux-6.3-rc2/arch/alpha/kernel/
sys_dp264.c
    136  cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)  in cpu_set_irq_affinity() argument
    142  if (cpumask_test_cpu(cpu, &affinity))  in cpu_set_irq_affinity()
    151  dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,  in dp264_set_affinity() argument
    155  cpu_set_irq_affinity(d->irq, *affinity);  in dp264_set_affinity()
    163  clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,  in clipper_set_affinity() argument
    167  cpu_set_irq_affinity(d->irq - 16, *affinity);  in clipper_set_affinity()
sys_titan.c
    135  titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)  in titan_cpu_set_irq_affinity() argument
    140  if (cpumask_test_cpu(cpu, &affinity))  in titan_cpu_set_irq_affinity()
    149  titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,  in titan_set_irq_affinity() argument
    154  titan_cpu_set_irq_affinity(irq - 16, *affinity);  in titan_set_irq_affinity()
/linux-6.3-rc2/tools/testing/selftests/rcutorture/bin/
kvm-test-1-run-batch.sh
     63  print "echo No CPU-affinity information, so no taskset command.";
     69  print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
/linux-6.3-rc2/drivers/irqchip/
irq-bcm7038-l1.c
     48  u8 affinity[MAX_WORDS * IRQS_PER_WORD];  member
    180  __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);  in bcm7038_l1_unmask()
    190  __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);  in bcm7038_l1_mask()
    209  was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &  in bcm7038_l1_set_affinity()
    211  __bcm7038_l1_mask(d, intc->affinity[hw]);  in bcm7038_l1_set_affinity()
    212  intc->affinity[hw] = first_cpu;  in bcm7038_l1_set_affinity()
/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/
pci_irq.c
    210  const struct cpumask *affinity)  in mlx5_irq_alloc() argument
    239  if (affinity) {  in mlx5_irq_alloc()
    240  cpumask_copy(irq->mask, affinity);  in mlx5_irq_alloc()
    303  struct cpumask *affinity)  in irq_pool_request_vector() argument
    313  irq = mlx5_irq_alloc(pool, vecidx, affinity);  in irq_pool_request_vector()
    423  struct cpumask *affinity)  in mlx5_irq_request() argument
    430  irq = irq_pool_request_vector(pool, vecidx, affinity);  in mlx5_irq_request()
    434  irq->irqn, cpumask_pr_args(affinity),  in mlx5_irq_request()
/linux-6.3-rc2/tools/perf/
builtin-record.c
     99  struct mmap_cpu_mask affinity;  member
   1232  opts->nr_cblocks, opts->affinity,  in record__mmap_evlist()
   1470  thread->mask->affinity.nbits)) {  in record__adjust_affinity()
   1471  bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);  in record__adjust_affinity()
   1472  bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,  in record__adjust_affinity()
   2966  opts->affinity = PERF_AFFINITY_NODE;  in record__parse_affinity()
   2968  opts->affinity = PERF_AFFINITY_CPU;  in record__parse_affinity()
   2995  mask->affinity.bits = NULL;  in record__thread_mask_alloc()
   3691  if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,  in record__init_thread_masks_spec()
   3705  if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,  in record__init_thread_masks_spec()
[all …]
/linux-6.3-rc2/arch/arm64/boot/dts/apple/
t6001.dtsi
     52  e-core-pmu-affinity {
     57  p-core-pmu-affinity {