/linux-6.3-rc2/tools/testing/selftests/cgroup/ |
A D | memcg_protection.m |
    6   % hierarchy to illustrate how overcommitted protection spreads among siblings
    9   % Simulation assumes siblings consumed the initial amount of memory (w/out
    16  % n vector nominal protection of siblings set at the given level (memory.low)
    48  siblings = sum(u);  variable
    52  e = protected * min(1, E / siblings); % normalize overcommit
    55  unclaimed = max(0, E - siblings);
    56  parent_overuse = sum(c) - siblings;
    79  % XXX here I do parallel reclaim of all siblings
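The formulas on lines 52 and 55 are the core of the simulation: when the siblings' protected amounts add up to more than the parent's effective protection E, each sibling's share is scaled by min(1, E / sum), and anything E has left over is "unclaimed". A minimal stand-alone C sketch of that normalization step, with illustrative numbers (the variable names and values are not the script's):

#include <stdio.h>

int main(void)
{
	double claim[3] = { 50.0, 30.0, 40.0 };	/* per-sibling protected amounts (illustrative) */
	double E = 100.0;			/* parent's effective protection */
	double sum = 0.0, scale;

	for (int i = 0; i < 3; i++)
		sum += claim[i];
	scale = E / sum < 1.0 ? E / sum : 1.0;	/* min(1, E / siblings), line 52 */

	for (int i = 0; i < 3; i++)
		printf("sibling %d: claimed %.0f -> effective %.1f\n",
		       i, claim[i], claim[i] * scale);

	printf("unclaimed = %.1f\n", E - sum > 0.0 ? E - sum : 0.0);	/* max(0, E - siblings), line 55 */
	return 0;
}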
|
/linux-6.3-rc2/drivers/infiniband/hw/irdma/ |
A D | ws.c |
    133  list_for_each_entry(node, &parent->child_list_head, siblings) {  in ws_find_node()
    139  list_for_each_entry(node, &parent->child_list_head, siblings) {  in ws_find_node()
    214  list_del(&tc_node->siblings);  in irdma_remove_leaf()
    219  list_del(&vsi_node->siblings);  in irdma_remove_leaf()
    295  list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);  in irdma_ws_add()
    322  list_add(&tc_node->siblings, &vsi_node->child_list_head);  in irdma_ws_add()
    356  list_del(&tc_node->siblings);  in irdma_ws_add()
    362  list_del(&vsi_node->siblings);  in irdma_ws_add()
|
A D | ws.h | 19 struct list_head siblings; member
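The ws.c/ws.h hits show the standard kernel parent/child list pattern: each node embeds a list_head named siblings that threads it onto its parent's child_list_head. A kernel-style sketch of that pattern, assuming trimmed, hypothetical node types (only the list API is real):

#include <linux/list.h>

struct demo_ws_node {
	int traffic_class;
	struct list_head siblings;		/* links us into parent->child_list_head */
	struct list_head child_list_head;	/* head of our own children */
};

/* Attach a child under a parent, as irdma_ws_add() does with list_add(). */
static void demo_attach(struct demo_ws_node *parent, struct demo_ws_node *child)
{
	INIT_LIST_HEAD(&child->child_list_head);	/* parent's head assumed initialized */
	list_add(&child->siblings, &parent->child_list_head);
}

/* Walk the children, as ws_find_node() does with list_for_each_entry(). */
static struct demo_ws_node *demo_find(struct demo_ws_node *parent, int tc)
{
	struct demo_ws_node *node;

	list_for_each_entry(node, &parent->child_list_head, siblings)
		if (node->traffic_class == tc)
			return node;
	return NULL;
}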
|
/linux-6.3-rc2/drivers/gpu/drm/i915/gt/uc/ |
A D | selftest_guc_multi_lrc.c |
    33  struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];  in multi_lrc_create_parent()  local
    42  siblings[i++] = engine;  in multi_lrc_create_parent()
    48  logical_sort(siblings, i);  in multi_lrc_create_parent()
    50  return intel_engine_create_parallel(siblings, 1, i);  in multi_lrc_create_parent()
|
/linux-6.3-rc2/drivers/gpu/drm/i915/gem/ |
A D | i915_gem_context.c |
    442  siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);  in set_proto_ctx_engines_balance()
    443  if (!siblings)  in set_proto_ctx_engines_balance()
    457  if (!siblings[n]) {  in set_proto_ctx_engines_balance()
    469  kfree(siblings);  in set_proto_ctx_engines_balance()
    473  set->engines[idx].siblings = siblings;  in set_proto_ctx_engines_balance()
    479  kfree(siblings);  in set_proto_ctx_engines_balance()
    647  if (!siblings)  in set_proto_ctx_engines_parallel_submit()
    663  siblings[n] =  in set_proto_ctx_engines_parallel_submit()
    666  if (!siblings[n]) {  in set_proto_ctx_engines_parallel_submit()
    715  set->engines[slot].siblings = siblings;  in set_proto_ctx_engines_parallel_submit()
    [all …]
|
A D | i915_gem_context_types.h | 122 struct intel_engine_cs **siblings; member
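The i915_gem_context.c hits outline an allocate/fill/unwind shape: a heap array of engine pointers is built with kmalloc_array(), and it is freed again on any lookup failure so ownership only transfers to set->engines[] on success. A sketch of that shape, where demo_lookup_engine() is a hypothetical stand-in for the driver's engine lookup and the types are trimmed:

#include <linux/err.h>
#include <linux/slab.h>

struct intel_engine_cs;

/* Hypothetical helper: resolve the n-th requested engine, or NULL. */
struct intel_engine_cs *demo_lookup_engine(unsigned int n);

static struct intel_engine_cs **demo_collect_siblings(unsigned int num_siblings)
{
	struct intel_engine_cs **siblings;
	unsigned int n;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < num_siblings; n++) {
		siblings[n] = demo_lookup_engine(n);
		if (!siblings[n]) {
			kfree(siblings);	/* unwind before reporting failure */
			return ERR_PTR(-EINVAL);
		}
	}

	/* On success the caller stores the array, e.g. set->engines[idx].siblings. */
	return siblings;
}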
|
/linux-6.3-rc2/drivers/gpu/drm/i915/gt/ |
A D | selftest_execlists.c |
    3708  struct intel_engine_cs **siblings,  in nop_virtual_engine()  argument
    3836  struct intel_engine_cs **siblings,  in __select_siblings()  argument
    3858  struct intel_engine_cs **siblings)  in select_siblings()  argument
    4018  struct intel_engine_cs **siblings,  in slicein_virtual_engine()  argument
    4036  ce = intel_context_create(siblings[n]);  in slicein_virtual_engine()
    4085  struct intel_engine_cs **siblings,  in sliceout_virtual_engine()  argument
    4121  ce = intel_context_create(siblings[n]);  in sliceout_virtual_engine()
    4138  __func__, siblings[n]->name);  in sliceout_virtual_engine()
    4185  struct intel_engine_cs **siblings,  in preserved_virtual_engine()  argument
    4327  struct intel_engine_cs **siblings,  in reset_virtual_engine()  argument
    [all …]
|
A D | intel_execlists_submission.c |
    198   struct intel_engine_cs *siblings[];  member
    1054  if (likely(engine == ve->siblings[0]))  in virtual_xfer_context()
    1068  if (ve->siblings[n] == engine) {  in virtual_xfer_context()
    1069  swap(ve->siblings[n], ve->siblings[0]);  in virtual_xfer_context()
    3697  swap(ve->siblings[swp], ve->siblings[0]);  in virtual_engine_initial_hint()
    3704  return lrc_alloc(ce, ve->siblings[0]);  in virtual_context_alloc()
    3730  intel_engine_pm_get(ve->siblings[n]);  in virtual_context_enter()
    3743  intel_engine_pm_put(ve->siblings[n]);  in virtual_context_exit()
    3754  return ve->siblings[sibling];  in virtual_get_sibling()
    3791  mask = ve->siblings[0]->mask;  in virtual_submission_mask()
    [all …]
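The virtual-engine hits at lines 1054-1069 show a most-recently-used trick: siblings[] is a flexible array member and the last engine used is kept in slot 0, so the common case is a single pointer compare. A sketch of that bookkeeping with a trimmed, hypothetical struct (likely() and swap() are the kernel's):

#include <linux/kernel.h>

struct intel_engine_cs;

struct demo_virtual_engine {
	unsigned int num_siblings;
	struct intel_engine_cs *siblings[];	/* flexible array member */
};

static void demo_move_to_front(struct demo_virtual_engine *ve,
			       struct intel_engine_cs *engine)
{
	unsigned int n;

	if (likely(engine == ve->siblings[0]))
		return;			/* already the preferred sibling */

	for (n = 1; n < ve->num_siblings; n++) {
		if (ve->siblings[n] == engine) {
			swap(ve->siblings[n], ve->siblings[0]);
			break;
		}
	}
}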
|
/linux-6.3-rc2/drivers/gpio/ |
A D | gpio-sim.c |
    560   struct list_head siblings;  member
    594   struct list_head siblings;  member
    724   list_for_each_entry(line, &bank->line_list, siblings)  in gpio_sim_make_line_names()
    755   list_for_each_entry(bank, &dev->bank_list, siblings) {  in gpio_sim_add_hogs()
    770   list_for_each_entry(bank, &dev->bank_list, siblings) {  in gpio_sim_add_hogs()
    865   list_for_each_entry(this, &dev->bank_list, siblings) {  in gpio_sim_bank_labels_non_unique()
    866   list_for_each_entry(pos, &dev->bank_list, siblings) {  in gpio_sim_bank_labels_non_unique()
    1369  list_del(&line->siblings);  in gpio_sim_line_config_group_release()
    1423  list_add_tail(&line->siblings, &bank->line_list);  in gpio_sim_bank_config_make_line_group()
    1436  list_del(&bank->siblings);  in gpio_sim_bank_config_group_release()
    [all …]
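gpio-sim uses the same embedded list_head twice over: banks hang off the device via one siblings list and each bank's lines hang off the bank via another, which is why functions like gpio_sim_add_hogs() nest two list_for_each_entry() walks. A sketch of that two-level walk with trimmed, hypothetical structs:

#include <linux/list.h>

struct demo_line {
	struct list_head siblings;	/* entry in bank->line_list */
	unsigned int offset;
};

struct demo_bank {
	struct list_head siblings;	/* entry in dev->bank_list */
	struct list_head line_list;
};

struct demo_dev {
	struct list_head bank_list;
};

static unsigned int demo_count_lines(struct demo_dev *dev)
{
	struct demo_bank *bank;
	struct demo_line *line;
	unsigned int count = 0;

	list_for_each_entry(bank, &dev->bank_list, siblings)
		list_for_each_entry(line, &bank->line_list, siblings)
			count++;
	return count;
}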
|
/linux-6.3-rc2/Documentation/admin-guide/hw-vuln/ |
A D | core-scheduling.rst |
    100  siblings of a core such that all the selected tasks running on a core are
    107  the sibling has the task enqueued. For rest of the siblings in the core,
    112  Once a task has been selected for all the siblings in the core, an IPI is sent to
    113  siblings for whom a new task was selected. Siblings on receiving the IPI will
    125  siblings could be forced to select a lower priority task if the highest
    157  and are considered system-wide trusted. The forced-idling of siblings running
    174  the siblings to switch to the new task. But there could be hardware delays in
    176  cause an attacker task to start running on a CPU before its siblings receive the
    177  IPI. Even though cache is flushed on entry to user mode, victim tasks on siblings
    185  Core scheduling cannot protect against MDS attacks between the siblings
    [all …]
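core-scheduling.rst describes the user-facing side of this: a task acquires a core-scheduling cookie via prctl(PR_SCHED_CORE), and only tasks sharing a cookie may run concurrently on SMT siblings of a core. A minimal user-space sketch, assuming kernel headers new enough to define the PR_SCHED_CORE constants (v5.14+), with error handling kept to a perror():

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* pid 0 means "the calling task"; scope THREAD tags just this thread. */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
		  PR_SCHED_CORE_SCOPE_THREAD, 0)) {
		perror("PR_SCHED_CORE_CREATE");
		return 1;
	}
	printf("core-scheduling cookie created for pid %d\n", getpid());
	return 0;
}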
|
/linux-6.3-rc2/drivers/nvme/host/ |
A D | multipath.c |
    205  list_for_each_entry_rcu(ns, &head->list, siblings) {  in nvme_mpath_revalidate_paths()
    237  list_for_each_entry_rcu(ns, &head->list, siblings) {  in __nvme_find_path()
    274  ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,  in nvme_next_ns()
    275  siblings);  in nvme_next_ns()
    278  return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);  in nvme_next_ns()
    350  list_for_each_entry_rcu(ns, &head->list, siblings) {  in nvme_available_path()
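The nvme_next_ns() hits show the wrap-around step of round-robin path selection: advance along the RCU-protected siblings list and fall back to the first entry once the end is reached. A sketch of that step with trimmed, hypothetical structs (the rculist helpers are the real ones; callers hold rcu_read_lock()):

#include <linux/rculist.h>

struct demo_ns {
	struct list_head siblings;	/* entry in head->list */
};

struct demo_ns_head {
	struct list_head list;		/* all sibling paths to one namespace */
};

static struct demo_ns *demo_next_ns(struct demo_ns_head *head, struct demo_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings,
				   struct demo_ns, siblings);
	if (ns)
		return ns;			/* next sibling on the list */
	return list_first_or_null_rcu(&head->list, struct demo_ns, siblings);
}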
|
/linux-6.3-rc2/include/scsi/ |
A D | scsi_device.h |
    112  struct list_head siblings;	/* list of all devices on this host */  member
    309  struct list_head siblings;  member
    415  list_for_each_entry((sdev), &((shost)->__devices), siblings)
|
/linux-6.3-rc2/drivers/scsi/ |
A D | scsi.c |
    602  struct list_head *list = (prev ? &prev->siblings : &shost->__devices);  in __scsi_iterate_devices()
    608  next = list_entry(list->next, struct scsi_device, siblings);  in __scsi_iterate_devices()
    753  list_for_each_entry(sdev, &shost->__devices, siblings) {  in __scsi_device_lookup()
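__scsi_iterate_devices() (lines 602 and 608) is a resumable iterator: given the previously returned device, or NULL to start, it steps to the next entry on the host's __devices list via the siblings link. A sketch of just that stepping logic, with locking and reference counting omitted and trimmed, hypothetical structs:

#include <linux/list.h>

struct demo_host {
	struct list_head __devices;
};

struct demo_sdev {
	struct list_head siblings;	/* entry in shost->__devices */
};

static struct demo_sdev *demo_next_device(struct demo_host *shost,
					  struct demo_sdev *prev)
{
	struct list_head *list = prev ? &prev->siblings : &shost->__devices;

	if (list->next == &shost->__devices)
		return NULL;			/* wrapped around: end of list */
	return list_entry(list->next, struct demo_sdev, siblings);
}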
|
A D | scsi_scan.c |
    302   INIT_LIST_HEAD(&sdev->siblings);  in scsi_alloc_sdev()
    401   list_del_init(&starget->siblings);  in scsi_target_destroy()
    434   list_for_each_entry(starget, &shost->__targets, siblings) {  in __scsi_find_target()
    519   INIT_LIST_HEAD(&starget->siblings);  in scsi_alloc_target()
    531   list_add_tail(&starget->siblings, &shost->__targets);  in scsi_alloc_target()
    1977  list_for_each_entry(sdev, &shost->__devices, siblings) {  in scsi_forget_host()
|
/linux-6.3-rc2/Documentation/devicetree/bindings/display/mediatek/ |
A D | mediatek,od.yaml | 16 OD device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,split.yaml | 16 SPLIT device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,ufoe.yaml | 17 UFOe device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,postmask.yaml | 16 POSTMASK device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,wdma.yaml | 16 WDMA device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,ccorr.yaml | 16 CCORR device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,dither.yaml | 17 DITHER device node must be siblings to the central MMSYS_CONFIG node.
|
A D | mediatek,gamma.yaml | 16 GAMMA device node must be siblings to the central MMSYS_CONFIG node.
|
/linux-6.3-rc2/kernel/ |
A D | Kconfig.preempt |
    123  selection across SMT siblings. When enabled -- see
    124  prctl(PR_SCHED_CORE) -- task selection ensures that all SMT siblings
|
/linux-6.3-rc2/drivers/scsi/libsas/ |
A D | sas_expander.c |
    835   list_add_tail(&child->siblings, &parent_ex->children);  in sas_ex_discover_end_dev()
    945   list_add_tail(&child->siblings, &parent->ex_dev.children);  in sas_ex_discover_expander()
    1073  list_for_each_entry(child, &ex->children, siblings) {  in sas_check_level_subtractive_boundary()
    1778  list_for_each_entry(ch, &ex->children, siblings) {  in sas_find_bcast_dev()
    1794  list_for_each_entry_safe(child, n, &ex->children, siblings) {  in sas_unregister_ex_tree()
    1812  &ex_dev->children, siblings) {  in sas_unregister_devs_sas_addr()
    1843  list_for_each_entry(child, &ex_root->children, siblings) {  in sas_discover_bfs_by_root_level()
    1894  list_for_each_entry(child, &dev->ex_dev.children, siblings) {  in sas_discover_new()
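The sas_unregister_ex_tree() hit (line 1794) uses list_for_each_entry_safe(), the variant that caches the next pointer so each child can be unlinked and freed while the list is being traversed. A sketch of that teardown walk with a trimmed, hypothetical device struct (the list API and kfree() are the kernel's):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_dev {
	struct list_head siblings;	/* entry in the parent's children list */
	struct list_head children;	/* this device's own children */
};

static void demo_unregister_tree(struct demo_dev *root)
{
	struct demo_dev *child, *n;

	list_for_each_entry_safe(child, n, &root->children, siblings) {
		demo_unregister_tree(child);	/* depth-first over grandchildren */
		list_del(&child->siblings);	/* safe: iterator already holds n */
		kfree(child);
	}
}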
|
/linux-6.3-rc2/Documentation/admin-guide/pm/ |
A D | intel_epb.rst | 40 example, SMT siblings or cores in one package). For this reason, updating the
|