/linux-6.3-rc2/drivers/thunderbolt/
path.c
    163   path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);  in tb_path_discover()
    164   if (!path->hops) {  in tb_path_discover()
    198   path->hops[i].in_port = p;  in tb_path_discover()
    199   path->hops[i].in_hop_index = h;  in tb_path_discover()
    201   path->hops[i].out_port = out_port;  in tb_path_discover()
    204   tb_dump_hop(&path->hops[i], &hop);  in tb_path_discover()
    270   path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);  in tb_path_alloc()
    271   if (!path->hops) {  in tb_path_alloc()
    333   path->hops[i].in_port = in_port;  in tb_path_alloc()
    335   path->hops[i].out_port = out_port;  in tb_path_alloc()
    [all …]
test.c
    896   in_port = path->hops[i].in_port;  in tb_test_path_not_bonded_lane0()
    897   out_port = path->hops[i].out_port;  in tb_test_path_not_bonded_lane0()
    958   in_port = path->hops[i].in_port;  in tb_test_path_not_bonded_lane1()
    959   out_port = path->hops[i].out_port;  in tb_test_path_not_bonded_lane1()
    1038  in_port = path->hops[i].in_port;  in tb_test_path_not_bonded_lane1_chain()
    1039  out_port = path->hops[i].out_port;  in tb_test_path_not_bonded_lane1_chain()
    1118  in_port = path->hops[i].in_port;  in tb_test_path_not_bonded_lane1_chain_reverse()
    1119  out_port = path->hops[i].out_port;  in tb_test_path_not_bonded_lane1_chain_reverse()
    1210  in_port = path->hops[i].in_port;  in tb_test_path_mixed_chain()
    1211  out_port = path->hops[i].out_port;  in tb_test_path_mixed_chain()
    [all …]
tunnel.c
    746   paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,  in tb_dp_activate()
    747   paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,  in tb_dp_activate()
    748   paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);  in tb_dp_activate()
    751   paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,  in tb_dp_activate()
    752   paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,  in tb_dp_activate()
    753   paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);  in tb_dp_activate()
    1360  hop = &path->hops[0];  in tb_dma_init_rx_path()
    1368  ret = tb_dma_reserve_credits(&path->hops[i], credits);  in tb_dma_init_rx_path()
    1533  if (tb_port_is_nhi(path->hops[0].in_port))  in tb_tunnel_match_dma()
    1543  (tx_path->hops[0].in_hop_index != transmit_ring))  in tb_tunnel_match_dma()
    [all …]
tb.h
    429   struct tb_path_hop *hops;  member
    1116  for ((hop) = &(path)->hops[0]; \
    1117      (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++)
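Across these thunderbolt hits, hops is the struct tb_path_hop array hanging off struct tb_path (tb.h line 429): tb_path_discover()/tb_path_alloc() kcalloc one entry per hop and fill in the in/out ports, and the macro body at tb.h lines 1116-1117 walks that array. Below is a minimal userspace sketch of the same array-of-hops pattern; the field names and values are simplified stand-ins, not the real thunderbolt structures.

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct tb_path_hop / struct tb_path. */
struct hop {
	int in_port;
	int in_hop_index;
	int out_port;
};

struct path {
	struct hop *hops;	/* one entry per hop, as in tb.h line 429 */
	int path_length;
};

/* Same shape as the iteration macro at tb.h lines 1116-1117. */
#define path_for_each_hop(path, hop) \
	for ((hop) = &(path)->hops[0]; \
	     (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++)

int main(void)
{
	struct path path = { .path_length = 3 };
	struct hop *hop;
	int i;

	/* calloc() standing in for kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL) */
	path.hops = calloc(path.path_length, sizeof(*path.hops));
	if (!path.hops)
		return 1;

	for (i = 0; i < path.path_length; i++) {
		path.hops[i].in_port = i;		/* cf. tb_path_alloc() line 333 */
		path.hops[i].out_port = i + 1;		/* cf. tb_path_alloc() line 335 */
		path.hops[i].in_hop_index = 8 + i;	/* arbitrary demo hop IDs */
	}

	path_for_each_hop(&path, hop)
		printf("hop: in %d (hopid %d) -> out %d\n",
		       hop->in_port, hop->in_hop_index, hop->out_port);

	free(path.hops);
	return 0;
}
```

Note that the iterator stops at the address of the last element (<=), exactly as in the tb.h macro, rather than at one-past-the-end.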
/linux-6.3-rc2/drivers/accel/habanalabs/common/mmu/
mmu.c
    516   struct hl_mmu_hop_info *hops,  in hl_mmu_pa_page_with_offset()  argument
    524   if (hops->unscrambled_paddr)  in hl_mmu_pa_page_with_offset()
    527   tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;  in hl_mmu_pa_page_with_offset()
    573   struct hl_mmu_hop_info hops;  in hl_mmu_va_to_pa()  local
    576   memset(&hops, 0, sizeof(hops));  in hl_mmu_va_to_pa()
    588   struct hl_mmu_hop_info *hops)  in hl_mmu_get_tlb_info()  argument
    620   if (hops->unscrambled_paddr)  in hl_mmu_get_tlb_info()
    621   hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);  in hl_mmu_get_tlb_info()
    1219  hops->scrambled_vaddr);  in hl_mmu_hr_get_tlb_info()
    1240  hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;  in hl_mmu_hr_get_tlb_info()
    [all …]
mmu_v1.c
    718   struct hl_mmu_hop_info *hops)  in hl_mmu_v1_get_tlb_info()  argument
    756   hops->hop_info[0].hop_pte_addr =  in hl_mmu_v1_get_tlb_info()
    759   hops->hop_info[0].hop_pte_val =  in hl_mmu_v1_get_tlb_info()
    761   hops->hop_info[0].hop_pte_addr);  in hl_mmu_v1_get_tlb_info()
    764   hops->hop_info[i].hop_addr =  in hl_mmu_v1_get_tlb_info()
    766   hops->hop_info[i - 1].hop_pte_val);  in hl_mmu_v1_get_tlb_info()
    770   hops->hop_info[i].hop_pte_addr =  in hl_mmu_v1_get_tlb_info()
    772   hops->hop_info[i].hop_addr,  in hl_mmu_v1_get_tlb_info()
    774   hops->hop_info[i].hop_pte_val =  in hl_mmu_v1_get_tlb_info()
    776   hops->hop_info[i].hop_pte_addr);  in hl_mmu_v1_get_tlb_info()
    [all …]
mmu_v2_hr.c
    335   struct hl_mmu_hop_info *hops,  in hl_mmu_v2_hr_get_tlb_mapping_params()  argument
    354   hops->range_type = HL_VA_RANGE_TYPE_DRAM;  in hl_mmu_v2_hr_get_tlb_mapping_params()
    358   hops->range_type = HL_VA_RANGE_TYPE_HOST;  in hl_mmu_v2_hr_get_tlb_mapping_params()
    362   hops->range_type = HL_VA_RANGE_TYPE_HOST_HUGE;  in hl_mmu_v2_hr_get_tlb_mapping_params()
    371   struct hl_mmu_hop_info *hops)  in hl_mmu_v2_hr_get_tlb_info()
    373   return hl_mmu_hr_get_tlb_info(ctx, virt_addr, hops,  in hl_mmu_v2_hr_get_tlb_info()
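In the habanalabs MMU code, hops is a struct hl_mmu_hop_info that records every page-table level ("hop") visited while translating a virtual address: mmu_v1.c derives hop i's table address from hop i-1's PTE value, and hl_mmu_pa_page_with_offset() takes the physical page from the last recorded PTE (hop_info[used_hops - 1]). The following is a toy walk over a fake two-level table in the same spirit; the types, table layout, and helper names are invented for illustration and are not the habanalabs API.

```c
#include <stdio.h>
#include <stdint.h>

#define MAX_HOPS    4
#define ENTRIES     16
#define PAGE_SHIFT  12
#define LEVEL_BITS  4	/* 16 entries per level in this toy table */

struct hop_info {		/* stand-in for the per-hop records in hl_mmu_hop_info */
	uint64_t hop_addr;	/* "address" of the table used at this hop */
	uint64_t hop_pte_val;	/* PTE read at this hop */
};

struct tlb_info {
	struct hop_info hop_info[MAX_HOPS];
	int used_hops;
};

/* Toy two-level table: level 0 points at level 1, level 1 holds page frames. */
static uint64_t level0[ENTRIES];
static uint64_t level1[ENTRIES];

static uint64_t walk(uint64_t vaddr, struct tlb_info *hops)
{
	uint64_t *tables[2] = { level0, level1 };
	uint64_t table_addr = (uint64_t)(uintptr_t)level0;
	int i;

	for (i = 0; i < 2; i++) {
		int shift = PAGE_SHIFT + LEVEL_BITS * (1 - i);
		int idx = (vaddr >> shift) & (ENTRIES - 1);

		/* Record this hop, in the spirit of how get_tlb_info() fills hop_info[i]. */
		hops->hop_info[i].hop_addr = table_addr;
		hops->hop_info[i].hop_pte_val = tables[i][idx];
		hops->used_hops = i + 1;
		table_addr = tables[i][idx];	/* next hop's table comes from this PTE */
	}
	/* Last PTE gives the page; add the in-page offset (cf. hl_mmu_pa_page_with_offset()). */
	return hops->hop_info[hops->used_hops - 1].hop_pte_val +
	       (vaddr & ((1 << PAGE_SHIFT) - 1));
}

int main(void)
{
	struct tlb_info hops = { 0 };
	uint64_t vaddr = (3ULL << (PAGE_SHIFT + LEVEL_BITS)) | (5ULL << PAGE_SHIFT) | 0x42;

	level0[3] = (uint64_t)(uintptr_t)level1;	/* hop 0 -> level-1 table */
	level1[5] = 0x80000000ULL;			/* hop 1 -> physical page */

	printf("paddr = 0x%llx, used_hops = %d\n",
	       (unsigned long long)walk(vaddr, &hops), hops.used_hops);
	return 0;
}
```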
/linux-6.3-rc2/tools/testing/selftests/bpf/prog_tests/
cls_redirect.c
    175   enum hops {  enum
    200   enum hops hops;  member
    207   const char *family_str, *type, *conn, *hops, *result, *flags;  in test_str()  local
    221   hops = "no hops";  in test_str()
    222   if (test->hops == ONE_HOP)  in test_str()
    223   hops = "one hop";  in test_str()
    236   type, result, conn, hops, flags);  in test_str()
    292   encap_init(&encap, test->hops == ONE_HOP ? 1 : 0, proto);  in build_input()
    295   if (test->hops == ONE_HOP) {  in build_input()
/linux-6.3-rc2/Documentation/networking/
nexthop-group-resilient.rst
    9     weights of constituent next hops.
    48    constituent next hops: a hash table. The selection algorithm uses SKB hash
    55    the individual next hops is arbitrary. Therefore when a next hop is deleted
    56    the buckets that held it are simply reassigned to other next hops::
    69    When weights of next hops in a group are altered, it may be possible to
    83    cause bucket allocation change, the wants counts for individual next hops
    86    Next hops that have fewer buckets than their wants count, are called
    88    overweight (and therefore no underweight) next hops in the group, it is
    97    After assigning wants counts to next hops, an "upkeep" algorithm runs. For
    105   underweight next hops. If, after considering all buckets in this manner,
    [all …]
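The matched lines from nexthop-group-resilient.rst describe the resilient group's bucket table: flows hash into a fixed number of buckets, each next hop gets a "wants count" proportional to its weight, and an upkeep pass migrates buckets from overweight next hops (more buckets than wanted) to underweight ones. Below is a toy simulation of that wants/upkeep idea; the bucket count and migration policy are simplified (the real algorithm also considers bucket idle time and timers), so treat it as a sketch of the arithmetic only.

```c
#include <stdio.h>

#define BUCKETS 8

struct nh {
	const char *name;
	unsigned int weight;
	unsigned int wants;	/* buckets this next hop "wants" */
	unsigned int has;	/* buckets currently assigned to it */
};

int main(void)
{
	struct nh nhs[] = { { "nh1", 1 }, { "nh2", 3 } };
	int table[BUCKETS];	/* bucket -> next-hop index */
	unsigned int total = 0, i, b;

	for (i = 0; i < 2; i++)
		total += nhs[i].weight;

	/* Wants count: share of the bucket table proportional to weight. */
	for (i = 0; i < 2; i++)
		nhs[i].wants = BUCKETS * nhs[i].weight / total;

	/* Start with every bucket on nh1 (e.g. right after nh2 was added). */
	for (b = 0; b < BUCKETS; b++)
		table[b] = 0;
	nhs[0].has = BUCKETS;

	/* Upkeep: move buckets away from overweight next hops (has > wants)
	 * toward underweight ones (has < wants).  The real upkeep only
	 * migrates idle buckets unless forced; this sketch ignores activity. */
	for (b = 0; b < BUCKETS; b++) {
		struct nh *cur = &nhs[table[b]];

		if (cur->has <= cur->wants)
			continue;
		for (i = 0; i < 2; i++) {
			if (nhs[i].has < nhs[i].wants) {
				cur->has--;
				nhs[i].has++;
				table[b] = i;
				break;
			}
		}
	}

	for (i = 0; i < 2; i++)
		printf("%s: weight %u wants %u has %u\n",
		       nhs[i].name, nhs[i].weight, nhs[i].wants, nhs[i].has);
	return 0;
}
```

With weights 1 and 3 over 8 buckets, the wants counts come out as 2 and 6, and upkeep migrates buckets until each next hop holds exactly its wants count.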
/linux-6.3-rc2/drivers/firmware/arm_scmi/
perf.c
    239   ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain,  in scmi_perf_domain_attributes_get()
    318   iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,  in scmi_perf_describe_levels_get()
    325   ret = ph->hops->iter_response_run(iter);  in scmi_perf_describe_levels_get()
    375   ph->hops->fastchannel_db_ring(fci->set_db);  in scmi_perf_limits_set()
    461   ph->hops->fastchannel_db_ring(fci->set_db);  in scmi_perf_level_set()
    537   ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,  in scmi_perf_domain_init_fc()
    542   ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,  in scmi_perf_domain_init_fc()
    546   ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,  in scmi_perf_domain_init_fc()
    551   ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,  in scmi_perf_domain_init_fc()
powercap.c
    270   ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,  in scmi_powercap_domain_attributes_get()
    394   ph->hops->fastchannel_db_ring(fci->set_db);  in scmi_powercap_cap_set()
    483   ph->hops->fastchannel_db_ring(fci->set_db);  in scmi_powercap_pai_set()
    588   ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,  in scmi_powercap_domain_init_fc()
    593   ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,  in scmi_powercap_domain_init_fc()
    597   ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,  in scmi_powercap_domain_init_fc()
    602   ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,  in scmi_powercap_domain_init_fc()
sensors.c
    351   iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,  in scmi_sensor_update_intervals()
    358   return ph->hops->iter_response_run(iter);  in scmi_sensor_update_intervals()
    489   iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,  in scmi_sensor_axis_extended_names_get()
    500   ret = ph->hops->iter_response_run(iter);  in scmi_sensor_axis_extended_names_get()
    530   iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,  in scmi_sensor_axis_description()
    537   ret = ph->hops->iter_response_run(iter);  in scmi_sensor_axis_description()
    646   ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id,  in iter_sens_descr_process_response()
    685   iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors,  in scmi_sensor_description_get()
    691   return ph->hops->iter_response_run(iter);  in scmi_sensor_description_get()
voltage.c
    193   iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,  in scmi_voltage_levels_get()
    200   ret = ph->hops->iter_response_run(iter);  in scmi_voltage_levels_get()
    243   ph->hops->extended_name_get(ph,  in scmi_voltage_descriptors_get()
clock.c
    171   ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,  in scmi_clock_attributes_get()
    304   iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,  in scmi_clock_describe_rates_get()
    311   ret = ph->hops->iter_response_run(iter);  in scmi_clock_describe_rates_get()
protocols.h
    176   const struct scmi_proto_helpers_ops *hops;  member
power.c
    135   ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,  in scmi_power_domain_attributes_get()
reset.c
    130   ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,  in scmi_reset_domain_attributes_get()
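In the SCMI drivers, hops has nothing to do with network or bus hops: it is the scmi_proto_helpers_ops pointer declared in protocols.h (line 176), through which each protocol (perf, powercap, sensors, voltage, clock, power, reset) reaches shared helpers such as extended_name_get(), iter_response_init()/iter_response_run(), and the fastchannel routines. Below is a stripped-down userspace sketch of that helper-ops pattern; the struct and function names are illustrative stand-ins, not the real SCMI definitions.

```c
#include <stdio.h>

/* Illustrative helper-ops table, in the spirit of scmi_proto_helpers_ops. */
struct helper_ops {
	int (*extended_name_get)(const void *ph, unsigned int id,
				 char *name, size_t len);
};

/* Illustrative protocol handle carrying the helper ops, like ph->hops. */
struct proto_handle {
	const struct helper_ops *hops;
};

static int demo_extended_name_get(const void *ph, unsigned int id,
				  char *name, size_t len)
{
	(void)ph;
	return snprintf(name, len, "domain-%u", id);
}

static const struct helper_ops demo_helpers = {
	.extended_name_get = demo_extended_name_get,
};

/* A "protocol" never calls the helper directly, only through ph->hops. */
static void domain_attributes_get(const struct proto_handle *ph, unsigned int domain)
{
	char name[32];

	ph->hops->extended_name_get(ph, domain, name, sizeof(name));
	printf("resolved name: %s\n", name);
}

int main(void)
{
	struct proto_handle ph = { .hops = &demo_helpers };

	domain_attributes_get(&ph, 3);
	return 0;
}
```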
/linux-6.3-rc2/arch/s390/kernel/
dis.c
    506   int start, end, opsize, hops, i;  in show_code()  local
    526   for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) {  in show_code()
    539   hops = 0;  in show_code()
    540   while (start < end && hops < 8) {  in show_code()
    562   hops++;  in show_code()
/linux-6.3-rc2/net/ipv6/
exthdrs.c
    1112  int hops;  in ipv6_push_rthdr0()  local
    1119  hops = ihdr->rt_hdr.hdrlen >> 1;  in ipv6_push_rthdr0()
    1121  if (hops > 1)  in ipv6_push_rthdr0()
    1123  (hops - 1) * sizeof(struct in6_addr));  in ipv6_push_rthdr0()
    1125  phdr->addr[hops - 1] = **addr_p;  in ipv6_push_rthdr0()
    1137  int plen, hops;  in ipv6_push_rthdr4()  local
    1145  hops = sr_ihdr->first_segment + 1;  in ipv6_push_rthdr4()
    1147  (hops - 1) * sizeof(struct in6_addr));  in ipv6_push_rthdr4()
    1152  if (sr_ihdr->hdrlen > hops * 2) {  in ipv6_push_rthdr4()
    1155  tlvs_offset = (1 + hops * 2) << 3;  in ipv6_push_rthdr4()
    [all …]
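In exthdrs.c the hop count is recovered from header fields visible in the matches above: a type 0 routing header's hdrlen counts 8-octet units and every IPv6 address occupies two of them, so hops = hdrlen >> 1; for the segment routing header the count is first_segment + 1, TLVs exist when hdrlen > hops * 2, and they start at byte offset (1 + hops * 2) << 3. A small standalone sketch of just that arithmetic (not the kernel structures):

```c
#include <stdio.h>

/* Each IPv6 address is 16 bytes = two 8-octet units of hdrlen. */
static unsigned int rthdr0_hops(unsigned int hdrlen)
{
	return hdrlen >> 1;		/* cf. ipv6_push_rthdr0(), line 1119 */
}

static unsigned int srh_hops(unsigned int first_segment)
{
	return first_segment + 1;	/* cf. ipv6_push_rthdr4(), line 1145 */
}

static unsigned int srh_tlvs_offset(unsigned int hops)
{
	/* 8-byte fixed SRH header plus 16 bytes per listed segment. */
	return (1 + hops * 2) << 3;	/* cf. line 1155 */
}

int main(void)
{
	unsigned int hdrlen = 6;	/* three addresses in a type 0 header */
	unsigned int first_segment = 2;	/* SRH listing three segments */
	unsigned int hops = srh_hops(first_segment);

	printf("rthdr0: hdrlen %u -> %u hops\n", hdrlen, rthdr0_hops(hdrlen));
	printf("srh: first_segment %u -> %u hops, TLVs at byte %u if hdrlen > %u\n",
	       first_segment, hops, srh_tlvs_offset(hops), hops * 2);
	return 0;
}
```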
/linux-6.3-rc2/include/linux/
topology.h
    250   extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
    258   sched_numa_hop_mask(unsigned int node, unsigned int hops)  in sched_numa_hop_mask()  argument
/linux-6.3-rc2/include/dt-bindings/usb/
pd.h
    404   #define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \  argument
    407   | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
/linux-6.3-rc2/include/linux/usb/
pd_vdo.h
    429   #define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \  argument
    432   | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
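In both PD headers, the hops parameter of VDO_ACABLE2() is a 2-bit field packed into bits 7:6 of the Active Cable VDO 2, as the ((hops) & 0x3) << 6 term shows. A tiny sketch of packing and unpacking just that field; the helper and macro names below are made up for illustration:

```c
#include <stdio.h>

#define ACABLE2_HOPS_SHIFT	6
#define ACABLE2_HOPS_MASK	0x3

/* Pack the 2-bit hops field the way VDO_ACABLE2() does: ((hops) & 0x3) << 6. */
static unsigned int acable2_pack_hops(unsigned int vdo, unsigned int hops)
{
	vdo &= ~(ACABLE2_HOPS_MASK << ACABLE2_HOPS_SHIFT);
	return vdo | ((hops & ACABLE2_HOPS_MASK) << ACABLE2_HOPS_SHIFT);
}

static unsigned int acable2_unpack_hops(unsigned int vdo)
{
	return (vdo >> ACABLE2_HOPS_SHIFT) & ACABLE2_HOPS_MASK;
}

int main(void)
{
	unsigned int vdo = acable2_pack_hops(0, 2);

	printf("vdo = 0x%02x, hops = %u\n", vdo, acable2_unpack_hops(vdo));
	return 0;
}
```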
/linux-6.3-rc2/Documentation/ABI/testing/
sysfs-bus-rapidio
    156   (RO) number of hops on the path to the switch
/linux-6.3-rc2/net/batman-adv/
distributed-arp-table.c
    78    __u8 hops;  member
    1485  __u8 hops;  in batadv_dat_check_dhcp()  member
/linux-6.3-rc2/kernel/sched/
topology.c
    2150  const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)  in sched_numa_hop_mask()  argument
    2154  if (node >= nr_node_ids || hops >= sched_domains_numa_levels)  in sched_numa_hop_mask()
    2161  return masks[hops][node];  in sched_numa_hop_mask()
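sched_numa_hop_mask() is a bounds-checked lookup into a two-dimensional table: masks[hops][node] yields the cpumask of CPUs that are at most hops NUMA hops away from node, with node and hops validated against nr_node_ids and sched_domains_numa_levels (line 2154). A toy model of that lookup using plain bitmasks and made-up table contents instead of struct cpumask:

```c
#include <stdio.h>

#define NR_NODES   2
#define NR_LEVELS  2	/* stand-in for sched_domains_numa_levels */

/* masks[hops][node]: CPUs reachable from `node` within `hops` NUMA hops.
 * Toy topology: node 0 = CPUs 0-1, node 1 = CPUs 2-3. */
static const unsigned long masks[NR_LEVELS][NR_NODES] = {
	{ 0x3, 0xc },	/* 0 hops: only the local node's CPUs */
	{ 0xf, 0xf },	/* 1 hop: every CPU */
};

static const unsigned long *numa_hop_mask(unsigned int node, unsigned int hops)
{
	/* Same shape as the checks at kernel/sched/topology.c line 2154. */
	if (node >= NR_NODES || hops >= NR_LEVELS)
		return NULL;	/* the kernel returns an error pointer here */

	return &masks[hops][node];
}

int main(void)
{
	for (unsigned int hops = 0; hops <= NR_LEVELS; hops++) {
		const unsigned long *mask = numa_hop_mask(0, hops);

		if (!mask) {
			printf("hops=%u: out of range\n", hops);
			continue;
		}
		printf("hops=%u: cpumask 0x%lx\n", hops, *mask);
	}
	return 0;
}
```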