/linux-6.3-rc2/drivers/infiniband/hw/mlx5/

umr.h
    30  if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))    in mlx5r_umr_can_load_pas()
    37  if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&    in mlx5r_umr_can_load_pas()
    55  MLX5_CAP_GEN(dev->mdev, atomic) &&    in mlx5r_umr_can_reconfig()
    56  MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))    in mlx5r_umr_can_reconfig()
    60  MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&    in mlx5r_umr_can_reconfig()
    61  !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))    in mlx5r_umr_can_reconfig()
    65  MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&    in mlx5r_umr_can_reconfig()
    66  !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))    in mlx5r_umr_can_reconfig()
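The umr.h matches above (and the umr.c ones further down) follow one pattern: a general capability is read together with a matching *_disabled or *_umr bit before UMR is trusted to modify that attribute. A minimal sketch of that pattern, assuming struct mlx5_ib_dev from drivers/infiniband/hw/mlx5/mlx5_ib.h; the helper name is invented and its logic mirrors only the atomic pair at lines 55-56:

/*
 * Hypothetical helper (not a function from umr.h): reconfiguration of the
 * atomic attribute through UMR is blocked when the HCA supports atomics
 * but flags UMR modification of them as disabled.
 */
static bool example_umr_atomic_reconfig_blocked(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_GEN(dev->mdev, atomic) &&
	       MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled);
}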

main.c
   844  if (MLX5_CAP_GEN(mdev, pkv))    in mlx5_ib_query_device()
   846  if (MLX5_CAP_GEN(mdev, qkv))    in mlx5_ib_query_device()
   848  if (MLX5_CAP_GEN(mdev, apm))    in mlx5_ib_query_device()
   850  if (MLX5_CAP_GEN(mdev, xrc))    in mlx5_ib_query_device()
   862  if (MLX5_CAP_GEN(mdev, sho)) {    in mlx5_ib_query_device()
   947  if (MLX5_CAP_GEN(mdev, end_pad))    in mlx5_ib_query_device()
  1056  MLX5_CAP_GEN(dev->mdev,    in mlx5_ib_query_device()
  1072  MLX5_CAP_GEN(mdev, qos)) {    in mlx5_ib_query_device()
  1780  MLX5_CAP_GEN(dev->mdev,    in set_ucontext_resp()
  3855  if (MLX5_CAP_GEN(mdev, imaicl))    in mlx5_ib_stage_caps_init()
    [all …]

counters.c
   300  if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {    in do_get_hw_stats()
   492  if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {    in mlx5_ib_fill_counters()
   513  if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {    in mlx5_ib_fill_counters()
   520  if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {    in mlx5_ib_fill_counters()
   567  if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))    in __mlx5_ib_alloc_counters()
   570  if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))    in __mlx5_ib_alloc_counters()
   573  if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))    in __mlx5_ib_alloc_counters()
   576  if (MLX5_CAP_GEN(dev->mdev, roce_accl))    in __mlx5_ib_alloc_counters()
   581  if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {    in __mlx5_ib_alloc_counters()
   914  if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))    in mlx5_ib_counters_init()
    [all …]

qp.c
   474  MLX5_CAP_GEN(dev->mdev,    in set_rq_size()
  2050  if (MLX5_CAP_GEN(mdev, ece_support))    in create_xrc_tgt_qp()
  2120  if (MLX5_CAP_GEN(mdev, ece_support))    in create_dci()
  2195  if (MLX5_CAP_GEN(mdev, ece_support))    in create_dci()
  2291  if (MLX5_CAP_GEN(mdev, ece_support))    in create_user_qp()
  2764  if (!MLX5_CAP_GEN(dev->mdev, xrc))    in check_qp_type()
  2982  MLX5_CAP_GEN(mdev, sho), qp);    in process_create_flags()
  2987  MLX5_CAP_GEN(mdev, cd), qp);    in process_create_flags()
  2989  MLX5_CAP_GEN(mdev, cd), qp);    in process_create_flags()
  2991  MLX5_CAP_GEN(mdev, cd), qp);    in process_create_flags()
    [all …]

umr.c
    55  if (MLX5_CAP_GEN(dev->mdev, atomic))    in get_umr_update_access_mask()
    58  if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))    in get_umr_update_access_mask()
    61  if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))    in get_umr_update_access_mask()
    79  MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))    in umr_check_mkey_mask()
    83  MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))    in umr_check_mkey_mask()
    87  !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))    in umr_check_mkey_mask()
    91  !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))    in umr_check_mkey_mask()
   682  return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);    in umr_can_use_indirect_mkey()

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/lib/

sf.h
    11  return MLX5_CAP_GEN(dev, sf_base_id);    in mlx5_sf_start_function_id()
    18  return MLX5_CAP_GEN(dev, sf);    in mlx5_sf_supported()
    25  if (MLX5_CAP_GEN(dev, max_num_sf))    in mlx5_sf_max_functions()
    26  return MLX5_CAP_GEN(dev, max_num_sf);    in mlx5_sf_max_functions()
    28  return 1 << MLX5_CAP_GEN(dev, log_max_sf);    in mlx5_sf_max_functions()
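The sf.h matches at lines 25, 26 and 28 belong to a single helper. A sketch reconstructed from just those fragments (the signature and any guard not visible in the listing are assumptions), assuming MLX5_CAP_GEN and struct mlx5_core_dev from include/linux/mlx5/driver.h:

/*
 * Reconstructed sketch of mlx5_sf_max_functions(): prefer the absolute
 * max_num_sf capability and fall back to the log2-encoded log_max_sf
 * when max_num_sf reads as zero.
 */
static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, max_num_sf))
		return MLX5_CAP_GEN(dev, max_num_sf);
	return 1 << MLX5_CAP_GEN(dev, log_max_sf);
}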

clock.h
    38  u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);    in mlx5_is_real_time_rq()
    47  u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format);    in mlx5_is_real_time_sq()

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/

fw.c
   174  if (MLX5_CAP_GEN(dev, pg)) {    in mlx5_query_hca_caps()
   180  if (MLX5_CAP_GEN(dev, atomic)) {    in mlx5_query_hca_caps()
   186  if (MLX5_CAP_GEN(dev, roce)) {    in mlx5_query_hca_caps()
   218  if (MLX5_CAP_GEN(dev, qos)) {    in mlx5_query_hca_caps()
   224  if (MLX5_CAP_GEN(dev, debug))    in mlx5_query_hca_caps()
   227  if (MLX5_CAP_GEN(dev, pcam_reg))    in mlx5_query_hca_caps()
   230  if (MLX5_CAP_GEN(dev, mcam_reg)) {    in mlx5_query_hca_caps()
   236  if (MLX5_CAP_GEN(dev, qcam_reg))    in mlx5_query_hca_caps()
   251  if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {    in mlx5_query_hca_caps()
   270  if (MLX5_CAP_GEN(dev, crypto)) {    in mlx5_query_hca_caps()
    [all …]

vport.c
   470  if (!MLX5_CAP_GEN(mdev, vport_group_manager))    in mlx5_modify_nic_vport_node_guid()
   564  if (MLX5_CAP_GEN(dev, num_ports) == 2)    in mlx5_query_hca_vport_gid()
   629  if (MLX5_CAP_GEN(dev, num_ports) == 2)    in mlx5_query_hca_vport_pkey()
   677  if (MLX5_CAP_GEN(dev, num_ports) == 2)    in mlx5_query_hca_vport_context()
   836  if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&    in mlx5_nic_vport_update_local_lb()
   837  !MLX5_CAP_GEN(mdev, disable_local_lb_uc))    in mlx5_nic_vport_update_local_lb()
   849  if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))    in mlx5_nic_vport_update_local_lb()
   853  if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))    in mlx5_nic_vport_update_local_lb()
   988  if (MLX5_CAP_GEN(dev, num_ports) == 2)    in mlx5_core_query_vport_counter()
  1051  if (MLX5_CAP_GEN(dev, num_ports) > 1)    in mlx5_core_modify_hca_vport_context()
    [all …]

dev.c
    65  if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)    in mlx5_eth_supported()
    68  if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {    in mlx5_eth_supported()
    73  if (!MLX5_CAP_GEN(dev, nic_flow_table)) {    in mlx5_eth_supported()
   106  if (!MLX5_CAP_GEN(dev, cq_moderation))    in mlx5_eth_supported()
   190  if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)    in is_mp_supported()
   596  if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||    in next_phys_dev_lag()
   597  !MLX5_CAP_GEN(mdev, lag_master) ||    in next_phys_dev_lag()
   598  (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||    in next_phys_dev_lag()
   599  MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))    in next_phys_dev_lag()

en_dcbnl.c
    92  if (!MLX5_CAP_GEN(priv->mdev, dcbx))    in mlx5e_dcbnl_switch_to_host_mode()
   117  if (!MLX5_CAP_GEN(priv->mdev, ets))    in mlx5e_dcbnl_ieee_getets()
   326  if (!MLX5_CAP_GEN(priv->mdev, ets))    in mlx5e_dcbnl_ieee_setets()
   422  if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {    in mlx5e_dcbnl_setdcbx()
   629  if (!MLX5_CAP_GEN(mdev, ets))    in mlx5e_dcbnl_setall()
   738  if (!MLX5_CAP_GEN(priv->mdev, ets)) {    in mlx5e_dcbnl_getpgtccfgtx()
  1025  if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))    in mlx5e_dcbnl_build_netdev()
  1052  if (!MLX5_CAP_GEN(priv->mdev, ets))    in mlx5e_ets_init()
  1222  if (!MLX5_CAP_GEN(mdev, sbcam_reg))    in mlx5e_query_port_buffers_cell_size()
  1238  if (!MLX5_CAP_GEN(priv->mdev, qos))    in mlx5e_dcbnl_initialize()
    [all …]

pci_irq.c
    56  min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);    in mlx5_get_default_msix_vec_count()
    57  max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);    in mlx5_get_default_msix_vec_count()
    86  if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))    in mlx5_set_msix_vec_count()
    89  min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);    in mlx5_set_msix_vec_count()
    90  max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);    in mlx5_set_msix_vec_count()
   630  int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?    in mlx5_irq_table_create()
   631  MLX5_CAP_GEN(dev, max_num_eqs) :    in mlx5_irq_table_create()
   632  1 << MLX5_CAP_GEN(dev, log_max_eq);    in mlx5_irq_table_create()
   640  pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;    in mlx5_irq_table_create()

uar.c
    63  if (MLX5_CAP_GEN(mdev, uar_4k))    in uars_per_sys_page()
    64  return MLX5_CAP_GEN(mdev, num_of_uars_per_page);    in uars_per_sys_page()
    73  if (MLX5_CAP_GEN(mdev, uar_4k))    in uar2pfn()
   201  (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;    in map_offset()
   281  bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);    in addr_to_dbi_in_syspage()

mlx5_core.h
   255  #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
   256  MLX5_CAP_GEN((mdev), pps_modify) && \
   285  return MLX5_CAP_GEN(dev, vport_group_manager) &&    in mlx5_lag_is_lacp_owner()
   286  (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&    in mlx5_lag_is_lacp_owner()
   287  MLX5_CAP_GEN(dev, lag_master);    in mlx5_lag_is_lacp_owner()
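The three mlx5_core.h matches at lines 285-287 make up the entire body of one inline helper. A sketch pieced together from those fragments, with the signature assumed:

/*
 * Sketch of mlx5_lag_is_lacp_owner(): the device owns LACP handling for
 * the bond only if it is the vport group manager, exposes more than one
 * LAG port and is the LAG master.
 */
static inline bool mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, vport_group_manager) &&
	       (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
	       MLX5_CAP_GEN(dev, lag_master);
}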

eq.c
   310  if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))    in create_map_eq()
   546  if (MLX5_CAP_GEN(dev, general_notification_event))    in gather_async_events_mask()
   549  if (MLX5_CAP_GEN(dev, port_module_event))    in gather_async_events_mask()
   557  if (MLX5_CAP_GEN(dev, fpga))    in gather_async_events_mask()
   563  if (MLX5_CAP_GEN(dev, temp_warn_event))    in gather_async_events_mask()
   569  if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))    in gather_async_events_mask()
   588  if (MLX5_CAP_GEN(dev, event_cap))    in gather_async_events_mask()
  1096  int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?    in mlx5_eq_table_create()
  1097  MLX5_CAP_GEN(dev, max_num_eqs) :    in mlx5_eq_table_create()
  1098  1 << MLX5_CAP_GEN(dev, log_max_eq);    in mlx5_eq_table_create()
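pci_irq.c (lines 630-632) and eq.c (lines 1096-1098) repeat the same EQ-count idiom. A sketch with a made-up helper name, assuming struct mlx5_core_dev and MLX5_CAP_GEN from include/linux/mlx5/driver.h:

/*
 * Made-up helper isolating the idiom shared by mlx5_irq_table_create()
 * and mlx5_eq_table_create(): use the absolute max_num_eqs capability
 * when firmware reports it as non-zero, otherwise expand the
 * log2-encoded log_max_eq.
 */
static int example_max_num_eqs(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, max_num_eqs) ?
	       MLX5_CAP_GEN(dev, max_num_eqs) :
	       1 << MLX5_CAP_GEN(dev, log_max_eq);
}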

main.c
   217  if (!MLX5_CAP_GEN(dev, driver_version))    in mlx5_set_driver_version()
   428  if (!MLX5_CAP_GEN(dev, atomic))    in handle_hca_cap_atomic()
   458  !MLX5_CAP_GEN(dev, pg))    in handle_hca_cap_odp()
   528  return MLX5_CAP_GEN(dev, roce);    in mlx5_is_roce_on()
   574  mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),    in handle_hca_cap()
   663  (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));    in is_roce_fw_disabled()
   697  if (!MLX5_CAP_GEN(dev, port_selection_cap))    in handle_hca_cap_port_selection()
   796  if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)    in mlx5_core_set_hca_defaults()
  1615  seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));    in vhca_id_show()
  1935  fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);    in mlx5_try_fast_unload()
    [all …]

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en/

monitor_stats.c
    27  if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))    in mlx5e_monitor_counter_supported()
    30  MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <    in mlx5e_monitor_counter_supported()
    33  if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <    in mlx5e_monitor_counter_supported()
   103  int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);    in mlx5e_set_monitor_counter()
   104  int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);    in mlx5e_set_monitor_counter()
   106  MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);    in mlx5e_set_monitor_counter()

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/esw/

debugfs.c
   152  if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))    in mlx5_esw_vport_debugfs_create()
   169  if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {    in mlx5_esw_vport_debugfs_create()
   176  if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {    in mlx5_esw_vport_debugfs_create()
   183  if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))    in mlx5_esw_vport_debugfs_create()
   186  if (MLX5_CAP_GEN(esw->dev, invalid_command_count))    in mlx5_esw_vport_debugfs_create()
   190  if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))    in mlx5_esw_vport_debugfs_create()
   194  if (MLX5_CAP_GEN(esw->dev, nic_receive_steering_discard))    in mlx5_esw_vport_debugfs_create()

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/

dev.c
    30  return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);    in mlx5_sf_dev_supported()
   170  base_id = MLX5_CAP_GEN(table->dev, sf_base_id);    in mlx5_sf_dev_state_change_handler()
   212  function_id = MLX5_CAP_GEN(dev, sf_base_id);    in mlx5_sf_dev_vhca_arm_all()
   237  function_id = MLX5_CAP_GEN(dev, sf_base_id);    in mlx5_sf_dev_add_active_work()
   274  if (MLX5_CAP_GEN(table->dev, eswitch_manager))    in mlx5_sf_dev_queue_active_work()
   313  if (MLX5_CAP_GEN(dev, max_num_sf))    in mlx5_sf_dev_table_create()
   314  max_sfs = MLX5_CAP_GEN(dev, max_num_sf);    in mlx5_sf_dev_table_create()
   316  max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);    in mlx5_sf_dev_table_create()
   317  table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);    in mlx5_sf_dev_table_create()

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/lag/

lag.h
    79  if (!MLX5_CAP_GEN(dev, vport_group_manager) ||    in mlx5_is_lag_supported()
    80  !MLX5_CAP_GEN(dev, lag_master) ||    in mlx5_is_lag_supported()
    81  MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||    in mlx5_is_lag_supported()
    82  MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)    in mlx5_is_lag_supported()
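Lines 79-82 of lag.h are one contiguous condition. A sketch of the surrounding helper, with the signature and the return statements assumed:

/*
 * Sketch of the mlx5_is_lag_supported() gate: LAG requires the vport
 * group manager and lag_master capabilities plus a port count between
 * 2 and MLX5_MAX_PORTS.
 */
static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
		return false;
	return true;
}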

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ktls.h
    25  if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))    in mlx5e_is_ktls_device()
    28  if (!MLX5_CAP_GEN(mdev, log_max_dek))    in mlx5e_is_ktls_device()
    64  return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);    in mlx5e_is_ktls_tx()

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/steering/

dr_cmd.c
   116  caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);    in mlx5dr_cmd_query_device()
   117  caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);    in mlx5dr_cmd_query_device()
   121  if (MLX5_CAP_GEN(mdev, roce)) {    in mlx5dr_cmd_query_device()
   133  caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);    in mlx5dr_cmd_query_device()
   149  MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);    in mlx5dr_cmd_query_device()
   151  MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);    in mlx5dr_cmd_query_device()
   156  MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);    in mlx5dr_cmd_query_device()
   168  MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);    in mlx5dr_cmd_query_device()
   172  MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);    in mlx5dr_cmd_query_device()
   176  MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);    in mlx5dr_cmd_query_device()
    [all …]

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/

egress_ofld.c
    63  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {    in esw_acl_egress_ofld_rules_create()
   111  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {    in esw_acl_egress_ofld_groups_create()
   178  !MLX5_CAP_GEN(esw->dev, prio_tag_required))    in esw_acl_egress_ofld_setup()
   188  if (MLX5_CAP_GEN(esw->dev, prio_tag_required))    in esw_acl_egress_ofld_setup()
   244  fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);    in mlx5_esw_acl_egress_vport_bond()

/linux-6.3-rc2/include/linux/mlx5/

driver.h
  1216  return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num);    in mlx5_core_is_management_pf()
  1227  return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);    in mlx5_core_is_ecpf_esw_manager()
  1257  return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&    in mlx5_core_is_mp_slave()
  1258  MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;    in mlx5_core_is_mp_slave()
  1263  return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;    in mlx5_core_is_mp_master()
  1277  return MLX5_CAP_GEN(dev, native_port_num);    in mlx5_core_native_port_num()
  1282  int idx = MLX5_CAP_GEN(dev, native_port_num);    in mlx5_get_dev_index()
  1298  if (MLX5_CAP_GEN(dev, roce_rw_supported))    in mlx5_get_roce_state()
  1299  return MLX5_CAP_GEN(dev, roce);    in mlx5_get_roce_state()
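Two of the driver.h helpers are fully visible in the listing (lines 1263 and 1277); sketches with assumed signatures:

/* Sketch: a device is the multi-port master when it exposes more than
 * one vhca port (driver.h:1263). */
static inline bool mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

/* Sketch: the native port number is read straight from the general
 * capability field (driver.h:1277). */
static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, native_port_num);
}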

vport.h
    40  (MLX5_CAP_GEN(mdev, vport_group_manager) && \
    41  (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \