/drivers/net/ethernet/mellanox/mlx5/core/
eswitch.c
     111  if (!esw)  in mlx5_eswitch_get_vport()
    1487  mlx5_eq_notifier_register(esw->dev, &esw->nb);  in mlx5_eswitch_enable_locked()
    1508  esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);  in mlx5_eswitch_enable_locked()
    1510  mlx5_esw_mode_change_notify(esw, esw->mode);  in mlx5_eswitch_enable_locked()
    1515  mlx5_eq_notifier_unregister(esw->dev, &esw->nb);  in mlx5_eswitch_enable_locked()
    1586  esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);  in mlx5_eswitch_disable_sriov()
    1625  mlx5_eq_notifier_unregister(esw->dev, &esw->nb);  in mlx5_eswitch_disable_locked()
    1630  esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);  in mlx5_eswitch_disable_locked()
    1857  esw = kzalloc(sizeof(*esw), GFP_KERNEL);  in mlx5_eswitch_init()
    1858  if (!esw)  in mlx5_eswitch_init()
    [all …]
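
The eswitch.c hits cluster in the enable/disable paths: an EQ notifier is registered on enable, unregistered on the enable error path and again on disable, and the eswitch itself is kzalloc'd with an immediate NULL check. Below is a minimal, self-contained sketch of that symmetric setup/rollback shape; the `eq_notifier_*` and `enable_vports` names are stand-ins for illustration, not the mlx5 API.

```c
#include <stdlib.h>

/* Stand-ins for the notifier machinery; the real driver registers
 * esw->nb with mlx5_eq_notifier_register()/unregister(). */
struct notifier { int registered; };

static void eq_notifier_register(struct notifier *nb)   { nb->registered = 1; }
static void eq_notifier_unregister(struct notifier *nb) { nb->registered = 0; }

struct eswitch {
	struct notifier nb;
	int enabled_vports;
};

/* Hypothetical enable step that may fail; return nonzero to
 * exercise the rollback path. */
static int enable_vports(struct eswitch *esw)
{
	esw->enabled_vports = 2;
	return 0;
}

static int eswitch_enable_locked(struct eswitch *esw)
{
	int err;

	eq_notifier_register(&esw->nb);          /* cf. hit at 1487 */
	err = enable_vports(esw);
	if (err) {
		/* Error path undoes setup in reverse, cf. hit at 1515. */
		eq_notifier_unregister(&esw->nb);
		return err;
	}
	return 0;
}

static void eswitch_disable_locked(struct eswitch *esw)
{
	eq_notifier_unregister(&esw->nb);        /* cf. hit at 1625 */
	esw->enabled_vports = 0;
}

int main(void)
{
	struct eswitch *esw = calloc(1, sizeof(*esw)); /* cf. kzalloc at 1857 */

	if (!esw)                                      /* cf. hit at 1858 */
		return 1;
	if (!eswitch_enable_locked(esw))
		eswitch_disable_locked(esw);
	free(esw);
	return 0;
}
```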
|
eswitch_offloads.c
    1941  esw_chains_destroy(esw, esw_chains(esw));  in esw_create_offloads_fdb_tables()
    1969  esw_chains_destroy(esw, esw_chains(esw));  in esw_destroy_offloads_fdb_tables()
    2828  vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);  in esw_set_master_egress_rule()
    3107  esw);  in mlx5_esw_offloads_devcom_init()
    3118  esw);  in mlx5_esw_offloads_devcom_cleanup()
    3288  manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);  in esw_create_offloads_acl_tables()
    3309  vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);  in esw_destroy_offloads_acl_tables()
    3324  if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)  in mlx5_eswitch_reload_ib_reps()
    3463  esw = host_work->esw;  in esw_functions_changed_event_handler()
    3488  host_work->esw = esw;  in mlx5_esw_funcs_changed_handler()
    [all …]
|
eswitch.h
      86  #define esw_chains(esw) \  argument
      87  ((esw)->fdb_table.offloads.esw_chains_priv)
     321  struct mlx5_eswitch *esw;  member
     636  return esw && MLX5_ESWITCH_MANAGER(esw->dev);  in mlx5_esw_allowed()
     642  return esw->manager_vport == vport_num;  in mlx5_esw_is_manager_vport()
     720  xa_for_each_range(&((esw)->vports), \
     867  if (mlx5_esw_allowed(esw))  in mlx5_eswitch_num_vfs()
     868  return esw->esw_funcs.num_vfs;  in mlx5_eswitch_num_vfs()
     875  if (mlx5_esw_allowed(esw))  in mlx5_eswitch_get_npeers()
     876  return esw->num_peers;  in mlx5_eswitch_get_npeers()
     [all …]
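
eswitch.h shows the guard-helper pattern: mlx5_esw_allowed() checks the pointer and the manager capability in one place, and the small inline accessors below it (mlx5_eswitch_num_vfs(), mlx5_eswitch_get_npeers()) only dereference `esw` behind that guard. Here is a compilable toy version of the same shape, with simplified stand-in types; the real check is MLX5_ESWITCH_MANAGER(esw->dev), not a plain flag.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real structs live in eswitch.h. */
struct esw_funcs { int num_vfs; };
struct eswitch {
	bool manager_cap;            /* models MLX5_ESWITCH_MANAGER(dev) */
	struct esw_funcs esw_funcs;
};

/* NULL-safe guard, cf. mlx5_esw_allowed() at hit 636: callers may
 * hold a NULL eswitch pointer, so the predicate checks that first. */
static inline bool esw_allowed(const struct eswitch *esw)
{
	return esw && esw->manager_cap;
}

/* Accessors gate every dereference on the guard, cf. hits 867-868. */
static inline int eswitch_num_vfs(const struct eswitch *esw)
{
	if (esw_allowed(esw))
		return esw->esw_funcs.num_vfs;
	return 0;
}

int main(void)
{
	struct eswitch esw = { .manager_cap = true,
			       .esw_funcs = { .num_vfs = 4 } };

	/* The NULL call is safe by construction. */
	printf("%d %d\n", eswitch_num_vfs(&esw), eswitch_num_vfs(NULL));
	return 0;
}
```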
|
eswitch_offloads_termtbl.c
     122  mutex_lock(&esw->offloads.termtbl_mutex);  in mlx5_eswitch_termtbl_get_create()
     124  hash_for_each_possible(esw->offloads.termtbl_tbl, tt,  in mlx5_eswitch_termtbl_get_create()
     154  mutex_unlock(&esw->offloads.termtbl_mutex);  in mlx5_eswitch_termtbl_get_create()
     158  mutex_unlock(&esw->offloads.termtbl_mutex);  in mlx5_eswitch_termtbl_get_create()
     163  mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,  in mlx5_eswitch_termtbl_put()  argument
     166  mutex_lock(&esw->offloads.termtbl_mutex);  in mlx5_eswitch_termtbl_put()
     169  mutex_unlock(&esw->offloads.termtbl_mutex);  in mlx5_eswitch_termtbl_put()
     202  if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))  in mlx5_eswitch_offload_is_uplink_port()
     214  mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,  in mlx5_eswitch_termtbl_required()  argument
     244  mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,  in mlx5_eswitch_add_termtbl_rule()  argument
     [all …]
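
The termtbl hits outline a reference-counted get-or-create/put pair: both entry points take offloads.termtbl_mutex, the getter probes a hashtable before allocating, and every exit path drops the lock. Below is a self-contained userspace sketch of that idiom using pthreads and a plain linked list in place of the kernel hashtable; all names are illustrative.

```c
#include <pthread.h>
#include <stdlib.h>

/* Illustrative entry; the real code hashes termination tables
 * keyed by flow destination. */
struct entry {
	int key;
	int refcount;
	struct entry *next;
};

static pthread_mutex_t tbl_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct entry *tbl_head;

/* Get-or-create under the table mutex, cf. the lock/unlock hits at
 * 122, 154 and 158 in mlx5_eswitch_termtbl_get_create(). */
static struct entry *entry_get_create(int key)
{
	struct entry *e;

	pthread_mutex_lock(&tbl_mutex);
	for (e = tbl_head; e; e = e->next) {
		if (e->key == key) {
			e->refcount++;
			goto out;
		}
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		goto out;        /* unlock on the error path too */
	e->key = key;
	e->refcount = 1;
	e->next = tbl_head;
	tbl_head = e;
out:
	pthread_mutex_unlock(&tbl_mutex);
	return e;
}

/* Put drops a reference and frees on zero, cf. the lock/unlock hits
 * at 166 and 169 in mlx5_eswitch_termtbl_put(). */
static void entry_put(struct entry *e)
{
	pthread_mutex_lock(&tbl_mutex);
	if (--e->refcount == 0) {
		struct entry **p;

		for (p = &tbl_head; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				break;
			}
		}
		free(e);
	}
	pthread_mutex_unlock(&tbl_mutex);
}

int main(void)
{
	struct entry *a = entry_get_create(7);
	struct entry *b = entry_get_create(7); /* same entry, refcount 2 */

	entry_put(b);
	entry_put(a);
	return 0;
}
```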
|
/drivers/net/ethernet/mellanox/mlx5/core/esw/
legacy.c
      54  if (!esw->fdb_table.legacy.fdb)  in esw_destroy_legacy_fdb_table()
     199  esw_cleanup_vepa_rules(esw);  in esw_destroy_legacy_table()
     218  mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)  in esw_legacy_enable()
     314  if (!esw)  in mlx5_eswitch_set_vepa()
     317  if (!mlx5_esw_allowed(esw))  in mlx5_eswitch_set_vepa()
     321  if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {  in mlx5_eswitch_set_vepa()
     335  if (!esw)  in mlx5_eswitch_get_vepa()
     338  if (!mlx5_esw_allowed(esw))  in mlx5_eswitch_get_vepa()
     341  if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))  in mlx5_eswitch_get_vepa()
     431  if (!mlx5_esw_allowed(esw))  in mlx5_eswitch_set_vport_vlan()
     [all …]
|
qos.c
     132  node->esw = parent->esw;  in esw_qos_node_set_parent()
     334  if (node->esw == esw && node->ix != esw->qos.root_tsar_ix &&  in esw_qos_calculate_min_rate_divider()
     385  if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)  in esw_qos_normalize_min_rate()
     420  struct mlx5_eswitch *esw = node->esw;  in esw_qos_set_node_min_rate()  local
     529  node->esw = esw;  in __esw_qos_alloc_node()
     764  struct mlx5_eswitch *esw = node->esw;  in __esw_qos_destroy_node()  local
    1793  struct mlx5_eswitch *esw = node->esw;  in mlx5_esw_devlink_rate_node_tc_bw_set()  local
    1822  struct mlx5_eswitch *esw = node->esw;  in mlx5_esw_devlink_rate_node_tx_share_set()  local
    1839  struct mlx5_eswitch *esw = node->esw;  in mlx5_esw_devlink_rate_node_tx_max_set()  local
    1887  struct mlx5_eswitch *esw = node->esw;  in mlx5_esw_devlink_rate_node_del()  local
    [all …]
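
In qos.c, both the divider calculation and the normalization pass skip nodes that belong to another eswitch or are the root TSAR (hits at 334 and 385). The sketch below models that skip-and-normalize flow. The actual scaling rule and constants used by the firmware are not shown in these hits, so the math here is purely illustrative.

```c
#include <stdio.h>

#define MAX_BW_SHARE 100   /* illustrative cap, not the fw constant */
#define NUM_NODES 3

struct qos_node {
	unsigned int min_rate;   /* requested floor */
	unsigned int bw_share;   /* normalized value programmed to hw */
	int is_root;             /* root TSAR is skipped, cf. hits 334/385 */
};

/* Divider taken as the largest requested min_rate among non-root
 * nodes; loosely modeled on esw_qos_calculate_min_rate_divider(). */
static unsigned int calc_min_rate_divider(struct qos_node *nodes, int n)
{
	unsigned int max = 0;

	for (int i = 0; i < n; i++) {
		if (nodes[i].is_root)
			continue;
		if (nodes[i].min_rate > max)
			max = nodes[i].min_rate;
	}
	return max;
}

/* Scale each node's share into [1, MAX_BW_SHARE]; loosely modeled
 * on esw_qos_normalize_min_rate(). */
static void normalize_min_rate(struct qos_node *nodes, int n)
{
	unsigned int div = calc_min_rate_divider(nodes, n);

	for (int i = 0; i < n; i++) {
		if (nodes[i].is_root || !div)
			continue;
		nodes[i].bw_share = nodes[i].min_rate * MAX_BW_SHARE / div;
		if (nodes[i].bw_share == 0 && nodes[i].min_rate)
			nodes[i].bw_share = 1;  /* keep a nonzero floor */
	}
}

int main(void)
{
	struct qos_node nodes[NUM_NODES] = {
		{ .is_root = 1 }, { .min_rate = 500 }, { .min_rate = 1000 },
	};

	normalize_min_rate(nodes, NUM_NODES);
	for (int i = 0; i < NUM_NODES; i++)
		printf("node %d bw_share %u\n", i, nodes[i].bw_share);
	return 0;
}
```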
|
vporttbl.c
      62  key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);  in flow_attr_to_vport_key()
      83  struct mlx5_core_dev *dev = esw->dev;  in mlx5_esw_vporttbl_get()
      90  mutex_lock(&esw->fdb_table.offloads.vports.lock);  in mlx5_esw_vporttbl_get()
      91  esw_vport_tbl_init(esw, attr->vport_ns);  in mlx5_esw_vporttbl_get()
      92  hkey = flow_attr_to_vport_key(esw, attr, &skey);  in mlx5_esw_vporttbl_get()
      93  e = esw_vport_tbl_lookup(esw, &skey, hkey);  in mlx5_esw_vporttbl_get()
     121  mutex_unlock(&esw->fdb_table.offloads.vports.lock);  in mlx5_esw_vporttbl_get()
     138  mutex_lock(&esw->fdb_table.offloads.vports.lock);  in mlx5_esw_vporttbl_put()
     139  esw_vport_tbl_init(esw, attr->vport_ns);  in mlx5_esw_vporttbl_put()
     140  hkey = flow_attr_to_vport_key(esw, attr, &key);  in mlx5_esw_vporttbl_put()
     [all …]
|
indir_table.c
      67  mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,  in mlx5_esw_indir_table_needed()  argument
      76  mlx5_esw_is_sf_vport(esw, vport_num);  in mlx5_esw_indir_table_needed()
      83  esw->dev == dest_mdev &&  in mlx5_esw_indir_table_needed()
     100  struct mlx5_fs_chains *chains = esw_chains(esw);  in mlx5_esw_indir_table_rule_get()
     175  struct mlx5_fs_chains *chains = esw_chains(esw);  in mlx5_esw_indir_table_rule_put()
     185  mlx5_modify_header_dealloc(esw->dev, rule->mh);  in mlx5_esw_indir_table_rule_put()
     289  err = mlx5_create_indir_fwd_group(esw, e);  in mlx5_esw_indir_table_entry_create()
     300  mlx5_esw_indir_table_rule_put(esw, e);  in mlx5_esw_indir_table_entry_create()
     330  mutex_lock(&esw->fdb_table.offloads.indir->lock);  in mlx5_esw_indir_table_get()
     361  mutex_lock(&esw->fdb_table.offloads.indir->lock);  in mlx5_esw_indir_table_put()
     [all …]
|
bridge.c
     368  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_ingress_table_init()  local
     376  esw);  in mlx5_esw_bridge_ingress_table_init()
     382  esw);  in mlx5_esw_bridge_ingress_table_init()
     473  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_egress_table_init()  local
     479  esw);  in mlx5_esw_bridge_egress_table_init()
    1224  struct mlx5_eswitch *esw = bridge->br_offloads->esw;  in mlx5_esw_bridge_vlan_flush()  local
    1358  esw);  in mlx5_esw_bridge_fdb_entry_init()
    1517  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_mcast_set()  local
    1548  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_vport_init()  local
    1808  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_fdb_remove()  local
    [all …]
|
devlink_port.c
      20  mlx5_eswitch_is_vf_vport(esw, vport_num) ||  in mlx5_esw_devlink_port_supported()
      21  mlx5_core_is_ec_vf_vport(esw->dev, vport_num);  in mlx5_esw_devlink_port_supported()
      28  struct mlx5_core_dev *dev = esw->dev;  in mlx5_esw_offloads_pf_vf_devlink_port_attrs_set()
      44  } else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {  in mlx5_esw_offloads_pf_vf_devlink_port_attrs_set()
      49  } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {  in mlx5_esw_offloads_pf_vf_devlink_port_attrs_set()
      63  if (!mlx5_esw_devlink_port_supported(esw, vport_num))  in mlx5_esw_offloads_pf_vf_devlink_port_init()
     109  struct mlx5_core_dev *dev = esw->dev;  in mlx5_esw_offloads_sf_devlink_port_attrs_set()
     154  struct mlx5_core_dev *dev = esw->dev;  in mlx5_esw_offloads_devlink_port_register()
     166  if (mlx5_esw_is_sf_vport(esw, vport_num))  in mlx5_esw_offloads_devlink_port_register()
     168  else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))  in mlx5_esw_offloads_devlink_port_register()
     [all …]
|
bridge_mcast.c
     313  struct mlx5_eswitch *esw = bridge->br_offloads->esw;  in mlx5_esw_bridge_port_mcast_fts_init()  local
     318  esw);  in mlx5_esw_bridge_port_mcast_fts_init()
     359  esw_warn(esw->dev,  in mlx5_esw_bridge_mcast_filter_fg_create()
     368  struct mlx5_eswitch *esw,  in mlx5_esw_bridge_mcast_vlan_proto_fg_create()  argument
     394  esw_warn(esw->dev,  in mlx5_esw_bridge_mcast_vlan_proto_fg_create()
     440  esw_warn(esw->dev,  in mlx5_esw_bridge_mcast_fwd_fg_create()
     450  struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;  in mlx5_esw_bridge_port_mcast_fgs_init()  local
     798  esw_warn(esw->dev,  in mlx5_esw_bridge_ingress_igmp_fg_create()
     814  esw_warn(esw->dev,  in mlx5_esw_bridge_ingress_mld_fg_create()
     838  esw_warn(esw->dev,  in mlx5_esw_bridge_ingress_mld_fg_create()
     [all …]
|
indir_table.h
      14  struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
      17  void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
      21  mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
      43  mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,  in mlx5_esw_indir_table_get()  argument
      51  mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,  in mlx5_esw_indir_table_put()  argument
      57  mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,  in mlx5_esw_indir_table_needed()  argument
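
indir_table.h lists each function twice: real prototypes near the top (hits 14-21) and static inline versions further down (the `argument` hits at 43-57). That is the usual kernel pattern of pairing an API with no-op stubs behind a Kconfig option so call sites compile either way. A toy version of the pattern follows; `CONFIG_DEMO_FEATURE` and the demo names are made up for illustration, and the hits do not show which option gates the real header.

```c
#include <stdio.h>

struct demo_ctx { int dummy; };

#ifdef CONFIG_DEMO_FEATURE
/* Real implementations would live in a .c file compiled only when
 * the option is on. */
int demo_feature_get(struct demo_ctx *ctx);
void demo_feature_put(struct demo_ctx *ctx);
#else
/* Stubs keep every call site compiling when the option is off, so
 * callers need no #ifdefs of their own. */
static inline int demo_feature_get(struct demo_ctx *ctx)
{
	(void)ctx;
	return -1;       /* kernel stubs typically return -EOPNOTSUPP */
}

static inline void demo_feature_put(struct demo_ctx *ctx)
{
	(void)ctx;
}
#endif /* CONFIG_DEMO_FEATURE */

int main(void)
{
	struct demo_ctx ctx;

	printf("get -> %d\n", demo_feature_get(&ctx));
	demo_feature_put(&ctx);
	return 0;
}
```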
|
/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/
ingress_ofld.c
      16  return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&  in esw_acl_ingress_prio_tag_enabled()
      17  mlx5_eswitch_is_vf_vport(esw, vport->vport));  in esw_acl_ingress_prio_tag_enabled()
      55  esw_warn(esw->dev,  in esw_acl_ingress_prio_tag_create()
      90  esw_warn(esw->dev,  in esw_acl_ingress_mod_metadata_create()
     104  esw_warn(esw->dev,  in esw_acl_ingress_mod_metadata_create()
     133  err = acl_ingress_ofld_setup(esw, vport);  in esw_acl_ingress_src_port_drop_create()
     153  esw_acl_ingress_ofld_cleanup(esw, vport);  in esw_acl_ingress_src_port_drop_create()
     175  esw_warn(esw->dev,  in esw_acl_ingress_ofld_rules_create()
     185  esw_warn(esw->dev,  in esw_acl_ingress_ofld_rules_create()
     346  esw_debug(esw->dev,  in acl_ingress_ofld_setup()
     [all …]
|
egress_ofld.c
      61  esw_warn(esw->dev,  in esw_acl_egress_ofld_fwd2vport_create()
      77  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {  in esw_acl_egress_ofld_rules_create()
      82  esw_debug(esw->dev,  in esw_acl_egress_ofld_rules_create()
     125  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {  in esw_acl_egress_ofld_groups_create()
     126  ret = esw_acl_egress_vlan_grp_create(esw, vport);  in esw_acl_egress_ofld_groups_create()
     150  esw_warn(esw->dev,  in esw_acl_egress_ofld_groups_create()
     183  return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);  in esw_acl_egress_needed()
     192  !MLX5_CAP_GEN(esw->dev, prio_tag_required))  in esw_acl_egress_ofld_setup()
     195  if (!esw_acl_egress_needed(esw, vport->vport))  in esw_acl_egress_ofld_setup()
     200  if (mlx5_esw_acl_egress_fwd2vport_supported(esw))  in esw_acl_egress_ofld_setup()
     [all …]
|
ofld.h
      11  int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
      14  int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
      16  int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
      18  static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *esw)  in mlx5_esw_acl_egress_fwd2vport_supported()  argument
      20  return esw && esw->mode == MLX5_ESWITCH_OFFLOADS &&  in mlx5_esw_acl_egress_fwd2vport_supported()
      21  mlx5_eswitch_vport_match_metadata_enabled(esw) &&  in mlx5_esw_acl_egress_fwd2vport_supported()
      22  MLX5_CAP_ESW_FLOWTABLE(esw->dev, egress_acl_forward_to_vport);  in mlx5_esw_acl_egress_fwd2vport_supported()
      26  int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
      27  void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
      35  mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,  in mlx5_esw_acl_ingress_vport_drop_rule_destroy()  argument
      [all …]
|
egress_lgcy.c
      22  struct mlx5_core_dev *dev = esw->dev;  in esw_acl_egress_lgcy_groups_create()
      27  err = esw_acl_egress_vlan_grp_create(esw, vport);  in esw_acl_egress_lgcy_groups_create()
      67  int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,  in esw_acl_egress_lgcy_setup()  argument
      87  drop_counter = mlx5_fc_create(esw->dev, false);  in esw_acl_egress_lgcy_setup()
      89  esw_warn(esw->dev,  in esw_acl_egress_lgcy_setup()
     100  esw_acl_egress_lgcy_cleanup(esw, vport);  in esw_acl_egress_lgcy_setup()
     105  vport->egress.acl = esw_acl_table_create(esw, vport,  in esw_acl_egress_lgcy_setup()
     114  err = esw_acl_egress_lgcy_groups_create(esw, vport);  in esw_acl_egress_lgcy_setup()
     119  esw_debug(esw->dev,  in esw_acl_egress_lgcy_setup()
     147  esw_warn(esw->dev,  in esw_acl_egress_lgcy_setup()
     [all …]
|
ingress_lgcy.c
      22  struct mlx5_core_dev *dev = esw->dev;  in esw_acl_ingress_lgcy_groups_create()
     139  int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,  in esw_acl_ingress_lgcy_setup()  argument
     169  counter = mlx5_fc_create(esw->dev, false);  in esw_acl_ingress_lgcy_setup()
     171  esw_warn(esw->dev,  in esw_acl_ingress_lgcy_setup()
     180  esw_acl_ingress_lgcy_cleanup(esw, vport);  in esw_acl_ingress_lgcy_setup()
     185  vport->ingress.acl = esw_acl_table_create(esw, vport,  in esw_acl_ingress_lgcy_setup()
     194  err = esw_acl_ingress_lgcy_groups_create(esw, vport);  in esw_acl_ingress_lgcy_setup()
     199  esw_debug(esw->dev,  in esw_acl_ingress_lgcy_setup()
     247  esw_warn(esw->dev,  in esw_acl_ingress_lgcy_setup()
     272  esw_warn(esw->dev,  in esw_acl_ingress_lgcy_setup()
     [all …]
|
/drivers/s390/cio/
device_status.c
     125  cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;  in ccw_device_accumulate_esw()
     159  cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;  in ccw_device_accumulate_esw()
     161  cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;  in ccw_device_accumulate_esw()
     164  memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,  in ccw_device_accumulate_esw()
     167  cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;  in ccw_device_accumulate_esw()
     170  cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;  in ccw_device_accumulate_esw()
     173  cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;  in ccw_device_accumulate_esw()
     179  cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;  in ccw_device_accumulate_esw()
     181  cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;  in ccw_device_accumulate_esw()
     185  cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;  in ccw_device_accumulate_esw()
     [all …]
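
This is the one non-mlx5 match: in s390 channel I/O, `esw` is the extended status word inside an interrupt response block (IRB), and ccw_device_accumulate_esw() copies it field by field from a fresh IRB into the one cached on the device, with some fields (failing storage address, secondary CCW address) paired with validity flags such as fsavf and scavf. A toy model of that validity-flag-guarded accumulation follows; the struct layout is invented for illustration, not the real s390 format.

```c
#include <stdio.h>
#include <string.h>

/* Invented miniature of an extended-status-word record. */
struct toy_erw {
	unsigned int fsavf : 1;  /* failing-storage-address valid */
	unsigned int scavf : 1;  /* secondary-CCW-address valid */
};

struct toy_esw {
	struct toy_erw erw;
	unsigned int faddr[2];   /* failing storage address */
	unsigned int saddr;      /* secondary CCW address */
};

/* Copy a field from the fresh status only when its validity flag is
 * set, loosely following the shape of ccw_device_accumulate_esw()
 * (cf. the fsavf/faddr hits at 161/164 and scavf/saddr at 170/173). */
static void accumulate_esw(struct toy_esw *stored, const struct toy_esw *fresh)
{
	if (fresh->erw.fsavf) {
		memcpy(stored->faddr, fresh->faddr, sizeof(stored->faddr));
		stored->erw.fsavf = 1;
	}
	if (fresh->erw.scavf) {
		stored->saddr = fresh->saddr;
		stored->erw.scavf = 1;
	}
}

int main(void)
{
	struct toy_esw stored = { { 0, 0 }, { 0, 0 }, 0 };
	struct toy_esw fresh = { .erw = { .scavf = 1 }, .saddr = 0x1234 };

	accumulate_esw(&stored, &fresh);
	printf("saddr 0x%x fsavf %u\n", stored.saddr, stored.erw.fsavf);
	return 0;
}
```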
|
/drivers/net/ethernet/mellanox/mlx5/core/en/tc/
sample.c
      24  struct mlx5_eswitch *esw;  member
      63  struct mlx5_eswitch *esw = tc_psample->esw;  in sampler_termtbl_create()  local
      66  struct mlx5_core_dev *dev = esw->dev;  in sampler_termtbl_create()
      95  dest.vport.num = esw->manager_vport;  in sampler_termtbl_create()
     265  struct mlx5_eswitch *esw = tc_psample->esw;  in sample_restore_get()  local
     266  struct mlx5_core_dev *mdev = esw->dev;  in sample_restore_get()
     480  struct mlx5_eswitch *esw;  in mlx5e_tc_sample_offload()  local
     499  esw = tc_psample->esw;  in mlx5e_tc_sample_offload()
     594  struct mlx5_eswitch *esw;  in mlx5e_tc_sample_unoffload()  local
     602  esw = tc_psample->esw;  in mlx5e_tc_sample_unoffload()
     [all …]
|
int_port.c
      33  return mlx5_eswitch_vport_match_metadata_enabled(esw) &&  in mlx5e_tc_int_port_supported()
      34  MLX5_CAP_GEN(esw->dev, reg_c_preserve);  in mlx5e_tc_int_port_supported()
      61  mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,  in mlx5e_int_port_create_rx_rule()  argument
     154  struct mlx5_eswitch *esw = priv->dev->priv.eswitch;  in mlx5e_int_port_add()  local
     182  ctx = esw->offloads.reg_c0_obj_pool;  in mlx5e_int_port_add()
     230  struct mlx5_eswitch *esw = priv->dev->priv.eswitch;  in mlx5e_int_port_remove()  local
     233  ctx = esw->offloads.reg_c0_obj_pool;  in mlx5e_int_port_remove()
     308  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_tc_int_port_init()  local
     312  if (!mlx5e_tc_int_port_supported(esw))  in mlx5e_tc_int_port_init()
     360  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_tc_int_port_init_rep_rx()  local
     [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/en/rep/
bridge.c
      26  return esw == priv->mdev->priv.eswitch;  in mlx5_esw_bridge_dev_same_esw()
      36  esw_mdev = esw->dev;  in mlx5_esw_bridge_dev_same_hw()
     112  struct mlx5_eswitch *esw)  in mlx5_esw_bridge_is_local()  argument
     135  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_port_changeupper()  local
     432  struct mlx5_eswitch *esw = br_offloads->esw;  in mlx5_esw_bridge_switchdev_event()  local
     522  struct mlx5_eswitch *esw =  in mlx5e_rep_bridge_init()  local
     527  br_offloads = mlx5_esw_bridge_init(esw);  in mlx5e_rep_bridge_init()
     574  mlx5_esw_bridge_cleanup(esw);  in mlx5e_rep_bridge_init()
     582  struct mlx5_eswitch *esw =  in mlx5e_rep_bridge_cleanup()  local
     585  br_offloads = esw->br_offloads;  in mlx5e_rep_bridge_cleanup()
     [all …]
|
tc.c
     101  mutex_lock(&esw->offloads.encap_tbl_lock);  in mlx5e_rep_update_flows()
     205  struct mlx5_eswitch *esw;  in mlx5e_rep_setup_ft_cb()  local
     212  esw = priv->mdev->priv.eswitch;  in mlx5e_rep_setup_ft_cb()
     387  struct mlx5_eswitch *esw;  in mlx5e_rep_indr_setup_ft_cb()  local
     392  esw = mpriv->mdev->priv.eswitch;  in mlx5e_rep_indr_setup_ft_cb()
     477  mlx5e_tc_int_port_supported(esw))  in mlx5e_rep_check_indr_block_supported()
     566  if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)  in mlx5e_rep_indr_replace_act()
     595  if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)  in mlx5e_rep_indr_destroy_act()
     617  if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)  in mlx5e_rep_indr_stats_act()
     709  struct mlx5_eswitch *esw;  in mlx5e_rep_tc_receive()  local
     [all …]
|
bond.c
      26  struct mlx5_eswitch *esw;  member
      74  mlx5_esw_match_metadata_free(mdata->esw, mdata->metadata_reg_c_0);  in mlx5e_rep_bond_metadata_release()
      80  int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,  in mlx5e_rep_bond_enslave()  argument
      91  rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);  in mlx5e_rep_bond_enslave()
     100  mdata->esw = esw;  in mlx5e_rep_bond_enslave()
     102  mdata->metadata_reg_c_0 = mlx5_esw_match_metadata_alloc(esw);  in mlx5e_rep_bond_enslave()
     123  err = mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport,  in mlx5e_rep_bond_enslave()
     144  void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,  in mlx5e_rep_bond_unslave()  argument
     155  rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);  in mlx5e_rep_bond_unslave()
     170  mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport, 0);  in mlx5e_rep_bond_unslave()
     [all …]
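
bond.c pairs an alloc with a free across two entry points: enslave allocates a shared match-metadata value and programs it into the slave vport's ingress ACL, and unslave writes 0 back and releases the value (hits at 102, 123, 170). A self-contained sketch of that paired lifetime follows; the allocator and update helper are stand-ins, not the mlx5 functions.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy id allocator standing in for mlx5_esw_match_metadata_alloc()
 * and _free(); the names and behavior are illustrative. */
static uint32_t next_metadata = 1;

static uint32_t metadata_alloc(void) { return next_metadata++; }
static void metadata_free(uint32_t md) { (void)md; }

/* Stand-in for programming the vport ingress ACL with the metadata;
 * writing 0 clears it, cf. the unslave hit at 170. */
static int vport_metadata_update(int vport, uint32_t md)
{
	printf("vport %d metadata -> %u\n", vport, md);
	return 0;
}

struct bond_metadata {
	uint32_t metadata_reg_c_0;
};

/* Enslave: allocate the shared metadata on first use, then point the
 * slave's vport at it, rolling back on failure (cf. hits 100-123). */
static int bond_enslave(struct bond_metadata *mdata, int vport)
{
	int err;

	if (!mdata->metadata_reg_c_0)
		mdata->metadata_reg_c_0 = metadata_alloc();

	err = vport_metadata_update(vport, mdata->metadata_reg_c_0);
	if (err) {
		metadata_free(mdata->metadata_reg_c_0);
		mdata->metadata_reg_c_0 = 0;
	}
	return err;
}

/* Unslave: clear the vport's metadata and drop the shared value,
 * loosely following mlx5e_rep_bond_unslave(). */
static void bond_unslave(struct bond_metadata *mdata, int vport)
{
	vport_metadata_update(vport, 0);
	metadata_free(mdata->metadata_reg_c_0);
	mdata->metadata_reg_c_0 = 0;
}

int main(void)
{
	struct bond_metadata mdata = { 0 };

	if (!bond_enslave(&mdata, 3))
		bond_unslave(&mdata, 3);
	return 0;
}
```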
|
/drivers/infiniband/hw/mlx5/
ib_rep.c
      24  ndev = mlx5_ib_get_rep_netdev(rep->esw, rep->vport);  in mlx5_ib_set_vport_rep()
     179  struct mlx5_eswitch *esw;  in mlx5_ib_vport_rep_unload()  local
     182  esw = peer_mdev->priv.eswitch;  in mlx5_ib_vport_rep_unload()
     199  struct mlx5_eswitch *esw;  in mlx5_ib_register_peer_vport_reps()  local
     203  esw = peer_mdev->priv.eswitch;  in mlx5_ib_register_peer_vport_reps()
     229  return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);  in create_flow_rule_vport_sq()
     237  struct mlx5_eswitch *esw;  in mlx5r_rep_probe()  local
     239  esw = mdev->priv.eswitch;  in mlx5r_rep_probe()
     248  struct mlx5_eswitch *esw;  in mlx5r_rep_remove()  local
     250  esw = mdev->priv.eswitch;  in mlx5r_rep_remove()
     [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_tun_encap.c
     416  struct mlx5_eswitch *esw;  in mlx5e_tc_update_neigh_used_value()  local
     419  esw = priv->mdev->priv.eswitch;  in mlx5e_tc_update_neigh_used_value()
     420  mutex_lock(&esw->offloads.encap_tbl_lock);  in mlx5e_tc_update_neigh_used_value()
     552  mutex_lock(&esw->offloads.encap_tbl_lock);  in mlx5e_detach_encap()
     575  mutex_lock(&esw->offloads.decap_tbl_lock);  in mlx5e_detach_decap()
     966  mutex_lock(&esw->offloads.decap_tbl_lock);  in mlx5e_attach_decap()
    1003  mutex_lock(&esw->offloads.decap_tbl_lock);  in mlx5e_attach_decap()
    1036  struct mlx5_eswitch *esw;  in mlx5e_tc_tun_encap_dests_set()  local
    1044  esw = priv->mdev->priv.eswitch;  in mlx5e_tc_tun_encap_dests_set()
    1696  struct mlx5_eswitch *esw;  in mlx5e_tc_fib_event_work()  local
    [all …]
|