esw              2889 arch/mips/include/asm/octeon/cvmx-npei-defs.h 		uint64_t esw:2;
esw              2901 arch/mips/include/asm/octeon/cvmx-npei-defs.h 		uint64_t esw:2;
esw              1562 arch/mips/include/asm/octeon/cvmx-npi-defs.h 		uint64_t esw:2;
esw              1574 arch/mips/include/asm/octeon/cvmx-npi-defs.h 		uint64_t esw:2;
esw              1585 arch/mips/include/asm/octeon/cvmx-npi-defs.h 		uint64_t esw:2;
esw              1597 arch/mips/include/asm/octeon/cvmx-npi-defs.h 		uint64_t esw:2;
esw               108 arch/mips/include/asm/octeon/cvmx-sli-defs.h 		__BITFIELD_FIELD(uint64_t esw:2,
esw               120 arch/mips/include/asm/octeon/cvmx-sli-defs.h 		__BITFIELD_FIELD(uint64_t esw:2,
esw               600 arch/mips/pci/pci-octeon.c 	mem_access.s.esw = 1;	/* Endian-Swap on write. */
esw               893 arch/mips/pci/pcie-octeon.c 	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
esw              1345 arch/mips/pci/pcie-octeon.c 	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
esw               243 arch/s390/include/asm/cio.h 	} esw;
esw                99 drivers/infiniband/hw/mlx5/ib_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
esw               101 drivers/infiniband/hw/mlx5/ib_rep.c 	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
esw               106 drivers/infiniband/hw/mlx5/ib_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
esw               108 drivers/infiniband/hw/mlx5/ib_rep.c 	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
esw               111 drivers/infiniband/hw/mlx5/ib_rep.c u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
esw               113 drivers/infiniband/hw/mlx5/ib_rep.c 	return mlx5_eswitch_mode(esw);
esw               116 drivers/infiniband/hw/mlx5/ib_rep.c struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
esw               119 drivers/infiniband/hw/mlx5/ib_rep.c 	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB);
esw               122 drivers/infiniband/hw/mlx5/ib_rep.c struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
esw               125 drivers/infiniband/hw/mlx5/ib_rep.c 	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
esw               128 drivers/infiniband/hw/mlx5/ib_rep.c struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
esw               130 drivers/infiniband/hw/mlx5/ib_rep.c 	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
esw               133 drivers/infiniband/hw/mlx5/ib_rep.c struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
esw               136 drivers/infiniband/hw/mlx5/ib_rep.c 	return mlx5_eswitch_vport_rep(esw, vport_num);
esw               143 drivers/infiniband/hw/mlx5/ib_rep.c 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
esw               154 drivers/infiniband/hw/mlx5/ib_rep.c 	return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
esw                15 drivers/infiniband/hw/mlx5/ib_rep.h u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
esw                16 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
esw                18 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
esw                19 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
esw                26 drivers/infiniband/hw/mlx5/ib_rep.h struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
esw                29 drivers/infiniband/hw/mlx5/ib_rep.h static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
esw                35 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
esw                42 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
esw                48 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
esw                65 drivers/infiniband/hw/mlx5/ib_rep.h struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
esw               164 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
esw               175 drivers/infiniband/hw/mlx5/main.c 		rep_ndev = mlx5_ib_get_rep_netdev(esw,
esw              3487 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
esw              3490 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw              3495 drivers/infiniband/hw/mlx5/main.c 			 mlx5_eswitch_get_vport_metadata_for_match(esw,
esw                29 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw                33 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
esw                89 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		struct mlx5_eswitch *esw = mdev->priv.eswitch;
esw                91 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
esw               137 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw               144 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
esw               240 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw               245 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
esw               408 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
esw               414 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
esw               425 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
esw               435 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
esw               447 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
esw               461 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5e_sqs2vport_stop(esw, rep);
esw               467 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw               485 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
esw               496 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw               500 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5e_sqs2vport_stop(esw, rep);
esw               603 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw               612 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mutex_lock(&esw->offloads.encap_tbl_lock);
esw               635 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              1539 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1547 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
esw              1980 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
esw              1982 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
esw              1987 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
esw              1989 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
esw               302 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw               304 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
esw              1031 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
esw              1038 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
esw              1043 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
esw              1045 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
esw              1054 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
esw              1061 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
esw              1063 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
esw              1067 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
esw              1079 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
esw              1087 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
esw              1095 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
esw              1122 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw;
esw              1124 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	esw = flow->priv->mdev->priv.eswitch;
esw              1125 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
esw              1137 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw;
esw              1139 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	esw = flow->priv->mdev->priv.eswitch;
esw              1140 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
esw              1153 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1154 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
esw              1157 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
esw              1166 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
esw              1201 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	err = mlx5_eswitch_add_vlan_action(esw, attr);
esw              1228 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
esw              1230 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
esw              1257 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1270 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
esw              1272 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
esw              1278 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5_eswitch_del_vlan_action(esw, attr);
esw              1298 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1344 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
esw              1352 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
esw              1363 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1376 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
esw              1387 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
esw              1505 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		struct mlx5_eswitch *esw;
esw              1508 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		esw = priv->mdev->priv.eswitch;
esw              1509 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mutex_lock(&esw->offloads.encap_tbl_lock);
esw              1526 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              1571 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1573 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
esw              1576 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              1585 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              1591 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_lock(&esw->offloads.encap_tbl_lock);
esw              1595 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              1599 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              1606 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
esw              1612 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_lock(&esw->offloads.peer_mutex);
esw              1614 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.peer_mutex);
esw              2161 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw              2179 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw              2180 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		    esw->offloads.inline_mode < non_tunnel_match_level)) {
esw              2185 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				    non_tunnel_match_level, esw->offloads.inline_mode);
esw              2965 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              2969 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
esw              2996 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              3018 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_lock(&esw->offloads.encap_tbl_lock);
esw              3023 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              3027 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mutex_lock(&esw->offloads.encap_tbl_lock);
esw              3055 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
esw              3056 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              3064 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_lock(&esw->offloads.encap_tbl_lock);
esw              3084 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              3089 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              3095 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.encap_tbl_lock);
esw              3216 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              3289 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              3290 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
esw              3387 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			u32 max_chain = mlx5_eswitch_get_chain_range(esw);
esw              3407 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
esw              3493 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              3497 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
esw              3570 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              3579 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
esw              3643 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
esw              3679 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_lock(&esw->offloads.peer_mutex);
esw              3680 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
esw              3681 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mutex_unlock(&esw->offloads.peer_mutex);
esw              3780 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
esw              3789 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
esw              3950 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw;
esw              3962 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	esw = priv->mdev->priv.eswitch;
esw              3970 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
esw              4175 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
esw              4179 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
esw                61 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
esw                62 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
esw                65 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
esw                69 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
esw                72 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
esw                74 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (idx > esw->total_vports - 1) {
esw                75 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
esw                80 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return &esw->vports[idx];
esw               126 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
esw               129 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
esw               144 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
esw               147 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
esw               187 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
esw               230 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev,
esw               236 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
esw               239 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw               250 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
esw               255 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
esw               259 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
esw               268 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
esw               272 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
esw               279 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
esw               287 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
esw               289 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw               308 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.vepa_fdb = fdb;
esw               313 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
esw               317 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw               349 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.fdb = fdb;
esw               366 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.addr_grp = g;
esw               381 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.allmulti_grp = g;
esw               398 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.promisc_grp = g;
esw               402 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_destroy_legacy_fdb_table(esw);
esw               408 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
esw               410 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Destroy VEPA Table\n");
esw               411 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->fdb_table.legacy.vepa_fdb)
esw               414 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
esw               415 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.vepa_fdb = NULL;
esw               418 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw               420 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Destroy FDB Table\n");
esw               421 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->fdb_table.legacy.fdb)
esw               424 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.promisc_grp)
esw               425 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
esw               426 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.allmulti_grp)
esw               427 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
esw               428 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.addr_grp)
esw               429 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
esw               430 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
esw               432 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.fdb = NULL;
esw               433 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.addr_grp = NULL;
esw               434 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.allmulti_grp = NULL;
esw               435 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.promisc_grp = NULL;
esw               438 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_create_legacy_table(struct mlx5_eswitch *esw)
esw               442 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
esw               444 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = esw_create_legacy_vepa_table(esw);
esw               448 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = esw_create_legacy_fdb_table(esw);
esw               450 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_destroy_legacy_vepa_table(esw);
esw               459 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_legacy_enable(struct mlx5_eswitch *esw)
esw               463 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	ret = esw_create_legacy_table(esw);
esw               467 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
esw               471 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw               473 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_cleanup_vepa_rules(esw);
esw               474 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_destroy_legacy_fdb_table(esw);
esw               475 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_destroy_legacy_vepa_table(esw);
esw               478 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_legacy_disable(struct mlx5_eswitch *esw)
esw               482 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_eswitch_disable_pf_vf_vports(esw);
esw               484 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mc_promisc = &esw->mc_promisc;
esw               488 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_destroy_legacy_table(esw);
esw               492 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
esw               495 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw               504 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->manager_vport == vport)
esw               507 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_mpfs_add_mac(esw->dev, mac);
esw               509 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw               518 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
esw               519 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw               521 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
esw               527 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw               536 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!vaddr->mpfs || esw->manager_vport == vport)
esw               539 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_mpfs_del_mac(esw->dev, mac);
esw               541 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw               554 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void update_allmulti_vports(struct mlx5_eswitch *esw,
esw               562 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_esw_for_all_vports(esw, i, vport) {
esw               580 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 				esw_warn(esw->dev,
esw               587 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 					esw_fdb_set_vport_rule(esw,
esw               602 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw               604 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct hlist_head *hash = esw->mc_table;
esw               609 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->fdb_table.legacy.fdb)
esw               621 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
esw               624 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	update_allmulti_vports(esw, vaddr, esw_mc);
esw               634 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw               635 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev,
esw               642 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw               644 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct hlist_head *hash = esw->mc_table;
esw               649 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->fdb_table.legacy.fdb)
esw               654 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw               659 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev,
esw               675 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	update_allmulti_vports(esw, vaddr, esw_mc);
esw               685 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
esw               707 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			vport_addr_add(esw, addr);
esw               711 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			vport_addr_del(esw, addr);
esw               719 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
esw               733 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
esw               734 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		       MLX5_MAX_MC_PER_VPORT(esw->dev);
esw               750 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
esw               754 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
esw               773 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 					l2addr_hash_find(esw->mc_table,
esw               777 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 					esw_warn(esw->dev,
esw               791 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			esw_warn(esw->dev,
esw               806 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
esw               817 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
esw               829 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			esw_warn(esw->dev,
esw               841 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
esw               845 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
esw               852 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
esw               855 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 				esw_fdb_set_vport_allmulti_rule(esw,
esw               876 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
esw               884 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
esw               892 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_query_nic_vport_promisc(esw->dev,
esw               899 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
esw               908 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_apply_vport_rx_mode(esw, vport, promisc_all,
esw               915 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw               923 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw               924 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw               928 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw               931 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_update_vport_rx_mode(esw, vport);
esw               933 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			esw_update_vport_mc_promisc(esw, vport);
esw               937 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw               939 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
esw               949 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
esw               951 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw               953 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw               956 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw               962 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw               984 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
esw              1040 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
esw              1053 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
esw              1059 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
esw              1061 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_vport_cleanup_egress_rules(esw, vport);
esw              1070 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw              1074 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw              1102 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
esw              1202 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
esw              1214 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
esw              1217 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
esw              1223 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
esw              1225 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_vport_cleanup_ingress_rules(esw, vport);
esw              1238 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
esw              1250 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_vport_cleanup_ingress_rules(esw, vport);
esw              1253 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_disable_ingress_acl(esw, vport);
esw              1257 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = esw_vport_enable_ingress_acl(esw, vport);
esw              1259 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              1265 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev,
esw              1294 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw              1317 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw              1326 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_cleanup_ingress_rules(esw, vport);
esw              1331 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw              1342 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_vport_cleanup_egress_rules(esw, vport);
esw              1345 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_disable_egress_acl(esw, vport);
esw              1349 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = esw_vport_enable_egress_acl(esw, vport);
esw              1351 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              1357 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev,
esw              1380 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw              1404 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev,
esw              1414 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static bool element_type_supported(struct mlx5_eswitch *esw, int type)
esw              1416 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	const struct mlx5_core_dev *dev = esw->dev;
esw              1436 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_create_tsar(struct mlx5_eswitch *esw)
esw              1439 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw              1446 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
esw              1449 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->qos.enabled)
esw              1461 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 						 &esw->qos.root_tsar_id);
esw              1463 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
esw              1467 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->qos.enabled = true;
esw              1470 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_destroy_tsar(struct mlx5_eswitch *esw)
esw              1474 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->qos.enabled)
esw              1477 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
esw              1479 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 						  esw->qos.root_tsar_id);
esw              1481 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
esw              1483 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->qos.enabled = false;
esw              1486 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
esw              1491 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw              1495 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
esw              1508 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		 esw->qos.root_tsar_id);
esw              1518 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
esw              1527 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
esw              1535 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
esw              1539 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
esw              1545 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int esw_vport_qos_config(struct mlx5_eswitch *esw,
esw              1550 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_core_dev *dev = esw->dev;
esw              1567 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		 esw->qos.root_tsar_id);
esw              1580 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
esw              1588 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
esw              1594 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	vport = mlx5_eswitch_get_vport(esw, vport_num);
esw              1597 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return mlx5_modify_scheduling_element_cmd(esw->dev,
esw              1616 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
esw              1622 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->manager_vport == vport_num)
esw              1625 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_modify_vport_admin_state(esw->dev,
esw              1632 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
esw              1634 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
esw              1640 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
esw              1644 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw              1645 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_ingress_config(esw, vport);
esw              1646 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_egress_config(esw, vport);
esw              1685 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw              1690 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              1693 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
esw              1696 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
esw              1700 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_apply_vport_conf(esw, vport);
esw              1703 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
esw              1705 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
esw              1714 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->manager_vport == vport_num ||
esw              1715 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
esw              1720 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->enabled_vports++;
esw              1721 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
esw              1722 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              1725 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_disable_vport(struct mlx5_eswitch *esw,
esw              1733 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
esw              1738 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	flush_workqueue(esw->work_queue);
esw              1740 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
esw              1741 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              1748 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_vport_disable_qos(esw, vport);
esw              1749 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->manager_vport != vport_num &&
esw              1750 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	    esw->mode == MLX5_ESWITCH_LEGACY) {
esw              1751 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_modify_vport_admin_state(esw->dev,
esw              1755 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_disable_egress_acl(esw, vport);
esw              1756 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_vport_disable_ingress_acl(esw, vport);
esw              1759 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->enabled_vports--;
esw              1760 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              1766 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
esw              1772 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	vport = mlx5_eswitch_get_vport(esw, vport_num);
esw              1777 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		queue_work(esw->work_queue, &vport->vport_change_handler);
esw              1812 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
esw              1814 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
esw              1815 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_eq_notifier_register(esw->dev, &esw->nb);
esw              1817 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
esw              1818 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
esw              1820 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
esw              1824 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
esw              1826 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
esw              1827 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
esw              1829 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
esw              1831 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	flush_workqueue(esw->work_queue);
esw              1835 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
esw              1841 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
esw              1848 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw              1849 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_enable_vport(esw, vport, enabled_events);
esw              1852 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (mlx5_ecpf_vport_exists(esw->dev)) {
esw              1853 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
esw              1854 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_enable_vport(esw, vport, enabled_events);
esw              1858 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
esw              1859 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_enable_vport(esw, vport, enabled_events);
esw              1865 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
esw              1870 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_esw_for_all_vports_reverse(esw, i, vport)
esw              1871 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_disable_vport(esw, vport);
esw              1874 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
esw              1878 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw) ||
esw              1879 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw              1880 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
esw              1884 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
esw              1885 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
esw              1887 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw              1888 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_warn(esw->dev, "engress ACL is not supported by FW\n");
esw              1890 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_create_tsar(esw);
esw              1892 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->mode = mode;
esw              1894 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_lag_update(esw->dev);
esw              1897 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_legacy_enable(esw);
esw              1899 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
esw              1900 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
esw              1901 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_offloads_enable(esw);
esw              1907 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_eswitch_event_handlers_register(esw);
esw              1909 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
esw              1911 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
esw              1916 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->mode = MLX5_ESWITCH_NONE;
esw              1919 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
esw              1920 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
esw              1926 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
esw              1930 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
esw              1933 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
esw              1934 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw              1935 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
esw              1937 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_eswitch_event_handlers_unregister(esw);
esw              1939 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->mode == MLX5_ESWITCH_LEGACY)
esw              1940 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_legacy_disable(esw);
esw              1941 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw              1942 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_offloads_disable(esw);
esw              1944 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_destroy_tsar(esw);
esw              1946 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	old_mode = esw->mode;
esw              1947 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->mode = MLX5_ESWITCH_NONE;
esw              1949 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_lag_update(esw->dev);
esw              1952 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
esw              1953 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
esw              1959 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_eswitch *esw;
esw              1975 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
esw              1976 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw)
esw              1979 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->dev = dev;
esw              1980 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw              1981 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
esw              1983 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
esw              1984 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->work_queue) {
esw              1989 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
esw              1991 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw->vports) {
esw              1996 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->total_vports = total_vports;
esw              1998 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = esw_offloads_init_reps(esw);
esw              2002 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_init(&esw->offloads.encap_tbl_lock);
esw              2003 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	hash_init(esw->offloads.encap_tbl);
esw              2004 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_init(&esw->offloads.mod_hdr.lock);
esw              2005 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	hash_init(esw->offloads.mod_hdr.hlist);
esw              2006 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	atomic64_set(&esw->offloads.num_flows, 0);
esw              2007 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_init(&esw->state_lock);
esw              2009 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_esw_for_all_vports(esw, i, vport) {
esw              2010 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
esw              2017 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->enabled_vports = 0;
esw              2018 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->mode = MLX5_ESWITCH_NONE;
esw              2019 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
esw              2021 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	dev->priv.eswitch = esw;
esw              2024 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->work_queue)
esw              2025 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		destroy_workqueue(esw->work_queue);
esw              2026 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_offloads_cleanup_reps(esw);
esw              2027 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	kfree(esw->vports);
esw              2028 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	kfree(esw);
esw              2032 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw              2034 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
esw              2037 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_info(esw->dev, "cleanup\n");
esw              2039 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->dev->priv.eswitch = NULL;
esw              2040 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	destroy_workqueue(esw->work_queue);
esw              2041 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw_offloads_cleanup_reps(esw);
esw              2042 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_destroy(&esw->offloads.mod_hdr.lock);
esw              2043 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_destroy(&esw->offloads.encap_tbl_lock);
esw              2044 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	kfree(esw->vports);
esw              2045 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	kfree(esw);
esw              2049 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
esw              2052 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2061 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2064 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              2068 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
esw              2070 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              2077 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
esw              2079 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              2085 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
esw              2086 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_vport_ingress_config(esw, evport);
esw              2089 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2093 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
esw              2096 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2099 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2104 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2106 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_modify_vport_admin_state(esw->dev,
esw              2110 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              2119 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2123 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
esw              2126 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2134 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2143 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2148 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
esw              2151 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2154 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2161 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
esw              2167 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
esw              2168 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_vport_ingress_config(esw, evport);
esw              2171 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_vport_egress_config(esw, evport);
esw              2177 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
esw              2186 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2187 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
esw              2188 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2193 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
esw              2196 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2200 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2205 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2209 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_core_warn(esw->dev,
esw              2212 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
esw              2213 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_vport_ingress_config(esw, evport);
esw              2216 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2221 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
esw              2223 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.vepa_uplink_rule)
esw              2224 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
esw              2226 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.vepa_star_rule)
esw              2227 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
esw              2229 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
esw              2230 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->fdb_table.legacy.vepa_star_rule = NULL;
esw              2233 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
esw              2244 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_cleanup_vepa_rules(esw);
esw              2248 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->fdb_table.legacy.vepa_uplink_rule)
esw              2264 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	dest.ft = esw->fdb_table.legacy.fdb;
esw              2266 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
esw              2272 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
esw              2281 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
esw              2287 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
esw              2293 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw_cleanup_vepa_rules(esw);
esw              2297 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
esw              2301 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw)
esw              2304 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2307 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2308 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->mode != MLX5_ESWITCH_LEGACY) {
esw              2313 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = _mlx5_eswitch_set_vepa_locked(esw, setting);
esw              2316 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2320 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
esw              2322 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!esw)
esw              2325 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2328 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (esw->mode != MLX5_ESWITCH_LEGACY)
esw              2331 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
esw              2335 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
esw              2338 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2340 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2345 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2349 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2354 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
esw              2356 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
esw              2361 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_esw_for_all_vports(esw, i, evport) {
esw              2370 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
esw              2372 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
esw              2380 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mlx5_esw_for_all_vports(esw, i, evport) {
esw              2395 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		err = esw_vport_qos_config(esw, evport, vport_max_rate,
esw              2406 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
esw              2409 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
esw              2417 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!ESW_ALLOWED(esw))
esw              2422 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
esw              2423 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
esw              2425 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
esw              2430 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_lock(&esw->state_lock);
esw              2437 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	divider = calculate_vports_min_rate_divider(esw);
esw              2438 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = normalize_vports_min_rate(esw, divider);
esw              2448 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
esw              2453 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	mutex_unlock(&esw->state_lock);
esw              2461 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw              2466 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
esw              2495 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
esw              2499 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
esw              2519 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
esw              2562 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
esw              2573 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
esw              2575 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
esw              2582 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	struct mlx5_eswitch *esw;
esw              2584 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw = dev->priv.eswitch;
esw              2585 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	return ESW_ALLOWED(esw) ? esw->offloads.encap :
esw              2608 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
esw              2612 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
esw              2614 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw              2615 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		esw->esw_funcs.num_vfs = num_vfs;
esw              2619 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	out = mlx5_esw_query_functions(esw->dev);
esw              2623 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
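The eswitch.c entries above all funnel through the same shape: look up the vport, bail out unless the caller is allowed to manage the e-switch, and apply the change while holding esw->state_lock. The fragment below is a minimal user-space sketch of that shape only; the struct layouts, the ESW_ALLOWED() stand-in and the ACL-refresh comment are simplified assumptions made for illustration, not the driver's real definitions.

/* Simplified, user-space sketch of the locking pattern the eswitch.c
 * entries above share: every mlx5_eswitch_set_vport_*() helper looks up
 * the vport, checks permissions, and applies the change under
 * esw->state_lock.  Types and helpers here are stand-ins, not the real
 * kernel definitions. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct vport {            /* stand-in for struct mlx5_vport */
	bool enabled;
	unsigned char mac[6];
};

struct eswitch {          /* stand-in for struct mlx5_eswitch */
	pthread_mutex_t state_lock;
	bool manager;     /* stand-in for the ESW_ALLOWED() check */
	struct vport vports[4];
};

static int set_vport_mac(struct eswitch *esw, int vport,
			 const unsigned char *mac)
{
	if (!esw->manager)          /* ESW_ALLOWED() analogue */
		return -1;

	pthread_mutex_lock(&esw->state_lock);
	memcpy(esw->vports[vport].mac, mac, 6);
	/* the real driver would also refresh ingress ACLs here when the
	 * vport is enabled and the switch is in legacy mode */
	pthread_mutex_unlock(&esw->state_lock);
	return 0;
}

int main(void)
{
	struct eswitch esw = { .manager = true };
	const unsigned char mac[6] = { 0, 1, 2, 3, 4, 5 };

	pthread_mutex_init(&esw.state_lock, NULL);
	printf("set mac: %d\n", set_vport_mac(&esw, 1, mac));
	return 0;
}

Judging from the fragments listed above, the same state_lock also serializes the VLAN, spoof-check, trust and rate setters.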
esw               205 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	struct mlx5_eswitch	*esw;
esw               248 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_offloads_disable(struct mlx5_eswitch *esw);
esw               249 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int esw_offloads_enable(struct mlx5_eswitch *esw);
esw               250 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
esw               251 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int esw_offloads_init_reps(struct mlx5_eswitch *esw);
esw               252 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
esw               254 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw               256 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
esw               258 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw               260 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
esw               262 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
esw               264 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
esw               266 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
esw               271 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
esw               272 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
esw               273 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
esw               274 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
esw               276 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
esw               278 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
esw               280 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
esw               282 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
esw               284 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
esw               286 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
esw               287 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
esw               288 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
esw               290 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
esw               295 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
esw               297 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
esw               305 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
esw               310 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
esw               319 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
esw               323 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
esw               327 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
esw               331 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
esw               335 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
esw               340 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
esw               343 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
esw               346 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
esw               349 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
esw               409 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
esw               415 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
esw               417 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
esw               419 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
esw               421 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
esw               480 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
esw               483 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	return esw->total_vports - 1;
esw               486 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
esw               488 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	return esw->total_vports - 2;
esw               491 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
esw               495 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 		if (!mlx5_ecpf_vport_exists(esw->dev))
esw               496 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
esw               497 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 		return mlx5_eswitch_ecpf_idx(esw);
esw               501 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 		return mlx5_eswitch_uplink_idx(esw);
esw               506 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
esw               509 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	if (index == mlx5_eswitch_ecpf_idx(esw) &&
esw               510 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	    mlx5_ecpf_vport_exists(esw->dev))
esw               513 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	if (index == mlx5_eswitch_uplink_idx(esw))
esw               520 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
esw               525 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_all_vports(esw, i, vport)		\
esw               527 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (vport) = &(esw)->vports[i],		\
esw               528 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (i) < (esw)->total_vports; (i)++)
esw               530 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
esw               531 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	for ((i) = (esw)->total_vports - 1;		\
esw               532 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (vport) = &(esw)->vports[i],		\
esw               535 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
esw               537 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (vport) = &(esw)->vports[(i)],		\
esw               540 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs)	\
esw               542 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (vport) = &(esw)->vports[(i)],			\
esw               548 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_all_reps(esw, i, rep)			\
esw               550 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (rep) = &(esw)->offloads.vport_reps[i],		\
esw               551 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (i) < (esw)->total_vports; (i)++)
esw               553 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
esw               555 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (rep) = &(esw)->offloads.vport_reps[i],		\
esw               558 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
esw               560 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (rep) = &(esw)->offloads.vport_reps[i],		\
esw               563 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)	\
esw               566 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
esw               570 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
esw               571 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	for ((i) = (esw)->first_host_vport;			\
esw               572 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (rep) = &(esw)->offloads.vport_reps[i],		\
esw               575 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
esw               577 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (rep) = &(esw)->offloads.vport_reps[i],			\
esw               578 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (i) >= (esw)->first_host_vport; (i)--)
esw               580 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
esw               581 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	for ((vport) = (esw)->first_host_vport;			\
esw               584 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h #define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
esw               586 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 	     (vport) >= (esw)->first_host_vport; (vport)--)
esw               589 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
esw               591 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
esw               593 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
esw               597 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
esw               599 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
esw               604 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
esw               605 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
esw               606 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
esw               614 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
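The eswitch.h helpers indexed above describe how vport numbers are folded into the esw->vports array: the uplink sits in the last slot, the ECPF (when it exists) in the one before it, and everything else maps directly. The following is a minimal compilable sketch of that mapping under those assumptions; the SKETCH_VPORT_* constants are illustrative placeholders rather than values taken from the listing.

/* Minimal sketch of the index <-> vport-number mapping suggested by the
 * mlx5_eswitch_uplink_idx()/mlx5_eswitch_ecpf_idx() fragments above:
 * regular vports map 1:1 to array indices, while the ECPF and uplink
 * vports are parked in the last two slots.  The special vport numbers
 * below are illustrative placeholders, not taken from the listing. */
#include <stdio.h>

#define SKETCH_VPORT_ECPF   0xfffe   /* placeholder value */
#define SKETCH_VPORT_UPLINK 0xffff   /* placeholder value */

struct sketch_esw {
	int total_vports;
	int ecpf_exists;
};

static int uplink_idx(const struct sketch_esw *esw)
{
	return esw->total_vports - 1;
}

static int ecpf_idx(const struct sketch_esw *esw)
{
	return esw->total_vports - 2;
}

static int vport_num_to_index(const struct sketch_esw *esw, int vport_num)
{
	if (vport_num == SKETCH_VPORT_ECPF)
		return ecpf_idx(esw);
	if (vport_num == SKETCH_VPORT_UPLINK)
		return uplink_idx(esw);
	return vport_num;               /* PF and VF vports map directly */
}

static int index_to_vport_num(const struct sketch_esw *esw, int index)
{
	if (index == ecpf_idx(esw) && esw->ecpf_exists)
		return SKETCH_VPORT_ECPF;
	if (index == uplink_idx(esw))
		return SKETCH_VPORT_UPLINK;
	return index;
}

int main(void)
{
	struct sketch_esw esw = { .total_vports = 6, .ecpf_exists = 1 };

	printf("uplink index: %d\n", uplink_idx(&esw));
	printf("index 5 -> vport 0x%x\n", index_to_vport_num(&esw, 5));
	printf("vport 0x%x -> index %d\n", SKETCH_VPORT_ECPF,
	       vport_num_to_index(&esw, SKETCH_VPORT_ECPF));
	return 0;
}

Parking the special vports at the tail keeps the array densely indexed by ordinary vport number, which is what the mlx5_esw_for_all_vports()/mlx5_esw_for_each_vf_vport() iterator macros listed above rely on.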
esw                51 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c #define fdb_prio_table(esw, chain, prio, level) \
esw                52 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
esw                56 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
esw                59 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
esw                61 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	WARN_ON(idx > esw->total_vports - 1);
esw                62 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return &esw->offloads.vport_reps[idx];
esw                66 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
esw                68 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
esw                70 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
esw                72 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
esw                75 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
esw                77 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
esw                83 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
esw                85 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
esw                92 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
esw               102 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw               119 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
esw               126 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
esw               133 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
esw               139 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
esw               150 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
esw               155 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
esw               173 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
esw               188 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
esw               208 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
esw               218 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
esw               224 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
esw               225 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
esw               232 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		atomic64_inc(&esw->offloads.num_flows);
esw               237 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
esw               240 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
esw               246 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
esw               257 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
esw               263 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
esw               275 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
esw               286 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
esw               296 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	atomic64_inc(&esw->offloads.num_flows);
esw               300 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
esw               302 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
esw               308 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
esw               321 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
esw               324 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	atomic64_dec(&esw->offloads.num_flows);
esw               327 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
esw               328 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
esw               330 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
esw               332 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
esw               337 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
esw               341 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__mlx5_eswitch_del_rule(esw, rule, attr, false);
esw               345 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
esw               349 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__mlx5_eswitch_del_rule(esw, rule, attr, true);
esw               352 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw               357 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
esw               358 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
esw               362 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
esw               423 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
esw               426 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
esw               432 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
esw               440 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_lock(&esw->state_lock);
esw               465 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
esw               475 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
esw               487 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_unlock(&esw->state_lock);
esw               491 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
esw               494 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
esw               500 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
esw               510 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_lock(&esw->state_lock);
esw               528 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
esw               540 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_set_global_vlan_pop(esw, 0);
esw               543 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_unlock(&esw->state_lock);
esw               548 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
esw               566 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
esw               577 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
esw               580 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
esw               592 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
esw               599 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
esw               602 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
esw               621 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
esw               625 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
esw               632 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw               660 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
esw               667 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw               680 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
esw               689 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	int nvports = esw->total_vports;
esw               697 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	peer_miss_rules_setup(esw, peer_dev, spec, &dest);
esw               709 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw               710 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
esw               713 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
esw               722 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_ecpf_vport_exists(esw->dev)) {
esw               724 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
esw               730 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
esw               733 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
esw               734 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_set_peer_miss_rule_source_port(esw,
esw               738 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
esw               747 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.peer_miss_rules = flows;
esw               754 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
esw               757 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_ecpf_vport_exists(esw->dev))
esw               758 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
esw               760 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
esw               763 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
esw               770 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
esw               775 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	flows = esw->fdb_table.offloads.peer_miss_rules;
esw               777 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
esw               778 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 					       mlx5_core_max_vfs(esw->dev))
esw               781 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_ecpf_vport_exists(esw->dev))
esw               782 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
esw               784 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
esw               790 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
esw               816 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	dest.vport.num = esw->manager_vport;
esw               819 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
esw               823 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev,  "FDB: Failed to add unicast miss flow rule err %d\n", err);
esw               827 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.miss_rule_uni = flow_rule;
esw               834 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
esw               838 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
esw               839 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
esw               843 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.miss_rule_multi = flow_rule;
esw               864 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c get_sz_from_pool(struct mlx5_eswitch *esw)
esw               869 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (esw->fdb_table.offloads.fdb_left[i]) {
esw               870 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			--esw->fdb_table.offloads.fdb_left[i];
esw               880 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
esw               886 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			++esw->fdb_table.offloads.fdb_left[i];
esw               893 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c create_next_size_table(struct mlx5_eswitch *esw,
esw               902 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	sz = get_sz_from_pool(esw);
esw               913 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
esw               915 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		put_sz_to_pool(esw, sz);
esw               922 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
esw               924 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_core_dev *dev = esw->dev;
esw               931 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		return esw->fdb_table.offloads.slow_fdb;
esw               933 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
esw               935 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
esw               939 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			fdb_prio_table(esw, chain, prio, level--).num_rules++;
esw               940 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
esw               947 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
esw               951 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
esw               961 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (fdb_prio_table(esw, chain, prio, l).fdb) {
esw               962 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			fdb_prio_table(esw, chain, prio, l).num_rules++;
esw               966 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
esw               972 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
esw               973 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
esw               976 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
esw               980 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
esw               982 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, chain, prio, l);
esw               988 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
esw               995 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
esw               998 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
esw              1001 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
esw              1002 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
esw              1003 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
esw              1006 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
esw              1009 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
esw              1012 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
esw              1013 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, 0, 1, 1);
esw              1014 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_put_prio_table(esw, 0, 1, 0);
esw              1021 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
esw              1028 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw              1045 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw              1049 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_core_dev *dev = esw->dev;
esw              1059 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_debug(esw->dev, "Create offloads FDB Tables\n");
esw              1070 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.ns = root_ns;
esw              1072 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 					   esw->dev->priv.steering->mode);
esw              1088 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->fdb_table.offloads.fdb_left[i] =
esw              1092 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		MLX5_ESW_MISS_FLOWS + esw->total_vports;
esw              1097 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
esw              1111 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.slow_fdb = fdb;
esw              1114 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
esw              1115 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
esw              1116 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw              1118 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_get_prio_table(esw, 0, 1, 0);
esw              1119 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_get_prio_table(esw, 0, 1, 1);
esw              1122 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw              1144 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.send_to_vport_grp = g;
esw              1149 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_set_flow_group_source_port(esw, flow_group_in);
esw              1151 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw              1165 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		 ix + esw->total_vports - 1);
esw              1166 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	ix += esw->total_vports;
esw              1174 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.peer_miss_grp = g;
esw              1196 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->fdb_table.offloads.miss_grp = g;
esw              1198 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_add_fdb_miss_rule(esw);
esw              1202 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->nvports = nvports;
esw              1207 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
esw              1209 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
esw              1211 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
esw              1213 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_fast_fdb_tables(esw);
esw              1214 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
esw              1223 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw              1225 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!esw->fdb_table.offloads.slow_fdb)
esw              1228 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
esw              1229 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
esw              1230 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
esw              1231 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
esw              1232 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
esw              1233 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
esw              1235 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
esw              1236 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_fast_fdb_tables(esw);
esw              1238 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
esw              1242 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
esw              1245 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_core_dev *dev = esw->dev;
esw              1252 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
esw              1261 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
esw              1265 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.ft_offloads = ft_offloads;
esw              1269 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
esw              1271 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_esw_offload *offloads = &esw->offloads;
esw              1276 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
esw              1289 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_set_flow_group_source_port(esw, flow_group_in);
esw              1294 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
esw              1298 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
esw              1302 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.vport_rx_group = g;
esw              1308 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
esw              1310 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
esw              1314 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
esw              1328 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw              1331 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
esw              1348 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
esw              1351 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
esw              1360 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_offloads_start(struct mlx5_eswitch *esw,
esw              1365 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw              1366 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw              1372 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_disable(esw);
esw              1373 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
esw              1374 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
esw              1378 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
esw              1384 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
esw              1385 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (mlx5_eswitch_inline_mode_get(esw,
esw              1386 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 						 &esw->offloads.inline_mode)) {
esw              1387 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
esw              1395 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
esw              1397 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	kfree(esw->offloads.vport_reps);
esw              1400 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int esw_offloads_init_reps(struct mlx5_eswitch *esw)
esw              1402 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	int total_vports = esw->total_vports;
esw              1407 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.vport_reps = kcalloc(total_vports,
esw              1410 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!esw->offloads.vport_reps)
esw              1413 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_all_reps(esw, vport_index, rep) {
esw              1414 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
esw              1425 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw              1430 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->offloads.rep_ops[rep_type]->unload(rep);
esw              1433 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
esw              1437 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_ecpf_vport_exists(esw->dev)) {
esw              1438 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
esw              1439 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__esw_offloads_unload_rep(esw, rep, rep_type);
esw              1442 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw              1443 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
esw              1444 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__esw_offloads_unload_rep(esw, rep, rep_type);
esw              1447 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
esw              1448 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__esw_offloads_unload_rep(esw, rep, rep_type);
esw              1451 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
esw              1457 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
esw              1458 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__esw_offloads_unload_rep(esw, rep, rep_type);
esw              1461 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
esw              1466 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__unload_reps_vf_vport(esw, nvports, rep_type);
esw              1469 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
esw              1471 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
esw              1474 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__unload_reps_special_vport(esw, rep_type);
esw              1477 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
esw              1482 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__unload_reps_all_vport(esw, rep_type);
esw              1485 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
esw              1492 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
esw              1501 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
esw              1506 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
esw              1507 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = __esw_offloads_load_rep(esw, rep, rep_type);
esw              1511 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw              1512 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
esw              1513 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __esw_offloads_load_rep(esw, rep, rep_type);
esw              1518 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_ecpf_vport_exists(esw->dev)) {
esw              1519 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
esw              1520 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __esw_offloads_load_rep(esw, rep, rep_type);
esw              1528 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw              1529 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
esw              1530 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__esw_offloads_unload_rep(esw, rep, rep_type);
esw              1534 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
esw              1535 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__esw_offloads_unload_rep(esw, rep, rep_type);
esw              1539 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
esw              1545 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
esw              1546 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __esw_offloads_load_rep(esw, rep, rep_type);
esw              1554 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__unload_reps_vf_vport(esw, --i, rep_type);
esw              1558 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
esw              1563 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = __load_reps_special_vport(esw, rep_type);
esw              1567 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
esw              1574 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	__unload_reps_special_vport(esw, rep_type);
esw              1578 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
esw              1584 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __load_reps_vf_vport(esw, nvports, rep_type);
esw              1593 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__unload_reps_vf_vport(esw, nvports, rep_type);
esw              1597 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
esw              1603 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = __load_reps_all_vport(esw, rep_type);
esw              1612 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__unload_reps_all_vport(esw, rep_type);
esw              1619 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
esw              1624 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
esw              1631 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
esw              1633 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5e_tc_clean_fdb_peer_flows(esw);
esw              1634 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_del_fdb_peer_miss_rules(esw);
esw              1637 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
esw              1646 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	ns = esw->dev->priv.steering->fdb_root_ns;
esw              1670 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw = my_data;
esw              1671 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
esw              1677 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
esw              1681 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
esw              1684 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = mlx5_esw_offloads_pair(esw, peer_esw);
esw              1688 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = mlx5_esw_offloads_pair(peer_esw, esw);
esw              1701 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_esw_offloads_unpair(esw);
esw              1702 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
esw              1709 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_offloads_unpair(esw);
esw              1711 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
esw              1713 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
esw              1718 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
esw              1720 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
esw              1722 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	INIT_LIST_HEAD(&esw->offloads.peer_flows);
esw              1723 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_init(&esw->offloads.peer_mutex);
esw              1725 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
esw              1731 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 				       esw);
esw              1735 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
esw              1738 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
esw              1740 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
esw              1742 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
esw              1746 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
esw              1751 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
esw              1790 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev,
esw              1801 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_cleanup_ingress_rules(esw, vport);
esw              1805 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
esw              1816 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
esw              1819 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
esw              1823 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev,
esw              1835 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev,
esw              1844 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
esw              1848 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
esw              1853 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
esw              1859 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
esw              1866 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
esw              1874 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_vport_cleanup_egress_rules(esw, vport);
esw              1876 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_vport_enable_egress_acl(esw, vport);
esw              1878 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		mlx5_core_warn(esw->dev,
esw              1884 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_debug(esw->dev,
esw              1907 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev,
esw              1918 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_cleanup_egress_rules(esw, vport);
esw              1922 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
esw              1927 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
esw              1928 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
esw              1931 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_vport_cleanup_ingress_rules(esw, vport);
esw              1933 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_vport_enable_ingress_acl(esw, vport);
esw              1935 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_warn(esw->dev,
esw              1941 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_debug(esw->dev,
esw              1944 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
esw              1945 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
esw              1950 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
esw              1951 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
esw              1952 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = esw_vport_ingress_prio_tag_config(esw, vport);
esw              1959 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_disable_ingress_acl(esw, vport);
esw              1964 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
esw              1966 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
esw              1969 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
esw              1973 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
esw              1976 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
esw              1977 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    mlx5_ecpf_vport_exists(esw->dev))
esw              1983 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
esw              1989 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw_check_vport_match_metadata_supported(esw))
esw              1990 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
esw              1992 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_all_vports(esw, i, vport) {
esw              1993 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = esw_vport_ingress_common_config(esw, vport);
esw              1997 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
esw              1998 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			err = esw_vport_egress_prio_tag_config(esw, vport);
esw              2004 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
esw              2005 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
esw              2010 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_vport_disable_ingress_acl(esw, vport);
esw              2013 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		vport = &esw->vports[j];
esw              2014 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_disable_egress_acl(esw, vport);
esw              2015 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_disable_ingress_acl(esw, vport);
esw              2021 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw              2026 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_all_vports(esw, i, vport) {
esw              2027 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_disable_egress_acl(esw, vport);
esw              2028 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_vport_disable_ingress_acl(esw, vport);
esw              2031 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
esw              2034 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
esw              2036 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	int num_vfs = esw->esw_funcs.num_vfs;
esw              2040 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
esw              2041 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		total_vports = esw->total_vports;
esw              2043 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
esw              2045 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
esw              2046 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
esw              2048 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_create_offloads_acl_tables(esw);
esw              2052 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_create_offloads_fdb_tables(esw, total_vports);
esw              2056 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_create_offloads_table(esw, total_vports);
esw              2060 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_create_vport_rx_group(esw, total_vports);
esw              2067 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_table(esw);
esw              2070 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_fdb_tables(esw);
esw              2073 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_acl_tables(esw);
esw              2078 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw              2080 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_vport_rx_group(esw);
esw              2081 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_table(esw);
esw              2082 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_fdb_tables(esw);
esw              2083 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_acl_tables(esw);
esw              2087 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
esw              2097 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
esw              2101 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->esw_funcs.num_vfs > 0) {
esw              2102 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
esw              2106 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err = esw_offloads_load_vf_reps(esw, new_num_vfs);
esw              2110 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->esw_funcs.num_vfs = new_num_vfs;
esw              2116 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw;
esw              2120 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw = host_work->esw;
esw              2122 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	out = mlx5_esw_query_functions(esw->dev);
esw              2126 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_vfs_changed_event_handler(esw, out);
esw              2136 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw;
esw              2143 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
esw              2145 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	host_work->esw = esw;
esw              2148 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	queue_work(esw->work_queue, &host_work->work);
esw              2153 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int esw_offloads_enable(struct mlx5_eswitch *esw)
esw              2157 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
esw              2158 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
esw              2159 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
esw              2161 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
esw              2163 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_rdma_enable_roce(esw->dev);
esw              2164 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_offloads_steering_init(esw);
esw              2168 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_set_passing_vport_metadata(esw, true);
esw              2172 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
esw              2174 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_offloads_load_all_reps(esw);
esw              2178 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_offloads_devcom_init(esw);
esw              2179 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mutex_init(&esw->offloads.termtbl_mutex);
esw              2184 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_disable_pf_vf_vports(esw);
esw              2185 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_set_passing_vport_metadata(esw, false);
esw              2187 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_offloads_steering_cleanup(esw);
esw              2189 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_rdma_disable_roce(esw->dev);
esw              2193 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c static int esw_offloads_stop(struct mlx5_eswitch *esw,
esw              2198 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_disable(esw);
esw              2199 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
esw              2202 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
esw              2212 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void esw_offloads_disable(struct mlx5_eswitch *esw)
esw              2214 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_offloads_devcom_cleanup(esw);
esw              2215 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_offloads_unload_all_reps(esw);
esw              2216 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_eswitch_disable_pf_vf_vports(esw);
esw              2217 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_set_passing_vport_metadata(esw, false);
esw              2218 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_offloads_steering_cleanup(esw);
esw              2219 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_rdma_disable_roce(esw->dev);
esw              2220 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
esw              2359 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw              2379 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (atomic64_read(&esw->offloads.num_flows) > 0) {
esw              2389 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
esw              2398 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.inline_mode = mlx5_mode;
esw              2403 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
esw              2406 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 						 esw->offloads.inline_mode);
esw              2414 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw              2421 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
esw              2424 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
esw              2427 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_core_dev *dev = esw->dev;
esw              2433 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->mode == MLX5_ESWITCH_NONE)
esw              2448 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
esw              2449 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
esw              2466 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw              2481 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw              2482 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->offloads.encap = encap;
esw              2486 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->offloads.encap == encap)
esw              2489 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (atomic64_read(&esw->offloads.num_flows) > 0) {
esw              2495 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw_destroy_offloads_fdb_tables(esw);
esw              2497 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.encap = encap;
esw              2499 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
esw              2504 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		esw->offloads.encap = !encap;
esw              2505 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
esw              2515 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	struct mlx5_eswitch *esw = dev->priv.eswitch;
esw              2522 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	*encap = esw->offloads.encap;
esw              2526 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
esw              2534 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	esw->offloads.rep_ops[rep_type] = ops;
esw              2535 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_all_reps(esw, i, rep) {
esw              2542 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
esw              2547 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw              2548 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		__unload_reps_all_vport(esw, rep_type);
esw              2550 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	mlx5_esw_for_all_reps(esw, i, rep)
esw              2555 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
esw              2559 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
esw              2563 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
esw              2569 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	rep = mlx5_eswitch_get_rep(esw, vport);
esw              2572 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
esw              2573 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
esw              2578 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
esw              2580 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
esw              2584 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
esw              2587 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return mlx5_eswitch_get_rep(esw, vport);
esw              2591 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
esw              2594 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	       vport_num <= esw->dev->priv.sriov.max_vfs;
esw              2597 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
esw              2599 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
esw              2603 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
esw              2606 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 	return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
esw                93 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
esw               102 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	mutex_lock(&esw->offloads.termtbl_mutex);
esw               105 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
esw               127 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
esw               129 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 		esw_warn(esw->dev, "Failed to create termination table\n");
esw               132 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
esw               135 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	mutex_unlock(&esw->offloads.termtbl_mutex);
esw               139 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	mutex_unlock(&esw->offloads.termtbl_mutex);
esw               144 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
esw               147 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	mutex_lock(&esw->offloads.termtbl_mutex);
esw               150 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	mutex_unlock(&esw->offloads.termtbl_mutex);
esw               180 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
esw               185 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
esw               197 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
esw               201 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
esw               206 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 		mlx5_eswitch_offload_is_uplink_port(esw, spec);
esw               210 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
esw               235 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 		tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
esw               238 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 			esw_warn(esw->dev, "Failed to create termination table\n");
esw               281 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c 			mlx5_eswitch_termtbl_put(esw, tt);
esw              1766 drivers/s390/block/dasd.c 							    irb->esw.esw1.lpum);
esw              4193 drivers/s390/block/dasd.c 	} else if (irb->esw.esw0.erw.cons) {
esw               157 drivers/s390/block/dasd_3990_erp.c 			~(erp->irb.esw.esw0.sublog.lpum);
esw               159 drivers/s390/block/dasd_3990_erp.c 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
esw               165 drivers/s390/block/dasd_3990_erp.c 			    erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
esw               173 drivers/s390/block/dasd_3990_erp.c 			"/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
esw              2246 drivers/s390/block/dasd_3990_erp.c 	__u8 lpum = erp->refers->irb.esw.esw1.lpum;
esw              5477 drivers/s390/block/dasd_eckd.c 	if (irb->esw.esw0.erw.cons) {
esw              6612 drivers/s390/block/dasd_eckd.c 		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
esw               691 drivers/s390/block/dasd_fba.c 	if (irb->esw.esw0.erw.cons) {
esw               186 drivers/s390/cio/ccwreq.c 	if (irb->esw.esw0.erw.cons) {
esw               766 drivers/s390/cio/device_fsm.c 		    !irb->esw.esw0.erw.cons) {
esw               124 drivers/s390/cio/device_status.c 	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
esw               128 drivers/s390/cio/device_status.c 		cdev_sublog = &cdev_irb->esw.esw0.sublog;
esw               129 drivers/s390/cio/device_status.c 		sublog = &irb->esw.esw0.sublog;
esw               158 drivers/s390/cio/device_status.c 			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
esw               160 drivers/s390/cio/device_status.c 		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
esw               161 drivers/s390/cio/device_status.c 		if (cdev_irb->esw.esw0.erw.fsavf) {
esw               163 drivers/s390/cio/device_status.c 			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
esw               164 drivers/s390/cio/device_status.c 			       sizeof (irb->esw.esw0.faddr));
esw               166 drivers/s390/cio/device_status.c 			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
esw               169 drivers/s390/cio/device_status.c 		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
esw               170 drivers/s390/cio/device_status.c 		if (irb->esw.esw0.erw.scavf)
esw               172 drivers/s390/cio/device_status.c 			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
esw               178 drivers/s390/cio/device_status.c 	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
esw               180 drivers/s390/cio/device_status.c 	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
esw               181 drivers/s390/cio/device_status.c 	if (irb->esw.esw0.erw.pvrf)
esw               184 drivers/s390/cio/device_status.c 	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
esw               185 drivers/s390/cio/device_status.c 	if (irb->esw.esw0.erw.cons)
esw               186 drivers/s390/cio/device_status.c 		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
esw               303 drivers/s390/cio/device_status.c 	    !(cdev_irb->esw.esw0.erw.cons))
esw               367 drivers/s390/cio/device_status.c 		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
esw               372 drivers/s390/cio/device_status.c 	    irb->esw.esw0.erw.pvrf)
esw               389 drivers/s390/cio/device_status.c 		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
esw               140 drivers/s390/cio/eadm_sch.c 	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
esw               945 drivers/s390/cio/qdio_main.c 	if (irb->esw.esw0.erw.cons) {
esw              1037 drivers/s390/net/qeth_core_main.c 		if (irb->esw.esw0.erw.cons) {
esw                53 include/linux/mlx5/eswitch.h void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
esw                56 include/linux/mlx5/eswitch.h void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
esw                57 include/linux/mlx5/eswitch.h void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
esw                60 include/linux/mlx5/eswitch.h struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
esw                62 include/linux/mlx5/eswitch.h void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
esw                64 include/linux/mlx5/eswitch.h mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
esw                73 include/linux/mlx5/eswitch.h bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
esw                74 include/linux/mlx5/eswitch.h u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
esw                76 include/linux/mlx5/eswitch.h u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
esw                79 include/linux/mlx5/eswitch.h static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
esw                91 include/linux/mlx5/eswitch.h mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
esw                97 include/linux/mlx5/eswitch.h mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,