Lines Matching refs:priv

54 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_setup_tc() local
65 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); in mlx4_en_setup_tc()
66 offset += priv->num_tx_rings_p_up; in mlx4_en_setup_tc()
78 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_low_latency_recv() local
79 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; in mlx4_en_low_latency_recv()
82 if (!priv->port_up) in mlx4_en_low_latency_recv()
113 struct mlx4_en_priv *priv; member
123 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
142 struct mlx4_en_priv *priv = filter->priv; in mlx4_en_filter_work() local
174 .port = priv->port, in mlx4_en_filter_work()
181 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", in mlx4_en_filter_work()
189 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; in mlx4_en_filter_work()
190 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); in mlx4_en_filter_work()
196 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); in mlx4_en_filter_work()
198 en_err(priv, "Error detaching flow. rc = %d\n", rc); in mlx4_en_filter_work()
201 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); in mlx4_en_filter_work()
203 en_err(priv, "Error attaching flow. err = %d\n", rc); in mlx4_en_filter_work()
206 mlx4_en_filter_rfs_expire(priv); in mlx4_en_filter_work()
212 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, in filter_hash_bucket() argument
224 return &priv->filter_hash[bucket_idx]; in filter_hash_bucket()
228 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, in mlx4_en_filter_alloc() argument
238 filter->priv = priv; in mlx4_en_filter_alloc()
250 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; in mlx4_en_filter_alloc()
252 list_add_tail(&filter->next, &priv->filters); in mlx4_en_filter_alloc()
254 filter_hash_bucket(priv, src_ip, dst_ip, src_port, in mlx4_en_filter_alloc()
262 struct mlx4_en_priv *priv = filter->priv; in mlx4_en_filter_free() local
267 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); in mlx4_en_filter_free()
269 en_err(priv, "Error detaching flow. rc = %d\n", rc); in mlx4_en_filter_free()
275 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, in mlx4_en_filter_find() argument
282 filter_hash_bucket(priv, src_ip, dst_ip, in mlx4_en_filter_find()
302 struct mlx4_en_priv *priv = netdev_priv(net_dev); in mlx4_en_filter_rfs() local
331 spin_lock_bh(&priv->filters_lock); in mlx4_en_filter_rfs()
332 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, in mlx4_en_filter_rfs()
340 filter = mlx4_en_filter_alloc(priv, rxq_index, in mlx4_en_filter_rfs()
349 queue_work(priv->mdev->workqueue, &filter->work); in mlx4_en_filter_rfs()
354 spin_unlock_bh(&priv->filters_lock); in mlx4_en_filter_rfs()
359 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) in mlx4_en_cleanup_filters() argument
364 spin_lock_bh(&priv->filters_lock); in mlx4_en_cleanup_filters()
365 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { in mlx4_en_cleanup_filters()
369 spin_unlock_bh(&priv->filters_lock); in mlx4_en_cleanup_filters()
377 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) in mlx4_en_filter_rfs_expire() argument
383 spin_lock_bh(&priv->filters_lock); in mlx4_en_filter_rfs_expire()
384 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { in mlx4_en_filter_rfs_expire()
390 rps_may_expire_flow(priv->dev, in mlx4_en_filter_rfs_expire()
401 if (last_filter && (&last_filter->next != priv->filters.next)) in mlx4_en_filter_rfs_expire()
402 list_move(&priv->filters, &last_filter->next); in mlx4_en_filter_rfs_expire()
404 spin_unlock_bh(&priv->filters_lock); in mlx4_en_filter_rfs_expire()
414 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_vlan_rx_add_vid() local
415 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_vlan_rx_add_vid()
419 en_dbg(HW, priv, "adding VLAN:%d\n", vid); in mlx4_en_vlan_rx_add_vid()
421 set_bit(vid, priv->active_vlans); in mlx4_en_vlan_rx_add_vid()
425 if (mdev->device_up && priv->port_up) { in mlx4_en_vlan_rx_add_vid()
426 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); in mlx4_en_vlan_rx_add_vid()
428 en_err(priv, "Failed configuring VLAN filter\n"); in mlx4_en_vlan_rx_add_vid()
430 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) in mlx4_en_vlan_rx_add_vid()
431 en_dbg(HW, priv, "failed adding vlan %d\n", vid); in mlx4_en_vlan_rx_add_vid()
440 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_vlan_rx_kill_vid() local
441 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_vlan_rx_kill_vid()
444 en_dbg(HW, priv, "Killing VID:%d\n", vid); in mlx4_en_vlan_rx_kill_vid()
446 clear_bit(vid, priv->active_vlans); in mlx4_en_vlan_rx_kill_vid()
450 mlx4_unregister_vlan(mdev->dev, priv->port, vid); in mlx4_en_vlan_rx_kill_vid()
452 if (mdev->device_up && priv->port_up) { in mlx4_en_vlan_rx_kill_vid()
453 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); in mlx4_en_vlan_rx_kill_vid()
455 en_err(priv, "Failed configuring VLAN filter\n"); in mlx4_en_vlan_rx_kill_vid()
473 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, in mlx4_en_tunnel_steer_add() argument
478 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || in mlx4_en_tunnel_steer_add()
479 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) in mlx4_en_tunnel_steer_add()
482 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, in mlx4_en_tunnel_steer_add()
485 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); in mlx4_en_tunnel_steer_add()
488 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); in mlx4_en_tunnel_steer_add()
493 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, in mlx4_en_uc_steer_add() argument
496 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_uc_steer_add()
507 gid[5] = priv->port; in mlx4_en_uc_steer_add()
524 rule.port = priv->port; in mlx4_en_uc_steer_add()
540 en_warn(priv, "Failed Attaching Unicast\n"); in mlx4_en_uc_steer_add()
545 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, in mlx4_en_uc_steer_release() argument
548 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_uc_steer_release()
558 gid[5] = priv->port; in mlx4_en_uc_steer_release()
568 en_err(priv, "Invalid steering mode.\n"); in mlx4_en_uc_steer_release()
572 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) in mlx4_en_get_qp() argument
574 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_get_qp()
580 int *qpn = &priv->base_qpn; in mlx4_en_get_qp()
581 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); in mlx4_en_get_qp()
583 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", in mlx4_en_get_qp()
584 priv->dev->dev_addr); in mlx4_en_get_qp()
585 index = mlx4_register_mac(dev, priv->port, mac); in mlx4_en_get_qp()
588 en_err(priv, "Failed adding MAC: %pM\n", in mlx4_en_get_qp()
589 priv->dev->dev_addr); in mlx4_en_get_qp()
594 int base_qpn = mlx4_get_base_qpn(dev, priv->port); in mlx4_en_get_qp()
600 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); in mlx4_en_get_qp()
602 en_err(priv, "Failed to reserve qp for mac registration\n"); in mlx4_en_get_qp()
606 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id); in mlx4_en_get_qp()
610 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, in mlx4_en_get_qp()
611 &priv->tunnel_reg_id); in mlx4_en_get_qp()
620 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); in mlx4_en_get_qp()
621 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); in mlx4_en_get_qp()
625 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); in mlx4_en_get_qp()
630 if (priv->tunnel_reg_id) in mlx4_en_get_qp()
631 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); in mlx4_en_get_qp()
633 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); in mlx4_en_get_qp()
639 mlx4_unregister_mac(dev, priv->port, mac); in mlx4_en_get_qp()
643 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) in mlx4_en_put_qp() argument
645 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_put_qp()
647 int qpn = priv->base_qpn; in mlx4_en_put_qp()
651 mac = mlx4_mac_to_u64(priv->dev->dev_addr); in mlx4_en_put_qp()
652 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", in mlx4_en_put_qp()
653 priv->dev->dev_addr); in mlx4_en_put_qp()
654 mlx4_unregister_mac(dev, priv->port, mac); in mlx4_en_put_qp()
662 bucket = &priv->mac_hash[i]; in mlx4_en_put_qp()
665 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", in mlx4_en_put_qp()
667 mlx4_en_uc_steer_release(priv, entry->mac, in mlx4_en_put_qp()
670 mlx4_unregister_mac(dev, priv->port, mac); in mlx4_en_put_qp()
676 if (priv->tunnel_reg_id) { in mlx4_en_put_qp()
677 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); in mlx4_en_put_qp()
678 priv->tunnel_reg_id = 0; in mlx4_en_put_qp()
681 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", in mlx4_en_put_qp()
682 priv->port, qpn); in mlx4_en_put_qp()
684 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; in mlx4_en_put_qp()
688 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, in mlx4_en_replace_mac() argument
691 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_replace_mac()
703 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; in mlx4_en_replace_mac()
706 mlx4_en_uc_steer_release(priv, entry->mac, in mlx4_en_replace_mac()
708 mlx4_unregister_mac(dev, priv->port, in mlx4_en_replace_mac()
716 &priv->mac_hash[mac_hash]); in mlx4_en_replace_mac()
717 mlx4_register_mac(dev, priv->port, new_mac_u64); in mlx4_en_replace_mac()
718 err = mlx4_en_uc_steer_add(priv, new_mac, in mlx4_en_replace_mac()
723 if (priv->tunnel_reg_id) { in mlx4_en_replace_mac()
724 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); in mlx4_en_replace_mac()
725 priv->tunnel_reg_id = 0; in mlx4_en_replace_mac()
727 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, in mlx4_en_replace_mac()
728 &priv->tunnel_reg_id); in mlx4_en_replace_mac()
735 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); in mlx4_en_replace_mac()
738 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, in mlx4_en_do_set_mac() argument
743 if (priv->port_up) { in mlx4_en_do_set_mac()
745 err = mlx4_en_replace_mac(priv, priv->base_qpn, in mlx4_en_do_set_mac()
746 new_mac, priv->current_mac); in mlx4_en_do_set_mac()
748 en_err(priv, "Failed changing HW MAC address\n"); in mlx4_en_do_set_mac()
750 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); in mlx4_en_do_set_mac()
753 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); in mlx4_en_do_set_mac()
760 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_set_mac() local
761 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_set_mac()
771 err = mlx4_en_do_set_mac(priv, new_mac); in mlx4_en_set_mac()
781 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_clear_list() local
784 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { in mlx4_en_clear_list()
792 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_cache_mclist() local
804 list_add_tail(&tmp->list, &priv->mc_list); in mlx4_en_cache_mclist()
808 static void update_mclist_flags(struct mlx4_en_priv *priv, in update_mclist_flags() argument
857 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_set_rx_mode() local
859 if (!priv->port_up) in mlx4_en_set_rx_mode()
862 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); in mlx4_en_set_rx_mode()
865 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, in mlx4_en_set_promisc_mode() argument
870 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { in mlx4_en_set_promisc_mode()
871 if (netif_msg_rx_status(priv)) in mlx4_en_set_promisc_mode()
872 en_warn(priv, "Entering promiscuous mode\n"); in mlx4_en_set_promisc_mode()
873 priv->flags |= MLX4_EN_FLAG_PROMISC; in mlx4_en_set_promisc_mode()
879 priv->port, in mlx4_en_set_promisc_mode()
880 priv->base_qpn, in mlx4_en_set_promisc_mode()
883 en_err(priv, "Failed enabling promiscuous mode\n"); in mlx4_en_set_promisc_mode()
884 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_set_promisc_mode()
889 priv->base_qpn, in mlx4_en_set_promisc_mode()
890 priv->port); in mlx4_en_set_promisc_mode()
892 en_err(priv, "Failed enabling unicast promiscuous mode\n"); in mlx4_en_set_promisc_mode()
897 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { in mlx4_en_set_promisc_mode()
899 priv->base_qpn, in mlx4_en_set_promisc_mode()
900 priv->port); in mlx4_en_set_promisc_mode()
902 en_err(priv, "Failed enabling multicast promiscuous mode\n"); in mlx4_en_set_promisc_mode()
903 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_set_promisc_mode()
909 priv->port, in mlx4_en_set_promisc_mode()
910 priv->base_qpn, in mlx4_en_set_promisc_mode()
913 en_err(priv, "Failed enabling promiscuous mode\n"); in mlx4_en_set_promisc_mode()
918 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_set_promisc_mode()
921 en_err(priv, "Failed disabling multicast filter\n"); in mlx4_en_set_promisc_mode()
925 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, in mlx4_en_clear_promisc_mode() argument
930 if (netif_msg_rx_status(priv)) in mlx4_en_clear_promisc_mode()
931 en_warn(priv, "Leaving promiscuous mode\n"); in mlx4_en_clear_promisc_mode()
932 priv->flags &= ~MLX4_EN_FLAG_PROMISC; in mlx4_en_clear_promisc_mode()
938 priv->port, in mlx4_en_clear_promisc_mode()
941 en_err(priv, "Failed disabling promiscuous mode\n"); in mlx4_en_clear_promisc_mode()
942 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_clear_promisc_mode()
947 priv->base_qpn, in mlx4_en_clear_promisc_mode()
948 priv->port); in mlx4_en_clear_promisc_mode()
950 en_err(priv, "Failed disabling unicast promiscuous mode\n"); in mlx4_en_clear_promisc_mode()
952 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { in mlx4_en_clear_promisc_mode()
954 priv->base_qpn, in mlx4_en_clear_promisc_mode()
955 priv->port); in mlx4_en_clear_promisc_mode()
957 en_err(priv, "Failed disabling multicast promiscuous mode\n"); in mlx4_en_clear_promisc_mode()
958 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_clear_promisc_mode()
964 priv->port, in mlx4_en_clear_promisc_mode()
965 priv->base_qpn, 0); in mlx4_en_clear_promisc_mode()
967 en_err(priv, "Failed disabling promiscuous mode\n"); in mlx4_en_clear_promisc_mode()
972 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, in mlx4_en_do_multicast() argument
983 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_do_multicast()
986 en_err(priv, "Failed disabling multicast filter\n"); in mlx4_en_do_multicast()
989 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { in mlx4_en_do_multicast()
993 priv->port, in mlx4_en_do_multicast()
994 priv->base_qpn, in mlx4_en_do_multicast()
1000 priv->base_qpn, in mlx4_en_do_multicast()
1001 priv->port); in mlx4_en_do_multicast()
1008 en_err(priv, "Failed entering multicast promisc mode\n"); in mlx4_en_do_multicast()
1009 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_do_multicast()
1013 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { in mlx4_en_do_multicast()
1017 priv->port, in mlx4_en_do_multicast()
1023 priv->base_qpn, in mlx4_en_do_multicast()
1024 priv->port); in mlx4_en_do_multicast()
1031 en_err(priv, "Failed disabling multicast promiscuous mode\n"); in mlx4_en_do_multicast()
1032 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_do_multicast()
1035 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_do_multicast()
1038 en_err(priv, "Failed disabling multicast filter\n"); in mlx4_en_do_multicast()
1041 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, in mlx4_en_do_multicast()
1049 list_for_each_entry(mclist, &priv->mc_list, list) { in mlx4_en_do_multicast()
1051 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, in mlx4_en_do_multicast()
1054 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_do_multicast()
1057 en_err(priv, "Failed enabling multicast filter\n"); in mlx4_en_do_multicast()
1059 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); in mlx4_en_do_multicast()
1060 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { in mlx4_en_do_multicast()
1064 mc_list[5] = priv->port; in mlx4_en_do_multicast()
1066 &priv->rss_map.indir_qp, in mlx4_en_do_multicast()
1071 en_err(priv, "Fail to detach multicast address\n"); in mlx4_en_do_multicast()
1074 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); in mlx4_en_do_multicast()
1076 en_err(priv, "Failed to detach multicast address\n"); in mlx4_en_do_multicast()
1086 mc_list[5] = priv->port; in mlx4_en_do_multicast()
1088 &priv->rss_map.indir_qp, in mlx4_en_do_multicast()
1090 priv->port, 0, in mlx4_en_do_multicast()
1094 en_err(priv, "Fail to attach multicast address\n"); in mlx4_en_do_multicast()
1096 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, in mlx4_en_do_multicast()
1099 en_err(priv, "Failed to attach multicast address\n"); in mlx4_en_do_multicast()
1105 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, in mlx4_en_do_uc_filter() argument
1126 bucket = &priv->mac_hash[i]; in mlx4_en_do_uc_filter()
1139 priv->current_mac)) in mlx4_en_do_uc_filter()
1144 mlx4_en_uc_steer_release(priv, entry->mac, in mlx4_en_do_uc_filter()
1145 priv->base_qpn, in mlx4_en_do_uc_filter()
1147 mlx4_unregister_mac(mdev->dev, priv->port, mac); in mlx4_en_do_uc_filter()
1151 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", in mlx4_en_do_uc_filter()
1152 entry->mac, priv->port); in mlx4_en_do_uc_filter()
1161 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) in mlx4_en_do_uc_filter()
1164 prev_flags = priv->flags; in mlx4_en_do_uc_filter()
1165 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; in mlx4_en_do_uc_filter()
1170 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; in mlx4_en_do_uc_filter()
1181 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", in mlx4_en_do_uc_filter()
1182 ha->addr, priv->port); in mlx4_en_do_uc_filter()
1183 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; in mlx4_en_do_uc_filter()
1188 err = mlx4_register_mac(mdev->dev, priv->port, mac); in mlx4_en_do_uc_filter()
1190 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", in mlx4_en_do_uc_filter()
1191 ha->addr, priv->port, err); in mlx4_en_do_uc_filter()
1193 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; in mlx4_en_do_uc_filter()
1196 err = mlx4_en_uc_steer_add(priv, ha->addr, in mlx4_en_do_uc_filter()
1197 &priv->base_qpn, in mlx4_en_do_uc_filter()
1200 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", in mlx4_en_do_uc_filter()
1201 ha->addr, priv->port, err); in mlx4_en_do_uc_filter()
1202 mlx4_unregister_mac(mdev->dev, priv->port, mac); in mlx4_en_do_uc_filter()
1204 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; in mlx4_en_do_uc_filter()
1208 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", in mlx4_en_do_uc_filter()
1209 ha->addr, priv->port); in mlx4_en_do_uc_filter()
1211 bucket = &priv->mac_hash[mac_hash]; in mlx4_en_do_uc_filter()
1217 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { in mlx4_en_do_uc_filter()
1218 en_warn(priv, "Forcing promiscuous mode on port:%d\n", in mlx4_en_do_uc_filter()
1219 priv->port); in mlx4_en_do_uc_filter()
1221 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", in mlx4_en_do_uc_filter()
1222 priv->port); in mlx4_en_do_uc_filter()
1228 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, in mlx4_en_do_set_rx_mode() local
1230 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_do_set_rx_mode()
1231 struct net_device *dev = priv->dev; in mlx4_en_do_set_rx_mode()
1235 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); in mlx4_en_do_set_rx_mode()
1238 if (!priv->port_up) { in mlx4_en_do_set_rx_mode()
1239 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); in mlx4_en_do_set_rx_mode()
1244 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { in mlx4_en_do_set_rx_mode()
1245 if (priv->port_state.link_state) { in mlx4_en_do_set_rx_mode()
1246 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; in mlx4_en_do_set_rx_mode()
1248 en_dbg(LINK, priv, "Link Up\n"); in mlx4_en_do_set_rx_mode()
1254 mlx4_en_do_uc_filter(priv, dev, mdev); in mlx4_en_do_set_rx_mode()
1258 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { in mlx4_en_do_set_rx_mode()
1259 mlx4_en_set_promisc_mode(priv, mdev); in mlx4_en_do_set_rx_mode()
1264 if (priv->flags & MLX4_EN_FLAG_PROMISC) in mlx4_en_do_set_rx_mode()
1265 mlx4_en_clear_promisc_mode(priv, mdev); in mlx4_en_do_set_rx_mode()
1267 mlx4_en_do_multicast(priv, dev, mdev); in mlx4_en_do_set_rx_mode()
1275 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_netpoll() local
1279 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_netpoll()
1280 cq = priv->rx_cq[i]; in mlx4_en_netpoll()
1288 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_tx_timeout() local
1289 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_tx_timeout()
1292 if (netif_msg_timer(priv)) in mlx4_en_tx_timeout()
1293 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); in mlx4_en_tx_timeout()
1295 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_tx_timeout()
1298 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", in mlx4_en_tx_timeout()
1299 i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, in mlx4_en_tx_timeout()
1300 priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); in mlx4_en_tx_timeout()
1303 priv->port_stats.tx_timeout++; in mlx4_en_tx_timeout()
1304 en_dbg(DRV, priv, "Scheduling watchdog\n"); in mlx4_en_tx_timeout()
1305 queue_work(mdev->workqueue, &priv->watchdog_task); in mlx4_en_tx_timeout()
1311 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_get_stats() local
1313 spin_lock_bh(&priv->stats_lock); in mlx4_en_get_stats()
1314 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); in mlx4_en_get_stats()
1315 spin_unlock_bh(&priv->stats_lock); in mlx4_en_get_stats()
1317 return &priv->ret_stats; in mlx4_en_get_stats()
1320 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) in mlx4_en_set_default_moderation() argument
1331 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; in mlx4_en_set_default_moderation()
1332 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; in mlx4_en_set_default_moderation()
1333 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; in mlx4_en_set_default_moderation()
1334 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; in mlx4_en_set_default_moderation()
1335 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", in mlx4_en_set_default_moderation()
1336 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); in mlx4_en_set_default_moderation()
1339 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_set_default_moderation()
1340 cq = priv->rx_cq[i]; in mlx4_en_set_default_moderation()
1341 cq->moder_cnt = priv->rx_frames; in mlx4_en_set_default_moderation()
1342 cq->moder_time = priv->rx_usecs; in mlx4_en_set_default_moderation()
1343 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; in mlx4_en_set_default_moderation()
1344 priv->last_moder_packets[i] = 0; in mlx4_en_set_default_moderation()
1345 priv->last_moder_bytes[i] = 0; in mlx4_en_set_default_moderation()
1348 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_set_default_moderation()
1349 cq = priv->tx_cq[i]; in mlx4_en_set_default_moderation()
1350 cq->moder_cnt = priv->tx_frames; in mlx4_en_set_default_moderation()
1351 cq->moder_time = priv->tx_usecs; in mlx4_en_set_default_moderation()
1355 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; in mlx4_en_set_default_moderation()
1356 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; in mlx4_en_set_default_moderation()
1357 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; in mlx4_en_set_default_moderation()
1358 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; in mlx4_en_set_default_moderation()
1359 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; in mlx4_en_set_default_moderation()
1360 priv->adaptive_rx_coal = 1; in mlx4_en_set_default_moderation()
1361 priv->last_moder_jiffies = 0; in mlx4_en_set_default_moderation()
1362 priv->last_moder_tx_packets = 0; in mlx4_en_set_default_moderation()
1365 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) in mlx4_en_auto_moderation() argument
1367 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); in mlx4_en_auto_moderation()
1378 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) in mlx4_en_auto_moderation()
1381 for (ring = 0; ring < priv->rx_ring_num; ring++) { in mlx4_en_auto_moderation()
1382 spin_lock_bh(&priv->stats_lock); in mlx4_en_auto_moderation()
1383 rx_packets = priv->rx_ring[ring]->packets; in mlx4_en_auto_moderation()
1384 rx_bytes = priv->rx_ring[ring]->bytes; in mlx4_en_auto_moderation()
1385 spin_unlock_bh(&priv->stats_lock); in mlx4_en_auto_moderation()
1388 priv->last_moder_packets[ring])); in mlx4_en_auto_moderation()
1392 priv->last_moder_bytes[ring])) / packets : 0; in mlx4_en_auto_moderation()
1396 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && in mlx4_en_auto_moderation()
1398 if (rate < priv->pkt_rate_low) in mlx4_en_auto_moderation()
1399 moder_time = priv->rx_usecs_low; in mlx4_en_auto_moderation()
1400 else if (rate > priv->pkt_rate_high) in mlx4_en_auto_moderation()
1401 moder_time = priv->rx_usecs_high; in mlx4_en_auto_moderation()
1403 moder_time = (rate - priv->pkt_rate_low) * in mlx4_en_auto_moderation()
1404 (priv->rx_usecs_high - priv->rx_usecs_low) / in mlx4_en_auto_moderation()
1405 (priv->pkt_rate_high - priv->pkt_rate_low) + in mlx4_en_auto_moderation()
1406 priv->rx_usecs_low; in mlx4_en_auto_moderation()
1408 moder_time = priv->rx_usecs_low; in mlx4_en_auto_moderation()
1411 if (moder_time != priv->last_moder_time[ring]) { in mlx4_en_auto_moderation()
1412 priv->last_moder_time[ring] = moder_time; in mlx4_en_auto_moderation()
1413 cq = priv->rx_cq[ring]; in mlx4_en_auto_moderation()
1415 cq->moder_cnt = priv->rx_frames; in mlx4_en_auto_moderation()
1416 err = mlx4_en_set_cq_moder(priv, cq); in mlx4_en_auto_moderation()
1418 en_err(priv, "Failed modifying moderation for cq:%d\n", in mlx4_en_auto_moderation()
1421 priv->last_moder_packets[ring] = rx_packets; in mlx4_en_auto_moderation()
1422 priv->last_moder_bytes[ring] = rx_bytes; in mlx4_en_auto_moderation()
1425 priv->last_moder_jiffies = jiffies; in mlx4_en_auto_moderation()
1431 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, in mlx4_en_do_get_stats() local
1433 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_do_get_stats()
1438 if (priv->port_up) { in mlx4_en_do_get_stats()
1439 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); in mlx4_en_do_get_stats()
1441 en_dbg(HW, priv, "Could not update stats\n"); in mlx4_en_do_get_stats()
1443 mlx4_en_auto_moderation(priv); in mlx4_en_do_get_stats()
1446 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); in mlx4_en_do_get_stats()
1448 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { in mlx4_en_do_get_stats()
1449 mlx4_en_do_set_mac(priv, priv->current_mac); in mlx4_en_do_get_stats()
1450 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; in mlx4_en_do_get_stats()
1461 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, in mlx4_en_service_task() local
1463 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_service_task()
1470 mlx4_en_recover_from_oom(priv); in mlx4_en_service_task()
1471 queue_delayed_work(mdev->workqueue, &priv->service_task, in mlx4_en_service_task()
1479 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, in mlx4_en_linkstate() local
1481 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_linkstate()
1482 int linkstate = priv->link_state; in mlx4_en_linkstate()
1487 if (priv->last_link_state != linkstate) { in mlx4_en_linkstate()
1489 en_info(priv, "Link Down\n"); in mlx4_en_linkstate()
1490 netif_carrier_off(priv->dev); in mlx4_en_linkstate()
1492 en_info(priv, "Link Up\n"); in mlx4_en_linkstate()
1493 netif_carrier_on(priv->dev); in mlx4_en_linkstate()
1496 priv->last_link_state = linkstate; in mlx4_en_linkstate()
1500 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) in mlx4_en_init_affinity_hint() argument
1502 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; in mlx4_en_init_affinity_hint()
1503 int numa_node = priv->mdev->dev->numa_node; in mlx4_en_init_affinity_hint()
1513 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) in mlx4_en_free_affinity_hint() argument
1515 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); in mlx4_en_free_affinity_hint()
1520 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_start_port() local
1521 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_start_port()
1531 if (priv->port_up) { in mlx4_en_start_port()
1532 en_dbg(DRV, priv, "start port called while port already up\n"); in mlx4_en_start_port()
1536 INIT_LIST_HEAD(&priv->mc_list); in mlx4_en_start_port()
1537 INIT_LIST_HEAD(&priv->curr_list); in mlx4_en_start_port()
1538 INIT_LIST_HEAD(&priv->ethtool_list); in mlx4_en_start_port()
1539 memset(&priv->ethtool_rules[0], 0, in mlx4_en_start_port()
1543 dev->mtu = min(dev->mtu, priv->max_mtu); in mlx4_en_start_port()
1545 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); in mlx4_en_start_port()
1548 err = mlx4_en_activate_rx_rings(priv); in mlx4_en_start_port()
1550 en_err(priv, "Failed to activate RX rings\n"); in mlx4_en_start_port()
1553 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_start_port()
1554 cq = priv->rx_cq[i]; in mlx4_en_start_port()
1558 err = mlx4_en_init_affinity_hint(priv, i); in mlx4_en_start_port()
1560 en_err(priv, "Failed preparing IRQ affinity hint\n"); in mlx4_en_start_port()
1564 err = mlx4_en_activate_cq(priv, cq, i); in mlx4_en_start_port()
1566 en_err(priv, "Failed activating Rx CQ\n"); in mlx4_en_start_port()
1567 mlx4_en_free_affinity_hint(priv, i); in mlx4_en_start_port()
1574 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + in mlx4_en_start_port()
1575 priv->cqe_factor; in mlx4_en_start_port()
1579 err = mlx4_en_set_cq_moder(priv, cq); in mlx4_en_start_port()
1581 en_err(priv, "Failed setting cq moderation parameters\n"); in mlx4_en_start_port()
1582 mlx4_en_deactivate_cq(priv, cq); in mlx4_en_start_port()
1583 mlx4_en_free_affinity_hint(priv, i); in mlx4_en_start_port()
1586 mlx4_en_arm_cq(priv, cq); in mlx4_en_start_port()
1587 priv->rx_ring[i]->cqn = cq->mcq.cqn; in mlx4_en_start_port()
1592 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); in mlx4_en_start_port()
1593 err = mlx4_en_get_qp(priv); in mlx4_en_start_port()
1595 en_err(priv, "Failed getting eth qp\n"); in mlx4_en_start_port()
1598 mdev->mac_removed[priv->port] = 0; in mlx4_en_start_port()
1600 err = mlx4_en_config_rss_steer(priv); in mlx4_en_start_port()
1602 en_err(priv, "Failed configuring rss steering\n"); in mlx4_en_start_port()
1606 err = mlx4_en_create_drop_qp(priv); in mlx4_en_start_port()
1611 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_start_port()
1613 cq = priv->tx_cq[i]; in mlx4_en_start_port()
1614 err = mlx4_en_activate_cq(priv, cq, i); in mlx4_en_start_port()
1616 en_err(priv, "Failed allocating Tx CQ\n"); in mlx4_en_start_port()
1619 err = mlx4_en_set_cq_moder(priv, cq); in mlx4_en_start_port()
1621 en_err(priv, "Failed setting cq moderation parameters\n"); in mlx4_en_start_port()
1622 mlx4_en_deactivate_cq(priv, cq); in mlx4_en_start_port()
1625 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); in mlx4_en_start_port()
1629 tx_ring = priv->tx_ring[i]; in mlx4_en_start_port()
1630 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, in mlx4_en_start_port()
1631 i / priv->num_tx_rings_p_up); in mlx4_en_start_port()
1633 en_err(priv, "Failed allocating Tx ring\n"); in mlx4_en_start_port()
1634 mlx4_en_deactivate_cq(priv, cq); in mlx4_en_start_port()
1640 mlx4_en_arm_cq(priv, cq); in mlx4_en_start_port()
1649 err = mlx4_SET_PORT_general(mdev->dev, priv->port, in mlx4_en_start_port()
1650 priv->rx_skb_size + ETH_FCS_LEN, in mlx4_en_start_port()
1651 priv->prof->tx_pause, in mlx4_en_start_port()
1652 priv->prof->tx_ppp, in mlx4_en_start_port()
1653 priv->prof->rx_pause, in mlx4_en_start_port()
1654 priv->prof->rx_ppp); in mlx4_en_start_port()
1656 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", in mlx4_en_start_port()
1657 priv->port, err); in mlx4_en_start_port()
1661 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); in mlx4_en_start_port()
1663 en_err(priv, "Failed setting default qp numbers\n"); in mlx4_en_start_port()
1668 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); in mlx4_en_start_port()
1670 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", in mlx4_en_start_port()
1677 en_dbg(HW, priv, "Initializing port\n"); in mlx4_en_start_port()
1678 err = mlx4_INIT_PORT(mdev->dev, priv->port); in mlx4_en_start_port()
1680 en_err(priv, "Failed Initializing port\n"); in mlx4_en_start_port()
1686 mc_list[5] = priv->port; /* needed for B0 steering support */ in mlx4_en_start_port()
1687 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, in mlx4_en_start_port()
1688 priv->port, 0, MLX4_PROT_ETH, in mlx4_en_start_port()
1689 &priv->broadcast_id)) in mlx4_en_start_port()
1693 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); in mlx4_en_start_port()
1696 queue_work(mdev->workqueue, &priv->rx_mode_task); in mlx4_en_start_port()
1699 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) in mlx4_en_start_port()
1702 priv->port_up = true; in mlx4_en_start_port()
1710 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); in mlx4_en_start_port()
1711 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); in mlx4_en_start_port()
1713 mlx4_en_destroy_drop_qp(priv); in mlx4_en_start_port()
1715 mlx4_en_release_rss_steer(priv); in mlx4_en_start_port()
1717 mlx4_en_put_qp(priv); in mlx4_en_start_port()
1720 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); in mlx4_en_start_port()
1721 mlx4_en_free_affinity_hint(priv, rx_index); in mlx4_en_start_port()
1723 for (i = 0; i < priv->rx_ring_num; i++) in mlx4_en_start_port()
1724 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); in mlx4_en_start_port()
1732 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_stop_port() local
1733 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_stop_port()
1739 if (!priv->port_up) { in mlx4_en_stop_port()
1740 en_dbg(DRV, priv, "stop port called while port already down\n"); in mlx4_en_stop_port()
1745 mlx4_CLOSE_PORT(mdev->dev, priv->port); in mlx4_en_stop_port()
1757 priv->port_up = false; in mlx4_en_stop_port()
1762 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | in mlx4_en_stop_port()
1765 priv->port, in mlx4_en_stop_port()
1768 priv->port, in mlx4_en_stop_port()
1770 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { in mlx4_en_stop_port()
1771 priv->flags &= ~MLX4_EN_FLAG_PROMISC; in mlx4_en_stop_port()
1774 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, in mlx4_en_stop_port()
1775 priv->port); in mlx4_en_stop_port()
1778 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { in mlx4_en_stop_port()
1779 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, in mlx4_en_stop_port()
1780 priv->port); in mlx4_en_stop_port()
1781 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; in mlx4_en_stop_port()
1787 mc_list[5] = priv->port; /* needed for B0 steering support */ in mlx4_en_stop_port()
1788 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, in mlx4_en_stop_port()
1789 MLX4_PROT_ETH, priv->broadcast_id); in mlx4_en_stop_port()
1790 list_for_each_entry(mclist, &priv->curr_list, list) { in mlx4_en_stop_port()
1792 mc_list[5] = priv->port; in mlx4_en_stop_port()
1793 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, in mlx4_en_stop_port()
1799 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { in mlx4_en_stop_port()
1805 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); in mlx4_en_stop_port()
1812 &priv->ethtool_list, list) { in mlx4_en_stop_port()
1818 mlx4_en_destroy_drop_qp(priv); in mlx4_en_stop_port()
1821 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_stop_port()
1822 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); in mlx4_en_stop_port()
1823 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); in mlx4_en_stop_port()
1827 for (i = 0; i < priv->tx_ring_num; i++) in mlx4_en_stop_port()
1828 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); in mlx4_en_stop_port()
1831 mlx4_en_release_rss_steer(priv); in mlx4_en_stop_port()
1834 mlx4_en_put_qp(priv); in mlx4_en_stop_port()
1836 mdev->mac_removed[priv->port] = 1; in mlx4_en_stop_port()
1839 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_stop_port()
1840 struct mlx4_en_cq *cq = priv->rx_cq[i]; in mlx4_en_stop_port()
1850 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); in mlx4_en_stop_port()
1851 mlx4_en_deactivate_cq(priv, cq); in mlx4_en_stop_port()
1853 mlx4_en_free_affinity_hint(priv, i); in mlx4_en_stop_port()
1859 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, in mlx4_en_restart() local
1861 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_restart()
1862 struct net_device *dev = priv->dev; in mlx4_en_restart()
1864 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); in mlx4_en_restart()
1867 if (priv->port_up) { in mlx4_en_restart()
1870 en_err(priv, "Failed restarting port %d\n", priv->port); in mlx4_en_restart()
1877 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_clear_stats() local
1878 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_clear_stats()
1881 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) in mlx4_en_clear_stats()
1882 en_dbg(HW, priv, "Failed dumping statistics\n"); in mlx4_en_clear_stats()
1884 memset(&priv->stats, 0, sizeof(priv->stats)); in mlx4_en_clear_stats()
1885 memset(&priv->pstats, 0, sizeof(priv->pstats)); in mlx4_en_clear_stats()
1886 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); in mlx4_en_clear_stats()
1887 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); in mlx4_en_clear_stats()
1888 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); in mlx4_en_clear_stats()
1889 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); in mlx4_en_clear_stats()
1890 memset(&priv->rx_priority_flowstats, 0, in mlx4_en_clear_stats()
1891 sizeof(priv->rx_priority_flowstats)); in mlx4_en_clear_stats()
1892 memset(&priv->tx_priority_flowstats, 0, in mlx4_en_clear_stats()
1893 sizeof(priv->tx_priority_flowstats)); in mlx4_en_clear_stats()
1895 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_clear_stats()
1896 priv->tx_ring[i]->bytes = 0; in mlx4_en_clear_stats()
1897 priv->tx_ring[i]->packets = 0; in mlx4_en_clear_stats()
1898 priv->tx_ring[i]->tx_csum = 0; in mlx4_en_clear_stats()
1900 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_clear_stats()
1901 priv->rx_ring[i]->bytes = 0; in mlx4_en_clear_stats()
1902 priv->rx_ring[i]->packets = 0; in mlx4_en_clear_stats()
1903 priv->rx_ring[i]->csum_ok = 0; in mlx4_en_clear_stats()
1904 priv->rx_ring[i]->csum_none = 0; in mlx4_en_clear_stats()
1905 priv->rx_ring[i]->csum_complete = 0; in mlx4_en_clear_stats()
1911 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_open() local
1912 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_open()
1918 en_err(priv, "Cannot open - device down/disabled\n"); in mlx4_en_open()
1928 en_err(priv, "Failed starting port:%d\n", priv->port); in mlx4_en_open()
1938 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_close() local
1939 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_close()
1941 en_dbg(IFDOWN, priv, "Close port called\n"); in mlx4_en_close()
1952 void mlx4_en_free_resources(struct mlx4_en_priv *priv) in mlx4_en_free_resources() argument
1957 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); in mlx4_en_free_resources()
1958 priv->dev->rx_cpu_rmap = NULL; in mlx4_en_free_resources()
1961 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_free_resources()
1962 if (priv->tx_ring && priv->tx_ring[i]) in mlx4_en_free_resources()
1963 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); in mlx4_en_free_resources()
1964 if (priv->tx_cq && priv->tx_cq[i]) in mlx4_en_free_resources()
1965 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); in mlx4_en_free_resources()
1968 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_free_resources()
1969 if (priv->rx_ring[i]) in mlx4_en_free_resources()
1970 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], in mlx4_en_free_resources()
1971 priv->prof->rx_ring_size, priv->stride); in mlx4_en_free_resources()
1972 if (priv->rx_cq[i]) in mlx4_en_free_resources()
1973 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); in mlx4_en_free_resources()
1978 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) in mlx4_en_alloc_resources() argument
1980 struct mlx4_en_port_profile *prof = priv->prof; in mlx4_en_alloc_resources()
1985 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_alloc_resources()
1987 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], in mlx4_en_alloc_resources()
1991 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], in mlx4_en_alloc_resources()
1998 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_alloc_resources()
2000 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], in mlx4_en_alloc_resources()
2004 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], in mlx4_en_alloc_resources()
2005 prof->rx_ring_size, priv->stride, in mlx4_en_alloc_resources()
2011 if (priv->mdev->dev->caps.comp_pool) { in mlx4_en_alloc_resources()
2012 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); in mlx4_en_alloc_resources()
2013 if (!priv->dev->rx_cpu_rmap) in mlx4_en_alloc_resources()
2021 en_err(priv, "Failed to allocate NIC resources\n"); in mlx4_en_alloc_resources()
2022 for (i = 0; i < priv->rx_ring_num; i++) { in mlx4_en_alloc_resources()
2023 if (priv->rx_ring[i]) in mlx4_en_alloc_resources()
2024 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], in mlx4_en_alloc_resources()
2026 priv->stride); in mlx4_en_alloc_resources()
2027 if (priv->rx_cq[i]) in mlx4_en_alloc_resources()
2028 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); in mlx4_en_alloc_resources()
2030 for (i = 0; i < priv->tx_ring_num; i++) { in mlx4_en_alloc_resources()
2031 if (priv->tx_ring[i]) in mlx4_en_alloc_resources()
2032 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); in mlx4_en_alloc_resources()
2033 if (priv->tx_cq[i]) in mlx4_en_alloc_resources()
2034 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); in mlx4_en_alloc_resources()
2042 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_destroy_netdev() local
2043 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_destroy_netdev()
2045 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); in mlx4_en_destroy_netdev()
2048 if (priv->registered) in mlx4_en_destroy_netdev()
2051 if (priv->allocated) in mlx4_en_destroy_netdev()
2052 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); in mlx4_en_destroy_netdev()
2054 cancel_delayed_work(&priv->stats_task); in mlx4_en_destroy_netdev()
2055 cancel_delayed_work(&priv->service_task); in mlx4_en_destroy_netdev()
2061 mdev->pndev[priv->port] = NULL; in mlx4_en_destroy_netdev()
2062 mdev->upper[priv->port] = NULL; in mlx4_en_destroy_netdev()
2065 mlx4_en_free_resources(priv); in mlx4_en_destroy_netdev()
2067 kfree(priv->tx_ring); in mlx4_en_destroy_netdev()
2068 kfree(priv->tx_cq); in mlx4_en_destroy_netdev()
2075 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_change_mtu() local
2076 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_change_mtu()
2079 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", in mlx4_en_change_mtu()
2082 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { in mlx4_en_change_mtu()
2083 en_err(priv, "Bad MTU size:%d.\n", new_mtu); in mlx4_en_change_mtu()
2093 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); in mlx4_en_change_mtu()
2098 en_err(priv, "Failed restarting port:%d\n", in mlx4_en_change_mtu()
2099 priv->port); in mlx4_en_change_mtu()
2100 queue_work(mdev->workqueue, &priv->watchdog_task); in mlx4_en_change_mtu()
2110 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_hwtstamp_set() local
2111 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_hwtstamp_set()
2169 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_hwtstamp_get() local
2171 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, in mlx4_en_hwtstamp_get()
2172 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; in mlx4_en_hwtstamp_get()
2190 struct mlx4_en_priv *priv = netdev_priv(netdev); in mlx4_en_set_features() local
2195 en_info(priv, "Turn %s RX-FCS\n", in mlx4_en_set_features()
2203 en_info(priv, "Turn %s RX-ALL\n", in mlx4_en_set_features()
2205 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, in mlx4_en_set_features()
2206 priv->port, ignore_fcs_value); in mlx4_en_set_features()
2212 en_info(priv, "Turn %s RX vlan strip offload\n", in mlx4_en_set_features()
2218 en_info(priv, "Turn %s TX vlan strip offload\n", in mlx4_en_set_features()
2222 en_info(priv, "Turn %s loopback\n", in mlx4_en_set_features()
2228 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, in mlx4_en_set_features()
2295 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_get_phys_port_id() local
2296 struct mlx4_dev *mdev = priv->mdev->dev; in mlx4_en_get_phys_port_id()
2298 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; in mlx4_en_get_phys_port_id()
2315 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, in mlx4_en_add_vxlan_offloads() local
2318 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); in mlx4_en_add_vxlan_offloads()
2322 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, in mlx4_en_add_vxlan_offloads()
2326 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); in mlx4_en_add_vxlan_offloads()
2331 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | in mlx4_en_add_vxlan_offloads()
2338 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, in mlx4_en_del_vxlan_offloads() local
2341 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | in mlx4_en_del_vxlan_offloads()
2344 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, in mlx4_en_del_vxlan_offloads()
2347 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); in mlx4_en_del_vxlan_offloads()
2349 priv->vxlan_port = 0; in mlx4_en_del_vxlan_offloads()
2355 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_add_vxlan_port() local
2358 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) in mlx4_en_add_vxlan_port()
2364 current_port = priv->vxlan_port; in mlx4_en_add_vxlan_port()
2366 en_warn(priv, "vxlan port %d configured, can't add port %d\n", in mlx4_en_add_vxlan_port()
2371 priv->vxlan_port = port; in mlx4_en_add_vxlan_port()
2372 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); in mlx4_en_add_vxlan_port()
2378 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_del_vxlan_port() local
2381 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) in mlx4_en_del_vxlan_port()
2387 current_port = priv->vxlan_port; in mlx4_en_del_vxlan_port()
2389 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); in mlx4_en_del_vxlan_port()
2393 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); in mlx4_en_del_vxlan_port()
2407 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_set_tx_maxrate() local
2408 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; in mlx4_en_set_tx_maxrate()
2412 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) in mlx4_en_set_tx_maxrate()
2427 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, in mlx4_en_set_tx_maxrate()
2504 struct mlx4_en_priv *priv; member
2515 struct mlx4_dev *dev = bond->priv->mdev->dev; in mlx4_en_bond_work()
2521 en_err(bond->priv, "Fail to bond device\n"); in mlx4_en_bond_work()
2526 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", in mlx4_en_bond_work()
2534 en_err(bond->priv, "Fail to unbond device\n"); in mlx4_en_bond_work()
2536 dev_put(bond->priv->dev); in mlx4_en_bond_work()
2540 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, in mlx4_en_queue_bond_work() argument
2550 bond->priv = priv; in mlx4_en_queue_bond_work()
2554 dev_hold(priv->dev); in mlx4_en_queue_bond_work()
2555 queue_work(priv->mdev->workqueue, &bond->work); in mlx4_en_queue_bond_work()
2568 struct mlx4_en_priv *priv; in mlx4_en_netdev_event() local
2603 priv = netdev_priv(ndev); in mlx4_en_netdev_event()
2670 mlx4_en_queue_bond_work(priv, do_bond, in mlx4_en_netdev_event()
2756 struct mlx4_en_priv *priv; in mlx4_en_init_netdev() local
2776 priv = netdev_priv(dev); in mlx4_en_init_netdev()
2777 memset(priv, 0, sizeof(struct mlx4_en_priv)); in mlx4_en_init_netdev()
2778 spin_lock_init(&priv->stats_lock); in mlx4_en_init_netdev()
2779 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); in mlx4_en_init_netdev()
2780 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); in mlx4_en_init_netdev()
2781 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); in mlx4_en_init_netdev()
2782 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); in mlx4_en_init_netdev()
2783 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); in mlx4_en_init_netdev()
2785 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); in mlx4_en_init_netdev()
2786 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); in mlx4_en_init_netdev()
2789 INIT_LIST_HEAD(&priv->filters); in mlx4_en_init_netdev()
2790 spin_lock_init(&priv->filters_lock); in mlx4_en_init_netdev()
2793 priv->dev = dev; in mlx4_en_init_netdev()
2794 priv->mdev = mdev; in mlx4_en_init_netdev()
2795 priv->ddev = &mdev->pdev->dev; in mlx4_en_init_netdev()
2796 priv->prof = prof; in mlx4_en_init_netdev()
2797 priv->port = port; in mlx4_en_init_netdev()
2798 priv->port_up = false; in mlx4_en_init_netdev()
2799 priv->flags = prof->flags; in mlx4_en_init_netdev()
2800 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; in mlx4_en_init_netdev()
2801 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | in mlx4_en_init_netdev()
2803 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; in mlx4_en_init_netdev()
2804 priv->tx_ring_num = prof->tx_ring_num; in mlx4_en_init_netdev()
2805 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; in mlx4_en_init_netdev()
2806 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); in mlx4_en_init_netdev()
2808 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, in mlx4_en_init_netdev()
2810 if (!priv->tx_ring) { in mlx4_en_init_netdev()
2814 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, in mlx4_en_init_netdev()
2816 if (!priv->tx_cq) { in mlx4_en_init_netdev()
2820 priv->rx_ring_num = prof->rx_ring_num; in mlx4_en_init_netdev()
2821 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; in mlx4_en_init_netdev()
2822 priv->cqe_size = mdev->dev->caps.cqe_size; in mlx4_en_init_netdev()
2823 priv->mac_index = -1; in mlx4_en_init_netdev()
2824 priv->msg_enable = MLX4_EN_MSG_LEVEL; in mlx4_en_init_netdev()
2826 if (!mlx4_is_slave(priv->mdev->dev)) { in mlx4_en_init_netdev()
2830 en_info(priv, "enabling only PFC DCB ops\n"); in mlx4_en_init_netdev()
2837 INIT_HLIST_HEAD(&priv->mac_hash[i]); in mlx4_en_init_netdev()
2840 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; in mlx4_en_init_netdev()
2842 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & in mlx4_en_init_netdev()
2844 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; in mlx4_en_init_netdev()
2848 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); in mlx4_en_init_netdev()
2850 if (mlx4_is_slave(priv->mdev->dev)) { in mlx4_en_init_netdev()
2852 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); in mlx4_en_init_netdev()
2854 mdev->dev->caps.def_mac[priv->port] = mac_u64; in mlx4_en_init_netdev()
2856 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", in mlx4_en_init_netdev()
2857 priv->port, dev->dev_addr); in mlx4_en_init_netdev()
2863 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); in mlx4_en_init_netdev()
2865 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + in mlx4_en_init_netdev()
2867 err = mlx4_en_alloc_resources(priv); in mlx4_en_init_netdev()
2872 priv->hwtstamp_config.flags = 0; in mlx4_en_init_netdev()
2873 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; in mlx4_en_init_netdev()
2874 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; in mlx4_en_init_netdev()
2877 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, in mlx4_en_init_netdev()
2880 en_err(priv, "Failed to allocate page for rx qps\n"); in mlx4_en_init_netdev()
2883 priv->allocated = 1; in mlx4_en_init_netdev()
2888 if (mlx4_is_master(priv->mdev->dev)) in mlx4_en_init_netdev()
2893 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); in mlx4_en_init_netdev()
2894 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); in mlx4_en_init_netdev()
2930 priv->rss_hash_fn = ETH_RSS_HASH_TOP; in mlx4_en_init_netdev()
2932 priv->rss_hash_fn = ETH_RSS_HASH_XOR; in mlx4_en_init_netdev()
2934 en_warn(priv, in mlx4_en_init_netdev()
2936 priv->rss_hash_fn = ETH_RSS_HASH_TOP; in mlx4_en_init_netdev()
2948 mlx4_en_set_default_moderation(priv); in mlx4_en_init_netdev()
2950 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); in mlx4_en_init_netdev()
2951 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); in mlx4_en_init_netdev()
2953 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); in mlx4_en_init_netdev()
2957 err = mlx4_SET_PORT_general(mdev->dev, priv->port, in mlx4_en_init_netdev()
2958 priv->rx_skb_size + ETH_FCS_LEN, in mlx4_en_init_netdev()
2962 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", in mlx4_en_init_netdev()
2963 priv->port, err); in mlx4_en_init_netdev()
2968 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); in mlx4_en_init_netdev()
2970 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", in mlx4_en_init_netdev()
2977 en_warn(priv, "Initializing port\n"); in mlx4_en_init_netdev()
2978 err = mlx4_INIT_PORT(mdev->dev, priv->port); in mlx4_en_init_netdev()
2980 en_err(priv, "Failed Initializing port\n"); in mlx4_en_init_netdev()
2983 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); in mlx4_en_init_netdev()
2986 queue_delayed_work(mdev->workqueue, &priv->service_task, in mlx4_en_init_netdev()
2989 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, in mlx4_en_init_netdev()
2990 mdev->profile.prof[priv->port].rx_ppp, in mlx4_en_init_netdev()
2991 mdev->profile.prof[priv->port].rx_pause, in mlx4_en_init_netdev()
2992 mdev->profile.prof[priv->port].tx_ppp, in mlx4_en_init_netdev()
2993 mdev->profile.prof[priv->port].tx_pause); in mlx4_en_init_netdev()
2997 en_err(priv, "Netdev registration failed for port %d\n", port); in mlx4_en_init_netdev()
3001 priv->registered = 1; in mlx4_en_init_netdev()
3014 struct mlx4_en_priv *priv = netdev_priv(dev); in mlx4_en_reset_config() local
3015 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_reset_config()
3019 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && in mlx4_en_reset_config()
3020 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && in mlx4_en_reset_config()
3027 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { in mlx4_en_reset_config()
3028 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); in mlx4_en_reset_config()
3033 if (priv->port_up) { in mlx4_en_reset_config()
3038 mlx4_en_free_resources(priv); in mlx4_en_reset_config()
3040 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", in mlx4_en_reset_config()
3043 priv->hwtstamp_config.tx_type = ts_config.tx_type; in mlx4_en_reset_config()
3044 priv->hwtstamp_config.rx_filter = ts_config.rx_filter; in mlx4_en_reset_config()
3074 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); in mlx4_en_reset_config()
3078 err = mlx4_en_alloc_resources(priv); in mlx4_en_reset_config()
3080 en_err(priv, "Failed reallocating port resources\n"); in mlx4_en_reset_config()
3086 en_err(priv, "Failed starting port\n"); in mlx4_en_reset_config()
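Throughout mlx4_en, ndo callbacks and deferred work items follow one recurring pattern: recover the driver-private state with netdev_priv(dev), bail out early while priv->port_up is false, and reach the HCA through priv->mdev. A minimal, hypothetical sketch of that pattern (illustrative only; it assumes the usual kernel headers and the driver's mlx4_en.h private definitions, and is not the actual driver code):

#include <linux/netdevice.h>
#include "mlx4_en.h"	/* struct mlx4_en_priv, en_dbg() */

/* Illustrative callback: recover priv from the net_device and
 * skip hardware access while the port is down.
 */
static void example_port_task(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up) {
		en_dbg(DRV, priv, "Port is down, skipping\n");
		return;
	}

	/* ... program the device through priv->mdev->dev here ... */
}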