Lines matching refs:priv (sxgbe_main.c)
86 static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv) in sxgbe_enable_eee_mode() argument
89 if (!priv->tx_path_in_lpi_mode) in sxgbe_enable_eee_mode()
90 priv->hw->mac->set_eee_mode(priv->ioaddr); in sxgbe_enable_eee_mode()
93 void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv) in sxgbe_disable_eee_mode() argument
96 priv->hw->mac->reset_eee_mode(priv->ioaddr); in sxgbe_disable_eee_mode()
97 del_timer_sync(&priv->eee_ctrl_timer); in sxgbe_disable_eee_mode()
98 priv->tx_path_in_lpi_mode = false; in sxgbe_disable_eee_mode()
110 struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg; in sxgbe_eee_ctrl_timer() local
112 sxgbe_enable_eee_mode(priv); in sxgbe_eee_ctrl_timer()
113 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); in sxgbe_eee_ctrl_timer()
125 bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) in sxgbe_eee_init() argument
130 if (priv->hw_cap.eee) { in sxgbe_eee_init()
132 if (phy_init_eee(priv->phydev, 1)) in sxgbe_eee_init()
135 priv->eee_active = 1; in sxgbe_eee_init()
136 setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, in sxgbe_eee_init()
137 (unsigned long)priv); in sxgbe_eee_init()
138 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); in sxgbe_eee_init()
139 add_timer(&priv->eee_ctrl_timer); in sxgbe_eee_init()
141 priv->hw->mac->set_eee_timer(priv->ioaddr, in sxgbe_eee_init()
143 priv->tx_lpi_timer); in sxgbe_eee_init()
153 static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv) in sxgbe_eee_adjust() argument
159 if (priv->eee_enabled) in sxgbe_eee_adjust()
160 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); in sxgbe_eee_adjust()
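
The EEE lines above form a small state machine: enter low-power idle (LPI) only when the TX path is not already in it, and on exit cancel the re-arm timer before clearing the flag. Below is a minimal, runnable userspace sketch of that bookkeeping; names such as eee_state and lpi_timer_pending are illustrative stand-ins, not the driver's own.

    #include <stdbool.h>
    #include <stdio.h>

    struct eee_state {
            bool tx_path_in_lpi_mode;   /* mirrors priv->tx_path_in_lpi_mode */
            bool lpi_timer_pending;     /* stands in for priv->eee_ctrl_timer */
    };

    /* Enter LPI only if not already there; note the driver sets the
     * flag from the LPI interrupt (line 1622 above), not here. */
    static void eee_enable(struct eee_state *s)
    {
            if (!s->tx_path_in_lpi_mode)
                    printf("MAC: set EEE mode\n");   /* set_eee_mode() */
    }

    /* Leaving LPI also cancels the timer, as the driver does with
     * del_timer_sync() before clearing the flag. */
    static void eee_disable(struct eee_state *s)
    {
            printf("MAC: reset EEE mode\n");         /* reset_eee_mode() */
            s->lpi_timer_pending = false;
            s->tx_path_in_lpi_mode = false;
    }

    int main(void)
    {
            struct eee_state s = { .lpi_timer_pending = true };
            eee_enable(&s);
            eee_disable(&s);
            return s.lpi_timer_pending ? 1 : 0;
    }
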
169 static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv) in sxgbe_clk_csr_set() argument
171 u32 clk_rate = clk_get_rate(priv->sxgbe_clk); in sxgbe_clk_csr_set()
177 priv->clk_csr = SXGBE_CSR_100_150M; in sxgbe_clk_csr_set()
179 priv->clk_csr = SXGBE_CSR_150_250M; in sxgbe_clk_csr_set()
181 priv->clk_csr = SXGBE_CSR_250_300M; in sxgbe_clk_csr_set()
183 priv->clk_csr = SXGBE_CSR_300_350M; in sxgbe_clk_csr_set()
185 priv->clk_csr = SXGBE_CSR_350_400M; in sxgbe_clk_csr_set()
187 priv->clk_csr = SXGBE_CSR_400_500M; in sxgbe_clk_csr_set()
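
sxgbe_clk_csr_set() picks a CSR constant from the bus clock rate. A small sketch of that band selection follows; the MHz cutoffs are inferred from the SXGBE_CSR_* constant names above, not taken from a datasheet.

    #include <stdio.h>

    enum csr_range { CSR_100_150M, CSR_150_250M, CSR_250_300M,
                     CSR_300_350M, CSR_350_400M, CSR_400_500M };

    /* Map a clock rate in Hz to its CSR band. */
    static enum csr_range clk_to_csr(unsigned long hz)
    {
            unsigned long mhz = hz / 1000000UL;

            if (mhz < 150) return CSR_100_150M;
            if (mhz < 250) return CSR_150_250M;
            if (mhz < 300) return CSR_250_300M;
            if (mhz < 350) return CSR_300_350M;
            if (mhz < 400) return CSR_350_400M;
            return CSR_400_500M;
    }

    int main(void)
    {
            printf("%d\n", clk_to_csr(275000000UL));  /* -> CSR_250_300M */
            return 0;
    }
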
205 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_adjust_link() local
206 struct phy_device *phydev = priv->phydev; in sxgbe_adjust_link()
218 if (phydev->speed != priv->speed) { in sxgbe_adjust_link()
231 netif_err(priv, link, dev, in sxgbe_adjust_link()
236 priv->speed = phydev->speed; in sxgbe_adjust_link()
237 priv->hw->mac->set_speed(priv->ioaddr, speed); in sxgbe_adjust_link()
240 if (!priv->oldlink) { in sxgbe_adjust_link()
242 priv->oldlink = 1; in sxgbe_adjust_link()
244 } else if (priv->oldlink) { in sxgbe_adjust_link()
246 priv->oldlink = 0; in sxgbe_adjust_link()
247 priv->speed = SPEED_UNKNOWN; in sxgbe_adjust_link()
250 if (new_state && netif_msg_link(priv)) in sxgbe_adjust_link()
254 sxgbe_eee_adjust(priv); in sxgbe_adjust_link()
270 struct sxgbe_priv_data *priv = netdev_priv(ndev); in sxgbe_init_phy() local
271 int phy_iface = priv->plat->interface; in sxgbe_init_phy()
274 priv->oldlink = 0; in sxgbe_init_phy()
275 priv->speed = SPEED_UNKNOWN; in sxgbe_init_phy()
276 priv->oldduplex = DUPLEX_UNKNOWN; in sxgbe_init_phy()
278 if (priv->plat->phy_bus_name) in sxgbe_init_phy()
280 priv->plat->phy_bus_name, priv->plat->bus_id); in sxgbe_init_phy()
283 priv->plat->bus_id); in sxgbe_init_phy()
286 priv->plat->phy_addr); in sxgbe_init_phy()
310 priv->phydev = phydev; in sxgbe_init_phy()
321 static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv) in sxgbe_clear_descriptors() argument
324 unsigned int txsize = priv->dma_tx_size; in sxgbe_clear_descriptors()
325 unsigned int rxsize = priv->dma_rx_size; in sxgbe_clear_descriptors()
330 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i], in sxgbe_clear_descriptors()
331 priv->use_riwt, priv->mode, in sxgbe_clear_descriptors()
337 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); in sxgbe_clear_descriptors()
346 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_init_rx_buffers() local
354 rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, in sxgbe_init_rx_buffers()
357 if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) { in sxgbe_init_rx_buffers()
380 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_free_rx_buffers() local
383 dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i], in sxgbe_free_rx_buffers()
466 struct sxgbe_priv_data *priv = netdev_priv(dev); in init_rx_ring() local
474 netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize); in init_rx_ring()
486 rx_ring->dma_rx = dma_zalloc_coherent(priv->device, in init_rx_ring()
521 priv->dma_buf_sz = bfsize; in init_rx_ring()
536 dma_free_coherent(priv->device, in init_rx_ring()
566 struct sxgbe_priv_data *priv = netdev_priv(netd); in init_dma_desc_rings() local
567 int tx_rsize = priv->dma_tx_size; in init_dma_desc_rings()
568 int rx_rsize = priv->dma_rx_size; in init_dma_desc_rings()
572 ret = init_tx_ring(priv->device, queue_num, in init_dma_desc_rings()
573 priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
582 priv->txq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
588 priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
597 priv->rxq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
600 sxgbe_clear_descriptors(priv); in init_dma_desc_rings()
606 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
611 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
618 struct sxgbe_priv_data *priv = txqueue->priv_ptr; in tx_free_ring_skbufs() local
619 int tx_rsize = priv->dma_tx_size; in tx_free_ring_skbufs()
625 dma_unmap_single(priv->device, in tx_free_ring_skbufs()
627 priv->hw->desc->get_tx_len(tdesc), in tx_free_ring_skbufs()
637 static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv) in dma_free_tx_skbufs() argument
642 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in dma_free_tx_skbufs()
647 static void free_dma_desc_resources(struct sxgbe_priv_data *priv) in free_dma_desc_resources() argument
650 int tx_rsize = priv->dma_tx_size; in free_dma_desc_resources()
651 int rx_rsize = priv->dma_rx_size; in free_dma_desc_resources()
654 dma_free_tx_skbufs(priv); in free_dma_desc_resources()
658 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in free_dma_desc_resources()
663 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); in free_dma_desc_resources()
667 static int txring_mem_alloc(struct sxgbe_priv_data *priv) in txring_mem_alloc() argument
672 priv->txq[queue_num] = devm_kmalloc(priv->device, in txring_mem_alloc()
674 if (!priv->txq[queue_num]) in txring_mem_alloc()
681 static int rxring_mem_alloc(struct sxgbe_priv_data *priv) in rxring_mem_alloc() argument
686 priv->rxq[queue_num] = devm_kmalloc(priv->device, in rxring_mem_alloc()
688 if (!priv->rxq[queue_num]) in rxring_mem_alloc()
701 static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv) in sxgbe_mtl_operation_mode() argument
706 if (likely(priv->plat->force_sf_dma_mode)) { in sxgbe_mtl_operation_mode()
708 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
709 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
711 priv->tx_tc = SXGBE_MTL_SFMODE; in sxgbe_mtl_operation_mode()
714 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
715 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
717 priv->rx_tc = SXGBE_MTL_SFMODE; in sxgbe_mtl_operation_mode()
718 } else if (unlikely(priv->plat->force_thresh_dma_mode)) { in sxgbe_mtl_operation_mode()
720 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
721 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
722 priv->tx_tc); in sxgbe_mtl_operation_mode()
724 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
725 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
726 priv->rx_tc); in sxgbe_mtl_operation_mode()
739 struct sxgbe_priv_data *priv = tqueue->priv_ptr; in sxgbe_tx_queue_clean() local
740 unsigned int tx_rsize = priv->dma_tx_size; in sxgbe_tx_queue_clean()
744 dev_txq = netdev_get_tx_queue(priv->dev, queue_no); in sxgbe_tx_queue_clean()
748 priv->xstats.tx_clean++; in sxgbe_tx_queue_clean()
757 if (priv->hw->desc->get_tx_owner(p)) in sxgbe_tx_queue_clean()
760 if (netif_msg_tx_done(priv)) in sxgbe_tx_queue_clean()
765 dma_unmap_single(priv->device, in sxgbe_tx_queue_clean()
767 priv->hw->desc->get_tx_len(p), in sxgbe_tx_queue_clean()
777 priv->hw->desc->release_tx_desc(p); in sxgbe_tx_queue_clean()
784 sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) { in sxgbe_tx_queue_clean()
785 netif_tx_lock(priv->dev); in sxgbe_tx_queue_clean()
787 sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) { in sxgbe_tx_queue_clean()
788 if (netif_msg_tx_done(priv)) in sxgbe_tx_queue_clean()
792 netif_tx_unlock(priv->dev); in sxgbe_tx_queue_clean()
803 static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv) in sxgbe_tx_all_clean() argument
808 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in sxgbe_tx_all_clean()
813 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { in sxgbe_tx_all_clean()
814 sxgbe_enable_eee_mode(priv); in sxgbe_tx_all_clean()
815 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); in sxgbe_tx_all_clean()
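
sxgbe_tx_queue_clean() walks from the dirty cursor while hardware no longer owns each descriptor, releases it, and wakes the queue once free slots exceed SXGBE_TX_THRESH. A minimal runnable model of that reclaim loop, with an illustrative OWN flag and threshold:

    #include <stdio.h>

    #define TX_RING   8
    #define OWN_HW    1u
    #define TX_THRESH 4    /* stands in for SXGBE_TX_THRESH(priv) */

    struct tx_ring {
            unsigned int own[TX_RING];  /* per-descriptor OWN bit */
            unsigned int cur, dirty;    /* producer / consumer cursors */
    };

    static unsigned int tx_avail(const struct tx_ring *r)
    {
            return TX_RING - (r->cur - r->dirty);
    }

    static void tx_clean(struct tx_ring *r)
    {
            while (r->dirty != r->cur) {
                    unsigned int entry = r->dirty % TX_RING;

                    if (r->own[entry] & OWN_HW)  /* hw still owns it: stop */
                            break;
                    /* here the driver dma_unmap_single()s the buffer,
                     * frees the skb, and release_tx_desc()s the slot */
                    r->dirty++;
            }
            if (tx_avail(r) > TX_THRESH)
                    printf("netif_tx_wake_queue()\n");
    }

    int main(void)
    {
            struct tx_ring r = { .cur = 6, .dirty = 0 };

            tx_clean(&r);   /* all six pretend descriptors completed */
            printf("avail=%u\n", tx_avail(&r));
            return 0;
    }
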
825 static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num) in sxgbe_restart_tx_queue() argument
827 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; in sxgbe_restart_tx_queue()
828 struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev, in sxgbe_restart_tx_queue()
835 priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num); in sxgbe_restart_tx_queue()
845 priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num); in sxgbe_restart_tx_queue()
847 priv->dev->stats.tx_errors++; in sxgbe_restart_tx_queue()
859 static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv) in sxgbe_reset_all_tx_queues() argument
867 sxgbe_restart_tx_queue(priv, queue_num); in sxgbe_reset_all_tx_queues()
879 static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv) in sxgbe_get_hw_features() argument
882 struct sxgbe_hw_features *features = &priv->hw_cap; in sxgbe_get_hw_features()
885 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0); in sxgbe_get_hw_features()
902 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1); in sxgbe_get_hw_features()
917 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2); in sxgbe_get_hw_features()
937 static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv) in sxgbe_check_ether_addr() argument
939 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in sxgbe_check_ether_addr()
940 priv->hw->mac->get_umac_addr((void __iomem *) in sxgbe_check_ether_addr()
941 priv->ioaddr, in sxgbe_check_ether_addr()
942 priv->dev->dev_addr, 0); in sxgbe_check_ether_addr()
943 if (!is_valid_ether_addr(priv->dev->dev_addr)) in sxgbe_check_ether_addr()
944 eth_hw_addr_random(priv->dev); in sxgbe_check_ether_addr()
946 dev_info(priv->device, "device MAC address %pM\n", in sxgbe_check_ether_addr()
947 priv->dev->dev_addr); in sxgbe_check_ether_addr()
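
sxgbe_check_ether_addr() keeps the address read from hardware if it is valid and otherwise falls back to a random one, as eth_hw_addr_random() does. A hedged userspace sketch of that fallback, with a simplified validity test:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified is_valid_ether_addr(): non-zero and unicast. */
    static bool is_valid_ether(const unsigned char *a)
    {
            static const unsigned char zero[6];

            return memcmp(a, zero, 6) != 0 && !(a[0] & 1);
    }

    int main(void)
    {
            unsigned char mac[6] = { 0 };   /* pretend the fuses were blank */

            if (!is_valid_ether(mac)) {
                    for (int i = 0; i < 6; i++)
                            mac[i] = rand() & 0xff;
                    mac[0] = (mac[0] & 0xfe) | 0x02; /* unicast, locally admin */
            }
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }
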
958 static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv) in sxgbe_init_dma_engine() argument
963 if (priv->plat->dma_cfg) { in sxgbe_init_dma_engine()
964 pbl = priv->plat->dma_cfg->pbl; in sxgbe_init_dma_engine()
965 fixed_burst = priv->plat->dma_cfg->fixed_burst; in sxgbe_init_dma_engine()
966 burst_map = priv->plat->dma_cfg->burst_map; in sxgbe_init_dma_engine()
970 priv->hw->dma->cha_init(priv->ioaddr, queue_num, in sxgbe_init_dma_engine()
972 (priv->txq[queue_num])->dma_tx_phy, in sxgbe_init_dma_engine()
973 (priv->rxq[queue_num])->dma_rx_phy, in sxgbe_init_dma_engine()
974 priv->dma_tx_size, priv->dma_rx_size); in sxgbe_init_dma_engine()
976 return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map); in sxgbe_init_dma_engine()
985 static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv) in sxgbe_init_mtl_engine() argument
990 priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num, in sxgbe_init_mtl_engine()
991 priv->hw_cap.tx_mtl_qsize); in sxgbe_init_mtl_engine()
992 priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num); in sxgbe_init_mtl_engine()
1002 static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv) in sxgbe_disable_mtl_engine() argument
1007 priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num); in sxgbe_disable_mtl_engine()
1031 static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv) in sxgbe_tx_init_coalesce() argument
1036 struct sxgbe_tx_queue *p = priv->txq[queue_num]; in sxgbe_tx_init_coalesce()
1040 (unsigned long)&priv->txq[queue_num]); in sxgbe_tx_init_coalesce()
1046 static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv) in sxgbe_tx_del_timer() argument
1051 struct sxgbe_tx_queue *p = priv->txq[queue_num]; in sxgbe_tx_del_timer()
1067 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_open() local
1070 clk_prepare_enable(priv->sxgbe_clk); in sxgbe_open()
1072 sxgbe_check_ether_addr(priv); in sxgbe_open()
1083 priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE); in sxgbe_open()
1084 priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE); in sxgbe_open()
1085 priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE); in sxgbe_open()
1086 priv->tx_tc = TC_DEFAULT; in sxgbe_open()
1087 priv->rx_tc = TC_DEFAULT; in sxgbe_open()
1091 ret = sxgbe_init_dma_engine(priv); in sxgbe_open()
1098 sxgbe_init_mtl_engine(priv); in sxgbe_open()
1101 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); in sxgbe_open()
1104 priv->hw->mac->core_init(priv->ioaddr); in sxgbe_open()
1106 priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); in sxgbe_open()
1110 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, in sxgbe_open()
1114 __func__, priv->irq, ret); in sxgbe_open()
1121 if (priv->lpi_irq != dev->irq) { in sxgbe_open()
1122 ret = devm_request_irq(priv->device, priv->lpi_irq, in sxgbe_open()
1127 __func__, priv->lpi_irq, ret); in sxgbe_open()
1134 ret = devm_request_irq(priv->device, in sxgbe_open()
1135 (priv->txq[queue_num])->irq_no, in sxgbe_open()
1137 dev->name, priv->txq[queue_num]); in sxgbe_open()
1140 __func__, priv->irq, ret); in sxgbe_open()
1147 ret = devm_request_irq(priv->device, in sxgbe_open()
1148 (priv->rxq[queue_num])->irq_no, in sxgbe_open()
1150 dev->name, priv->rxq[queue_num]); in sxgbe_open()
1153 __func__, priv->irq, ret); in sxgbe_open()
1159 priv->hw->mac->enable_tx(priv->ioaddr, true); in sxgbe_open()
1160 priv->hw->mac->enable_rx(priv->ioaddr, true); in sxgbe_open()
1163 sxgbe_mtl_operation_mode(priv); in sxgbe_open()
1166 memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats)); in sxgbe_open()
1168 priv->xstats.tx_threshold = priv->tx_tc; in sxgbe_open()
1169 priv->xstats.rx_threshold = priv->rx_tc; in sxgbe_open()
1173 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_open()
1174 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_open()
1176 if (priv->phydev) in sxgbe_open()
1177 phy_start(priv->phydev); in sxgbe_open()
1180 sxgbe_tx_init_coalesce(priv); in sxgbe_open()
1182 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { in sxgbe_open()
1183 priv->rx_riwt = SXGBE_MAX_DMA_RIWT; in sxgbe_open()
1184 priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT); in sxgbe_open()
1187 priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER; in sxgbe_open()
1188 priv->eee_enabled = sxgbe_eee_init(priv); in sxgbe_open()
1190 napi_enable(&priv->napi); in sxgbe_open()
1196 free_dma_desc_resources(priv); in sxgbe_open()
1197 if (priv->phydev) in sxgbe_open()
1198 phy_disconnect(priv->phydev); in sxgbe_open()
1200 clk_disable_unprepare(priv->sxgbe_clk); in sxgbe_open()
1213 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_release() local
1215 if (priv->eee_enabled) in sxgbe_release()
1216 del_timer_sync(&priv->eee_ctrl_timer); in sxgbe_release()
1219 if (priv->phydev) { in sxgbe_release()
1220 phy_stop(priv->phydev); in sxgbe_release()
1221 phy_disconnect(priv->phydev); in sxgbe_release()
1222 priv->phydev = NULL; in sxgbe_release()
1227 napi_disable(&priv->napi); in sxgbe_release()
1230 sxgbe_tx_del_timer(priv); in sxgbe_release()
1233 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_release()
1234 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_release()
1237 sxgbe_disable_mtl_engine(priv); in sxgbe_release()
1240 free_dma_desc_resources(priv); in sxgbe_release()
1243 priv->hw->mac->enable_tx(priv->ioaddr, false); in sxgbe_release()
1244 priv->hw->mac->enable_rx(priv->ioaddr, false); in sxgbe_release()
1246 clk_disable_unprepare(priv->sxgbe_clk); in sxgbe_release()
1251 static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, in sxgbe_tso_prepare() argument
1261 first_desc->tdes01 = dma_map_single(priv->device, skb->data, in sxgbe_tso_prepare()
1263 if (dma_mapping_error(priv->device, first_desc->tdes01)) in sxgbe_tso_prepare()
1267 priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len, in sxgbe_tso_prepare()
1286 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_xmit() local
1287 unsigned int tx_rsize = priv->dma_tx_size; in sxgbe_xmit()
1288 struct sxgbe_tx_queue *tqueue = priv->txq[txq_index]; in sxgbe_xmit()
1311 if (priv->tx_path_in_lpi_mode) in sxgbe_xmit()
1312 sxgbe_disable_eee_mode(priv); in sxgbe_xmit()
1339 priv->hw->desc->tx_ctxt_desc_set_mss( in sxgbe_xmit()
1341 priv->hw->desc->tx_ctxt_desc_set_tcmssv( in sxgbe_xmit()
1343 priv->hw->desc->tx_ctxt_desc_reset_ostc( in sxgbe_xmit()
1345 priv->hw->desc->tx_ctxt_desc_set_ctxt( in sxgbe_xmit()
1347 priv->hw->desc->tx_ctxt_desc_set_owner( in sxgbe_xmit()
1355 sxgbe_tso_prepare(priv, first_desc, skb); in sxgbe_xmit()
1357 tx_desc->tdes01 = dma_map_single(priv->device, in sxgbe_xmit()
1359 if (dma_mapping_error(priv->device, tx_desc->tdes01)) in sxgbe_xmit()
1363 priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen, in sxgbe_xmit()
1374 tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len, in sxgbe_xmit()
1381 priv->hw->desc->prepare_tx_desc(tx_desc, 0, len, in sxgbe_xmit()
1387 priv->hw->desc->set_tx_owner(tx_desc); in sxgbe_xmit()
1391 priv->hw->desc->close_tx_desc(tx_desc); in sxgbe_xmit()
1398 priv->hw->desc->clear_tx_ic(tx_desc); in sxgbe_xmit()
1399 priv->xstats.tx_reset_ic_bit++; in sxgbe_xmit()
1407 priv->hw->desc->set_tx_owner(first_desc); in sxgbe_xmit()
1415 netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n", in sxgbe_xmit()
1421 netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n", in sxgbe_xmit()
1432 priv->hw->desc->tx_enable_tstamp(first_desc); in sxgbe_xmit()
1438 priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); in sxgbe_xmit()
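
Note the ownership ordering in sxgbe_xmit(): set_tx_owner() runs on each fragment descriptor inside the loop (line 1387), and only after the chain is complete on the first descriptor (line 1407). A tiny model of that hand-off; kernel drivers typically issue a write barrier (wmb()) before the final store so DMA never observes a half-built chain.

    #include <stdio.h>

    #define OWN_HW 1u

    struct desc { unsigned int flags; };

    static void set_tx_owner(struct desc *d) { d->flags |= OWN_HW; }

    int main(void)
    {
            struct desc chain[3] = { {0} };

            for (int i = 1; i < 3; i++)     /* fragments first */
                    set_tx_owner(&chain[i]);
            /* wmb() would go here in kernel code */
            set_tx_owner(&chain[0]);        /* first descriptor last */
            printf("chain armed: %u %u %u\n",
                   chain[0].flags, chain[1].flags, chain[2].flags);
            return 0;
    }
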
1451 static void sxgbe_rx_refill(struct sxgbe_priv_data *priv) in sxgbe_rx_refill() argument
1453 unsigned int rxsize = priv->dma_rx_size; in sxgbe_rx_refill()
1454 int bfsize = priv->dma_buf_sz; in sxgbe_rx_refill()
1455 u8 qnum = priv->cur_rx_qnum; in sxgbe_rx_refill()
1457 for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; in sxgbe_rx_refill()
1458 priv->rxq[qnum]->dirty_rx++) { in sxgbe_rx_refill()
1459 unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; in sxgbe_rx_refill()
1462 p = priv->rxq[qnum]->dma_rx + entry; in sxgbe_rx_refill()
1464 if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { in sxgbe_rx_refill()
1467 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); in sxgbe_rx_refill()
1472 priv->rxq[qnum]->rx_skbuff[entry] = skb; in sxgbe_rx_refill()
1473 priv->rxq[qnum]->rx_skbuff_dma[entry] = in sxgbe_rx_refill()
1474 dma_map_single(priv->device, skb->data, bfsize, in sxgbe_rx_refill()
1478 priv->rxq[qnum]->rx_skbuff_dma[entry]; in sxgbe_rx_refill()
1483 priv->hw->desc->set_rx_owner(p); in sxgbe_rx_refill()
1484 priv->hw->desc->set_rx_int_on_com(p); in sxgbe_rx_refill()
1497 static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit) in sxgbe_rx() argument
1499 u8 qnum = priv->cur_rx_qnum; in sxgbe_rx()
1500 unsigned int rxsize = priv->dma_rx_size; in sxgbe_rx()
1501 unsigned int entry = priv->rxq[qnum]->cur_rx; in sxgbe_rx()
1512 p = priv->rxq[qnum]->dma_rx + entry; in sxgbe_rx()
1514 if (priv->hw->desc->get_rx_owner(p)) in sxgbe_rx()
1519 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; in sxgbe_rx()
1520 prefetch(priv->rxq[qnum]->dma_rx + next_entry); in sxgbe_rx()
1526 status = priv->hw->desc->rx_wbstatus(p, &priv->xstats, in sxgbe_rx()
1532 if (unlikely(!priv->rxcsum_insertion)) in sxgbe_rx()
1535 skb = priv->rxq[qnum]->rx_skbuff[entry]; in sxgbe_rx()
1538 netdev_err(priv->dev, "rx descriptor is not consistent\n"); in sxgbe_rx()
1541 priv->rxq[qnum]->rx_skbuff[entry] = NULL; in sxgbe_rx()
1543 frame_len = priv->hw->desc->get_rx_frame_len(p); in sxgbe_rx()
1551 napi_gro_receive(&priv->napi, skb); in sxgbe_rx()
1556 sxgbe_rx_refill(priv); in sxgbe_rx()
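
sxgbe_rx_refill() keeps two free-running cursors and maps them into the ring with a modulo (line 1459 above: entry = dirty_rx % rxsize). A tiny runnable model of that arithmetic:

    #include <stdio.h>

    #define RX_RING 8   /* stands in for priv->dma_rx_size */

    int main(void)
    {
            unsigned int cur_rx = 11, dirty_rx = 7; /* free-running cursors */

            for (; cur_rx - dirty_rx > 0; dirty_rx++) {
                    unsigned int entry = dirty_rx % RX_RING;

                    /* here the driver allocates an skb, dma-maps it, and
                     * hands the slot back via set_rx_owner() and
                     * set_rx_int_on_com() */
                    printf("refill slot %u\n", entry);
            }
            return 0;
    }
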
1571 struct sxgbe_priv_data *priv = container_of(napi, in sxgbe_poll() local
1574 u8 qnum = priv->cur_rx_qnum; in sxgbe_poll()
1576 priv->xstats.napi_poll++; in sxgbe_poll()
1578 sxgbe_tx_all_clean(priv); in sxgbe_poll()
1580 work_done = sxgbe_rx(priv, budget); in sxgbe_poll()
1583 priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); in sxgbe_poll()
1599 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_tx_timeout() local
1601 sxgbe_reset_all_tx_queues(priv); in sxgbe_tx_timeout()
1615 struct sxgbe_priv_data *priv = netdev_priv(netdev); in sxgbe_common_interrupt() local
1618 status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats); in sxgbe_common_interrupt()
1621 priv->xstats.tx_lpi_entry_n++; in sxgbe_common_interrupt()
1622 priv->tx_path_in_lpi_mode = true; in sxgbe_common_interrupt()
1625 priv->xstats.tx_lpi_exit_n++; in sxgbe_common_interrupt()
1626 priv->tx_path_in_lpi_mode = false; in sxgbe_common_interrupt()
1629 priv->xstats.rx_lpi_entry_n++; in sxgbe_common_interrupt()
1631 priv->xstats.rx_lpi_exit_n++; in sxgbe_common_interrupt()
1646 struct sxgbe_priv_data *priv = txq->priv_ptr; in sxgbe_tx_interrupt() local
1649 status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no, in sxgbe_tx_interrupt()
1650 &priv->xstats); in sxgbe_tx_interrupt()
1653 napi_schedule(&priv->napi); in sxgbe_tx_interrupt()
1657 sxgbe_restart_tx_queue(priv, txq->queue_no); in sxgbe_tx_interrupt()
1661 (priv->tx_tc != SXGBE_MTL_SFMODE) && in sxgbe_tx_interrupt()
1662 (priv->tx_tc < 512))) { in sxgbe_tx_interrupt()
1664 priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64; in sxgbe_tx_interrupt()
1665 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, in sxgbe_tx_interrupt()
1666 txq->queue_no, priv->tx_tc); in sxgbe_tx_interrupt()
1667 priv->xstats.tx_threshold = priv->tx_tc; in sxgbe_tx_interrupt()
1683 struct sxgbe_priv_data *priv = rxq->priv_ptr; in sxgbe_rx_interrupt() local
1686 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no, in sxgbe_rx_interrupt()
1687 &priv->xstats); in sxgbe_rx_interrupt()
1689 if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) { in sxgbe_rx_interrupt()
1690 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no); in sxgbe_rx_interrupt()
1691 __napi_schedule(&priv->napi); in sxgbe_rx_interrupt()
1696 (priv->rx_tc != SXGBE_MTL_SFMODE) && in sxgbe_rx_interrupt()
1697 (priv->rx_tc < 128))) { in sxgbe_rx_interrupt()
1699 priv->rx_tc += 32; in sxgbe_rx_interrupt()
1700 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, in sxgbe_rx_interrupt()
1701 rxq->queue_no, priv->rx_tc); in sxgbe_rx_interrupt()
1702 priv->xstats.rx_threshold = priv->rx_tc; in sxgbe_rx_interrupt()
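
The interrupt lines above follow the standard NAPI handoff: the hard irq masks the queue's DMA interrupt and schedules the poller, and the poller re-enables the interrupt only when it finishes under budget. A minimal sketch of that flow, with the budget accounting faked for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static bool irq_enabled = true;

    /* Poll path: re-enable the queue interrupt when done < budget,
     * mirroring enable_dma_irq() after sxgbe_rx() finishes early. */
    static int rx_poll(int budget)
    {
            int done = budget - 2;          /* pretend two slots were idle */

            if (done < budget) {
                    irq_enabled = true;     /* enable_dma_irq() */
                    printf("napi complete\n");
            }
            return done;
    }

    /* Hard-irq path: mask the queue and hand off to the poller, as
     * sxgbe_rx_interrupt() does with disable_dma_irq() and
     * __napi_schedule(). */
    static void rx_interrupt(void)
    {
            irq_enabled = false;
            printf("__napi_schedule()\n");
            rx_poll(64);                    /* the softirq would run this */
    }

    int main(void)
    {
            rx_interrupt();
            printf("irq %s\n", irq_enabled ? "on" : "off");
            return 0;
    }
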
1731 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_get_stats64() local
1732 void __iomem *ioaddr = priv->ioaddr; in sxgbe_get_stats64()
1735 spin_lock(&priv->stats_lock); in sxgbe_get_stats64()
1779 spin_unlock(&priv->stats_lock); in sxgbe_get_stats64()
1796 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_set_features() local
1801 priv->hw->mac->enable_rx_csum(priv->ioaddr); in sxgbe_set_features()
1802 priv->rxcsum_insertion = true; in sxgbe_set_features()
1804 priv->hw->mac->disable_rx_csum(priv->ioaddr); in sxgbe_set_features()
1805 priv->rxcsum_insertion = false; in sxgbe_set_features()
1878 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_set_rx_mode() local
1879 void __iomem *ioaddr = (void __iomem *)priv->ioaddr; in sxgbe_set_rx_mode()
1954 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_poll_controller() local
1956 disable_irq(priv->irq); in sxgbe_poll_controller()
1957 sxgbe_rx_interrupt(priv->irq, dev); in sxgbe_poll_controller()
1958 enable_irq(priv->irq); in sxgbe_poll_controller()
1972 struct sxgbe_priv_data *priv = netdev_priv(dev); in sxgbe_ioctl() local
1982 if (!priv->phydev) in sxgbe_ioctl()
1984 ret = phy_mii_ioctl(priv->phydev, rq, cmd); in sxgbe_ioctl()
2036 static int sxgbe_hw_init(struct sxgbe_priv_data * const priv) in sxgbe_hw_init() argument
2040 priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL); in sxgbe_hw_init()
2041 if (!priv->hw) in sxgbe_hw_init()
2045 sxgbe_get_ops(priv->hw); in sxgbe_hw_init()
2048 ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr); in sxgbe_hw_init()
2049 priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16; in sxgbe_hw_init()
2050 priv->hw->ctrl_id = (ctrl_ids & 0x000000ff); in sxgbe_hw_init()
2052 priv->hw->ctrl_uid, priv->hw->ctrl_id); in sxgbe_hw_init()
2055 if (!sxgbe_get_hw_features(priv)) in sxgbe_hw_init()
2058 if (priv->hw_cap.tx_csum_offload) in sxgbe_hw_init()
2061 if (priv->hw_cap.rx_csum_offload) in sxgbe_hw_init()
2097 struct sxgbe_priv_data *priv; in sxgbe_drv_probe() local
2109 priv = netdev_priv(ndev); in sxgbe_drv_probe()
2110 priv->device = device; in sxgbe_drv_probe()
2111 priv->dev = ndev; in sxgbe_drv_probe()
2114 priv->plat = plat_dat; in sxgbe_drv_probe()
2115 priv->ioaddr = addr; in sxgbe_drv_probe()
2117 ret = sxgbe_sw_reset(priv->ioaddr); in sxgbe_drv_probe()
2125 ret = sxgbe_hw_init(priv); in sxgbe_drv_probe()
2130 ret = txring_mem_alloc(priv); in sxgbe_drv_probe()
2134 ret = rxring_mem_alloc(priv); in sxgbe_drv_probe()
2149 priv->msg_enable = netif_msg_init(debug, default_msg_level); in sxgbe_drv_probe()
2152 if (priv->hw_cap.tcpseg_offload) { in sxgbe_drv_probe()
2154 priv->hw->dma->enable_tso(priv->ioaddr, queue_num); in sxgbe_drv_probe()
2159 if (priv->hw_cap.rx_csum_offload) { in sxgbe_drv_probe()
2160 priv->hw->mac->enable_rx_csum(priv->ioaddr); in sxgbe_drv_probe()
2161 priv->rxcsum_insertion = true; in sxgbe_drv_probe()
2165 priv->rx_pause = 1; in sxgbe_drv_probe()
2166 priv->tx_pause = 1; in sxgbe_drv_probe()
2169 if (!priv->plat->riwt_off) { in sxgbe_drv_probe()
2170 priv->use_riwt = 1; in sxgbe_drv_probe()
2174 netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64); in sxgbe_drv_probe()
2176 spin_lock_init(&priv->stats_lock); in sxgbe_drv_probe()
2178 priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME); in sxgbe_drv_probe()
2179 if (IS_ERR(priv->sxgbe_clk)) { in sxgbe_drv_probe()
2191 if (!priv->plat->clk_csr) in sxgbe_drv_probe()
2192 sxgbe_clk_csr_set(priv); in sxgbe_drv_probe()
2194 priv->clk_csr = priv->plat->clk_csr; in sxgbe_drv_probe()
2200 __func__, priv->plat->bus_id); in sxgbe_drv_probe()
2210 sxgbe_check_ether_addr(priv); in sxgbe_drv_probe()
2212 return priv; in sxgbe_drv_probe()
2217 clk_put(priv->sxgbe_clk); in sxgbe_drv_probe()
2219 netif_napi_del(&priv->napi); in sxgbe_drv_probe()
2221 kfree(priv->hw); in sxgbe_drv_probe()
2236 struct sxgbe_priv_data *priv = netdev_priv(ndev); in sxgbe_drv_remove() local
2242 priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); in sxgbe_drv_remove()
2245 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_drv_remove()
2246 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_drv_remove()
2248 priv->hw->mac->enable_tx(priv->ioaddr, false); in sxgbe_drv_remove()
2249 priv->hw->mac->enable_rx(priv->ioaddr, false); in sxgbe_drv_remove()
2255 clk_put(priv->sxgbe_clk); in sxgbe_drv_remove()
2257 netif_napi_del(&priv->napi); in sxgbe_drv_remove()
2259 kfree(priv->hw); in sxgbe_drv_remove()