Lines matching refs: mdp
411 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_write() local
412 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_write()
417 iowrite32(data, mdp->addr + offset); in sh_eth_write()
422 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_read() local
423 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_read()
428 return ioread32(mdp->addr + offset); in sh_eth_read()
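
sh_eth_write()/sh_eth_read() resolve an abstract register index through a per-SoC offset table before touching MMIO; table slots for registers a given SoC lacks hold SH_ETH_OFFSET_INVALID and are skipped. A minimal user-space model of that lookup, with hypothetical register indices, offsets, and an 0xffff invalid marker (nothing here is verbatim from the driver):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical indices and per-variant offset tables, modelled on the
 * driver's enum_index -> mdp->reg_offset[] lookup. */
enum { REG_EDMR, REG_EDTRR, REG_RDLAR, REG_MAX };

static const uint16_t reg_offset_fast[REG_MAX]    = { 0x0000, 0x0008, 0x0030 };
static const uint16_t reg_offset_gigabit[REG_MAX] = { 0x0000, 0x0010, 0xffff };

static void mmio_write32(uint8_t *base, const uint16_t *tab, int idx, uint32_t v)
{
	uint16_t off = tab[idx];

	if (off == 0xffff)	/* register absent on this variant: skip */
		return;
	*(volatile uint32_t *)(base + off) = v;
}

int main(void)
{
	static _Alignas(uint32_t) uint8_t fake_mmio[0x100]; /* stands in for mdp->addr */

	mmio_write32(fake_mmio, reg_offset_gigabit, REG_EDTRR, 0x3);
	mmio_write32(fake_mmio, reg_offset_gigabit, REG_RDLAR, 0x1); /* silently skipped */
	mmio_write32(fake_mmio, reg_offset_fast, REG_RDLAR, 0x1);
	printf("gigabit EDTRR image: 0x%x\n", *(uint32_t *)(fake_mmio + 0x10));
	return 0;
}
```
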
431 static bool sh_eth_is_gether(struct sh_eth_private *mdp) in sh_eth_is_gether() argument
433 return mdp->reg_offset == sh_eth_offset_gigabit; in sh_eth_is_gether()
436 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp) in sh_eth_is_rz_fast_ether() argument
438 return mdp->reg_offset == sh_eth_offset_fast_rz; in sh_eth_is_rz_fast_ether()
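
sh_eth_is_gether()/sh_eth_is_rz_fast_ether() detect the hardware variant by comparing mdp->reg_offset against the known offset tables, so no separate "variant" field needs to be carried around. The pointer-identity idiom in isolation (hypothetical table names; a sketch rather than driver code):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical offset tables; only their addresses matter for the test. */
static const uint16_t offset_fast_sh4[8];
static const uint16_t offset_gigabit[8];

struct priv {
	const uint16_t *reg_offset;	/* always points at one table above */
};

static bool is_gether(const struct priv *p)
{
	return p->reg_offset == offset_gigabit;	/* identity, not contents */
}

int main(void)
{
	struct priv p = { .reg_offset = offset_gigabit };
	struct priv q = { .reg_offset = offset_fast_sh4 };

	printf("gether? %d %d\n", is_gether(&p), is_gether(&q));	/* 1 0 */
	return 0;
}
```
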
444 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_select_mii() local
446 switch (mdp->phy_interface) { in sh_eth_select_mii()
468 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_duplex() local
470 if (mdp->duplex) /* Full */ in sh_eth_set_duplex()
479 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_r8a777x() local
481 switch (mdp->speed) { in sh_eth_set_rate_r8a777x()
544 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7724() local
546 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
584 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7757() local
586 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
650 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_giga() local
652 switch (mdp->speed) { in sh_eth_set_rate_giga()
700 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset() local
703 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); in sh_eth_chip_reset()
709 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_gether() local
711 switch (mdp->speed) { in sh_eth_set_rate_gether()
785 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset_r8a7740() local
788 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); in sh_eth_chip_reset_r8a7740()
920 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_reset() local
923 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { in sh_eth_reset()
943 if (mdp->cd->hw_crc) in sh_eth_reset()
947 if (mdp->cd->select_mii) in sh_eth_reset()
970 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) in cpu_to_edmac() argument
972 switch (mdp->edmac_endian) { in cpu_to_edmac()
981 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) in edmac_to_cpu() argument
983 switch (mdp->edmac_endian) { in edmac_to_cpu()
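
cpu_to_edmac()/edmac_to_cpu() choose the byte order of descriptor words at run time from mdp->edmac_endian, because the EDMAC can be wired big- or little-endian relative to the CPU; the driver builds them on cpu_to_le32()/cpu_to_be32(). A standalone sketch of the same dispatch (the enum is redeclared locally so this compiles outside the kernel, and the host is assumed little-endian for the open-coded swap):

```c
#include <stdint.h>
#include <stdio.h>

enum edmac_endian { EDMAC_LITTLE, EDMAC_BIG };	/* redeclared for a standalone build */

static uint32_t bswap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* Host assumed little-endian here; the kernel helpers use
 * cpu_to_le32()/cpu_to_be32(), so host order never matters there. */
static uint32_t cpu_to_edmac(enum edmac_endian e, uint32_t x)
{
	return e == EDMAC_BIG ? bswap32(x) : x;
}

int main(void)
{
	printf("0x%08x -> 0x%08x on a big-endian EDMAC\n",
	       0x12345678u, cpu_to_edmac(EDMAC_BIG, 0x12345678u));
	return 0;
}
```
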
1023 static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) in sh_eth_get_edtrr_trns() argument
1025 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) in sh_eth_get_edtrr_trns()
1124 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_free() local
1128 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1129 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1130 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1132 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1133 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1136 if (mdp->tx_skbuff) { in sh_eth_ring_free()
1137 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_ring_free()
1138 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_ring_free()
1140 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1141 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
1143 if (mdp->rx_ring) { in sh_eth_ring_free()
1144 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_free()
1145 dma_free_coherent(NULL, ringsize, mdp->rx_ring, in sh_eth_ring_free()
1146 mdp->rx_desc_dma); in sh_eth_ring_free()
1147 mdp->rx_ring = NULL; in sh_eth_ring_free()
1150 if (mdp->tx_ring) { in sh_eth_ring_free()
1151 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_free()
1152 dma_free_coherent(NULL, ringsize, mdp->tx_ring, in sh_eth_ring_free()
1153 mdp->tx_desc_dma); in sh_eth_ring_free()
1154 mdp->tx_ring = NULL; in sh_eth_ring_free()
1161 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_format() local
1166 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1167 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1168 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_ring_format()
1172 mdp->cur_rx = 0; in sh_eth_ring_format()
1173 mdp->cur_tx = 0; in sh_eth_ring_format()
1174 mdp->dirty_rx = 0; in sh_eth_ring_format()
1175 mdp->dirty_tx = 0; in sh_eth_ring_format()
1177 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1180 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1182 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1189 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_ring_format()
1196 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1199 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1200 rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); in sh_eth_ring_format()
1201 rxdesc->addr = cpu_to_edmac(mdp, dma_addr); in sh_eth_ring_format()
1202 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); in sh_eth_ring_format()
1206 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1207 if (sh_eth_is_gether(mdp) || in sh_eth_ring_format()
1208 sh_eth_is_rz_fast_ether(mdp)) in sh_eth_ring_format()
1209 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1213 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1217 rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE); in sh_eth_ring_format()
1219 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1222 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1223 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1224 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1225 txdesc->status = cpu_to_edmac(mdp, TD_TFP); in sh_eth_ring_format()
1226 txdesc->len = cpu_to_edmac(mdp, 0); in sh_eth_ring_format()
1229 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1230 if (sh_eth_is_gether(mdp) || in sh_eth_ring_format()
1231 sh_eth_is_rz_fast_ether(mdp)) in sh_eth_ring_format()
1232 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
1236 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); in sh_eth_ring_format()
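
sh_eth_ring_format() arms each RX descriptor for the hardware by setting its active bits (RD_RACT | RD_RFP) and flags the final descriptor of each ring with a list-end bit (RD_RDLE on RX, TD_TDLE on TX) so the DMA engine wraps back to entry 0. A compact model of that marking pass; the bit values and struct layout below are invented, only the pattern matches:

```c
#include <stdint.h>
#include <stdio.h>

/* Invented bit values; the real RD_* definitions live in the driver header. */
#define RD_RACT	(1u << 31)	/* descriptor owned by the DMA engine */
#define RD_RFP	(1u << 30)	/* frame-position marker */
#define RD_RDLE	(1u << 27)	/* last descriptor: DMA wraps after this one */

struct rxdesc { uint32_t status, len, addr, pad; };

static void ring_format(struct rxdesc *ring, int n)
{
	for (int i = 0; i < n; i++)
		ring[i].status = RD_RACT | RD_RFP;	/* hand slot to hardware */
	ring[n - 1].status |= RD_RDLE;			/* mark the ring end */
}

int main(void)
{
	struct rxdesc ring[4];

	ring_format(ring, 4);
	for (int i = 0; i < 4; i++)
		printf("desc %d status 0x%08x\n", i, ring[i].status);
	return 0;
}
```
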
1242 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_init() local
1250 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1252 if (mdp->cd->rpadir) in sh_eth_ring_init()
1253 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1256 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), in sh_eth_ring_init()
1258 if (!mdp->rx_skbuff) in sh_eth_ring_init()
1261 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), in sh_eth_ring_init()
1263 if (!mdp->tx_skbuff) in sh_eth_ring_init()
1267 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1268 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, in sh_eth_ring_init()
1270 if (!mdp->rx_ring) in sh_eth_ring_init()
1273 mdp->dirty_rx = 0; in sh_eth_ring_init()
1276 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1277 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, in sh_eth_ring_init()
1279 if (!mdp->tx_ring) in sh_eth_ring_init()
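
sh_eth_ring_init() acquires four resources in order (two skb pointer arrays via kcalloc(), two descriptor rings via dma_alloc_coherent()) and unwinds on any failure; sh_eth_ring_free() both frees and NULLs each pointer, so it is safe to run against a partially built state. A user-space sketch of that allocate-or-unwind shape, with calloc()/free() standing in for the kernel allocators:

```c
#include <stdlib.h>

struct ring_state {
	void **rx_skbuff, **tx_skbuff;
	void *rx_ring, *tx_ring;
};

static void ring_free(struct ring_state *s)
{
	/* Free and NULL everything: safe on a partially initialized state. */
	free(s->rx_skbuff); s->rx_skbuff = NULL;
	free(s->tx_skbuff); s->tx_skbuff = NULL;
	free(s->rx_ring);   s->rx_ring = NULL;
	free(s->tx_ring);   s->tx_ring = NULL;
}

static int ring_init(struct ring_state *s, size_t nrx, size_t ntx, size_t desc_sz)
{
	s->rx_skbuff = calloc(nrx, sizeof(void *));
	if (!s->rx_skbuff)
		goto err;
	s->tx_skbuff = calloc(ntx, sizeof(void *));
	if (!s->tx_skbuff)
		goto err;
	s->rx_ring = calloc(nrx, desc_sz);	/* dma_alloc_coherent() in the driver */
	if (!s->rx_ring)
		goto err;
	s->tx_ring = calloc(ntx, desc_sz);
	if (!s->tx_ring)
		goto err;
	return 0;
err:
	ring_free(s);	/* NULLed pointers make partial unwind trivial */
	return -1;
}

int main(void)
{
	struct ring_state s = { 0 };

	if (ring_init(&s, 64, 64, 16) == 0)
		ring_free(&s);
	return 0;
}
```
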
1293 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_init() local
1301 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1306 if (mdp->cd->rpadir) in sh_eth_dev_init()
1307 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); in sh_eth_dev_init()
1313 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1320 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1326 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1328 if (mdp->cd->bculr) in sh_eth_dev_init()
1331 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1333 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1342 mdp->irq_enabled = true; in sh_eth_dev_init()
1343 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1348 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; in sh_eth_dev_init()
1352 if (mdp->cd->set_rate) in sh_eth_dev_init()
1353 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1356 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1360 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1366 if (mdp->cd->apr) in sh_eth_dev_init()
1368 if (mdp->cd->mpr) in sh_eth_dev_init()
1370 if (mdp->cd->tpauser) in sh_eth_dev_init()
1385 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_exit() local
1391 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1392 mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); in sh_eth_dev_exit()
1416 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_txfree() local
1421 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_txfree()
1422 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_txfree()
1423 txdesc = &mdp->tx_ring[entry]; in sh_eth_txfree()
1424 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) in sh_eth_txfree()
1428 netif_info(mdp, tx_done, ndev, in sh_eth_txfree()
1430 entry, edmac_to_cpu(mdp, txdesc->status)); in sh_eth_txfree()
1432 if (mdp->tx_skbuff[entry]) { in sh_eth_txfree()
1434 edmac_to_cpu(mdp, txdesc->addr), in sh_eth_txfree()
1435 edmac_to_cpu(mdp, txdesc->len) >> 16, in sh_eth_txfree()
1437 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_txfree()
1438 mdp->tx_skbuff[entry] = NULL; in sh_eth_txfree()
1441 txdesc->status = cpu_to_edmac(mdp, TD_TFP); in sh_eth_txfree()
1442 if (entry >= mdp->num_tx_ring - 1) in sh_eth_txfree()
1443 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); in sh_eth_txfree()
1446 ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16; in sh_eth_txfree()
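
sh_eth_txfree() reclaims from dirty_tx up to cur_tx, stopping at the first descriptor the hardware still owns (TD_TACT set). Both counters are free-running u32s that only ever increase; the code uses just their difference and entry = dirty_tx % num_tx_ring, which stay correct across wraparound (the modulo is wrap-exact for power-of-two ring sizes). A self-contained demonstration of that counter pattern:

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_RING 8u	/* power of two, so '%' survives counter wrap */

int main(void)
{
	/* Free-running producer/consumer counters, as in cur_tx/dirty_tx. */
	uint32_t cur_tx = UINT32_MAX - 2;	/* about to wrap */
	uint32_t dirty_tx = cur_tx - 5;		/* 5 descriptors in flight */
	int i;

	for (i = 0; i < 6; i++)
		cur_tx++;			/* queue 6 more, crossing the wrap */

	printf("pending = %u\n", (unsigned)(cur_tx - dirty_tx));	/* 11 */
	while (cur_tx - dirty_tx > 0) {
		unsigned entry = dirty_tx % NUM_TX_RING;

		printf("reclaim slot %u\n", entry);
		dirty_tx++;
	}
	return 0;
}
```
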
1454 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_rx() local
1457 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1458 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1463 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_rx()
1469 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1470 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { in sh_eth_rx()
1473 desc_status = edmac_to_cpu(mdp, rxdesc->status); in sh_eth_rx()
1474 pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL; in sh_eth_rx()
1479 netif_info(mdp, rx_status, ndev, in sh_eth_rx()
1492 if (mdp->cd->shift_rd0) in sh_eth_rx()
1495 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1512 dma_addr = edmac_to_cpu(mdp, rxdesc->addr); in sh_eth_rx()
1513 if (!mdp->cd->hw_swap) in sh_eth_rx()
1517 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1518 if (mdp->cd->rpadir) in sh_eth_rx()
1521 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_rx()
1531 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1532 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1536 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1537 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1538 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1540 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_rx()
1541 rxdesc->len = cpu_to_edmac(mdp, buf_len << 16); in sh_eth_rx()
1543 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1554 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1557 rxdesc->addr = cpu_to_edmac(mdp, dma_addr); in sh_eth_rx()
1560 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1562 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE); in sh_eth_rx()
1565 cpu_to_edmac(mdp, RD_RACT | RD_RFP); in sh_eth_rx()
1573 mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) { in sh_eth_rx()
1577 mdp->cur_rx = count; in sh_eth_rx()
1578 mdp->dirty_rx = count; in sh_eth_rx()
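
In sh_eth_rx(), boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx bounds the receive loop: it is exactly the number of slots still armed with a hardware-owned buffer, i.e. the ring size minus the consumed-but-not-yet-refilled gap. The identity worked through with sample numbers (a sketch, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_rx_ring = 64;
	uint32_t cur_rx = 100;		/* next slot the CPU will inspect */
	uint32_t dirty_rx = 70;		/* next slot to refill */

	/* armed slots = ring size - (cur_rx - dirty_rx) unrefilled slots */
	uint32_t boguscnt = (dirty_rx + num_rx_ring) - cur_rx;

	printf("at most %u frames before a refill pass\n", (unsigned)boguscnt); /* 34 */
	return 0;
}
```
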
1605 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_error() local
1617 if (mdp->cd->no_psr || mdp->no_ether_link) { in sh_eth_error()
1621 if (mdp->ether_link_active_low) in sh_eth_error()
1646 netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); in sh_eth_error()
1661 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); in sh_eth_error()
1667 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); in sh_eth_error()
1680 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1683 netif_err(mdp, tx_err, ndev, "Address Error\n"); in sh_eth_error()
1687 if (mdp->cd->no_ade) in sh_eth_error()
1695 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1701 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { in sh_eth_error()
1703 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); in sh_eth_error()
1713 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_interrupt() local
1714 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1718 spin_lock(&mdp->lock); in sh_eth_interrupt()
1734 if (unlikely(!mdp->irq_enabled)) { in sh_eth_interrupt()
1740 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1744 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1769 spin_unlock(&mdp->lock); in sh_eth_interrupt()
1776 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, in sh_eth_poll() local
1796 if (mdp->irq_enabled) in sh_eth_poll()
1797 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1805 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_adjust_link() local
1806 struct phy_device *phydev = mdp->phydev; in sh_eth_adjust_link()
1810 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1812 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1813 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1814 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1817 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1819 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1820 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1821 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1823 if (!mdp->link) { in sh_eth_adjust_link()
1828 mdp->link = phydev->link; in sh_eth_adjust_link()
1829 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1832 } else if (mdp->link) { in sh_eth_adjust_link()
1834 mdp->link = 0; in sh_eth_adjust_link()
1835 mdp->speed = 0; in sh_eth_adjust_link()
1836 mdp->duplex = -1; in sh_eth_adjust_link()
1837 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1841 if (new_state && netif_msg_link(mdp)) in sh_eth_adjust_link()
1849 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_init() local
1852 mdp->link = 0; in sh_eth_phy_init()
1853 mdp->speed = 0; in sh_eth_phy_init()
1854 mdp->duplex = -1; in sh_eth_phy_init()
1863 mdp->phy_interface); in sh_eth_phy_init()
1871 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
1874 mdp->phy_interface); in sh_eth_phy_init()
1885 mdp->phydev = phydev; in sh_eth_phy_init()
1893 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_start() local
1900 phy_start(mdp->phydev); in sh_eth_phy_start()
1908 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_settings() local
1912 if (!mdp->phydev) in sh_eth_get_settings()
1915 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_get_settings()
1916 ret = phy_ethtool_gset(mdp->phydev, ecmd); in sh_eth_get_settings()
1917 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_get_settings()
1925 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_settings() local
1929 if (!mdp->phydev) in sh_eth_set_settings()
1932 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_settings()
1937 ret = phy_ethtool_sset(mdp->phydev, ecmd); in sh_eth_set_settings()
1942 mdp->duplex = 1; in sh_eth_set_settings()
1944 mdp->duplex = 0; in sh_eth_set_settings()
1946 if (mdp->cd->set_duplex) in sh_eth_set_settings()
1947 mdp->cd->set_duplex(ndev); in sh_eth_set_settings()
1955 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_settings()
1970 struct sh_eth_private *mdp = netdev_priv(ndev); in __sh_eth_get_regs() local
1971 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
1995 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
2004 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) in __sh_eth_get_regs()
2103 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) { in __sh_eth_get_regs()
2113 mdp->tsu_addr + in __sh_eth_get_regs()
2114 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2137 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_regs() local
2141 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2143 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2148 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_nway_reset() local
2152 if (!mdp->phydev) in sh_eth_nway_reset()
2155 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_nway_reset()
2156 ret = phy_start_aneg(mdp->phydev); in sh_eth_nway_reset()
2157 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_nway_reset()
2164 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_msglevel() local
2165 return mdp->msg_enable; in sh_eth_get_msglevel()
2170 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_msglevel() local
2171 mdp->msg_enable = value; in sh_eth_set_msglevel()
2193 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ethtool_stats() local
2197 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2198 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2199 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2200 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2216 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ringparam() local
2220 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2221 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2227 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_ringparam() local
2247 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2249 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2259 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2260 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2276 mdp->irq_enabled = true; in sh_eth_set_ringparam()
2277 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_set_ringparam()
2306 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_open() local
2308 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2310 napi_enable(&mdp->napi); in sh_eth_open()
2313 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2334 mdp->is_opened = 1; in sh_eth_open()
2341 napi_disable(&mdp->napi); in sh_eth_open()
2342 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2349 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_timeout() local
2355 netif_err(mdp, timer, ndev, in sh_eth_tx_timeout()
2363 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2364 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2365 rxdesc->status = cpu_to_edmac(mdp, 0); in sh_eth_tx_timeout()
2366 rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); in sh_eth_tx_timeout()
2367 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2368 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2370 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2371 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2372 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2382 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_start_xmit() local
2388 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2389 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2391 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); in sh_eth_start_xmit()
2393 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2397 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2402 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2403 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2404 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2406 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2414 txdesc->addr = cpu_to_edmac(mdp, dma_addr); in sh_eth_start_xmit()
2415 txdesc->len = cpu_to_edmac(mdp, skb->len << 16); in sh_eth_start_xmit()
2418 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2419 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); in sh_eth_start_xmit()
2421 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); in sh_eth_start_xmit()
2423 mdp->cur_tx++; in sh_eth_start_xmit()
2425 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) in sh_eth_start_xmit()
2426 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); in sh_eth_start_xmit()
2449 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_stats() local
2451 if (sh_eth_is_rz_fast_ether(mdp)) in sh_eth_get_stats()
2454 if (!mdp->is_opened) in sh_eth_get_stats()
2461 if (sh_eth_is_gether(mdp)) { in sh_eth_get_stats()
2477 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_close() local
2485 mdp->irq_enabled = false; in sh_eth_close()
2487 napi_disable(&mdp->napi); in sh_eth_close()
2493 if (mdp->phydev) { in sh_eth_close()
2494 phy_stop(mdp->phydev); in sh_eth_close()
2495 phy_disconnect(mdp->phydev); in sh_eth_close()
2496 mdp->phydev = NULL; in sh_eth_close()
2504 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_close()
2506 mdp->is_opened = 0; in sh_eth_close()
2514 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_do_ioctl() local
2515 struct phy_device *phydev = mdp->phydev; in sh_eth_do_ioctl()
2527 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, in sh_eth_tsu_get_post_reg_offset() argument
2530 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); in sh_eth_tsu_get_post_reg_offset()
2538 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) in sh_eth_tsu_get_post_bit() argument
2540 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
2546 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_enable_cam_entry_post() local
2550 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); in sh_eth_tsu_enable_cam_entry_post()
2552 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); in sh_eth_tsu_enable_cam_entry_post()
2558 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_post() local
2562 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); in sh_eth_tsu_disable_cam_entry_post()
2564 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; in sh_eth_tsu_disable_cam_entry_post()
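
Each 32-bit TSU_POST register packs "post" configuration for eight CAM entries, four bits per entry (hence entry / 8 * 4 in the offset helper above), and within an entry's nibble the bit position is derived from the port number. The mask expression from sh_eth_tsu_get_post_bit() checked standalone (the shift expression is the driver's; the surrounding code is scaffolding):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t post_bit(unsigned port, unsigned entry)
{
	/* Same expression as sh_eth_tsu_get_post_bit(). */
	return (0x08u >> (port << 1)) << (28 - ((entry % 8) * 4));
}

int main(void)
{
	/* entry 0 occupies the top nibble; port 0 uses nibble bit 3, port 1 bit 1 */
	printf("port 0, entry 0: 0x%08x\n", post_bit(0, 0));	/* 0x80000000 */
	printf("port 1, entry 0: 0x%08x\n", post_bit(1, 0));	/* 0x20000000 */
	printf("port 0, entry 7: 0x%08x\n", post_bit(0, 7));	/* 0x00000008 */
	return 0;
}
```
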
2576 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_busy() local
2578 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { in sh_eth_tsu_busy()
2625 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_find_entry() local
2626 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_find_entry()
2652 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_table() local
2653 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_disable_cam_entry_table()
2657 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & in sh_eth_tsu_disable_cam_entry_table()
2669 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_add_entry() local
2670 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_add_entry()
2673 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2687 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | in sh_eth_tsu_add_entry()
2699 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_del_entry() local
2702 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2722 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_all() local
2725 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2743 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_mcast() local
2745 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_purge_mcast()
2748 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2761 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_mode() local
2766 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2771 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2787 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2806 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2809 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) in sh_eth_get_vtag_index() argument
2811 if (!mdp->port) in sh_eth_get_vtag_index()
2820 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_add_vid() local
2821 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_add_vid()
2823 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2830 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2835 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2837 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_add_vid()
2841 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), in sh_eth_vlan_rx_add_vid()
2850 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_kill_vid() local
2851 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_kill_vid()
2853 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2860 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
2861 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_kill_vid()
2867 static void sh_eth_tsu_init(struct sh_eth_private *mdp) in sh_eth_tsu_init() argument
2869 if (sh_eth_is_rz_fast_ether(mdp)) { in sh_eth_tsu_init()
2870 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
2874 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
2875 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
2876 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
2877 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); in sh_eth_tsu_init()
2878 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); in sh_eth_tsu_init()
2879 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); in sh_eth_tsu_init()
2880 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); in sh_eth_tsu_init()
2881 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); in sh_eth_tsu_init()
2882 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); in sh_eth_tsu_init()
2883 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); in sh_eth_tsu_init()
2884 if (sh_eth_is_gether(mdp)) { in sh_eth_tsu_init()
2885 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
2886 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
2888 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
2889 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
2891 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ in sh_eth_tsu_init()
2892 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ in sh_eth_tsu_init()
2893 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
2894 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
2895 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
2896 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
2897 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
2901 static int sh_mdio_release(struct sh_eth_private *mdp) in sh_mdio_release() argument
2904 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
2907 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
2913 static int sh_mdio_init(struct sh_eth_private *mdp, in sh_mdio_init() argument
2918 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
2919 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
2927 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
2936 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
2937 if (!mdp->mii_bus) in sh_mdio_init()
2941 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
2942 mdp->mii_bus->parent = dev; in sh_mdio_init()
2943 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
2947 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), in sh_mdio_init()
2949 if (!mdp->mii_bus->irq) { in sh_mdio_init()
2956 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
2959 mdp->mii_bus->irq[i] = PHY_POLL; in sh_mdio_init()
2961 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
2963 ret = mdiobus_register(mdp->mii_bus); in sh_mdio_init()
2972 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
3080 struct sh_eth_private *mdp = NULL; in sh_eth_drv_probe() local
3106 mdp = netdev_priv(ndev); in sh_eth_drv_probe()
3107 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3108 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3109 mdp->addr = devm_ioremap_resource(&pdev->dev, res); in sh_eth_drv_probe()
3110 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3111 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3117 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3118 mdp->pdev = pdev; in sh_eth_drv_probe()
3129 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3130 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3132 mdp->edmac_endian = pd->edmac_endian; in sh_eth_drv_probe()
3133 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3134 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3138 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3144 mdp->cd = (struct sh_eth_cpu_data *)match->data; in sh_eth_drv_probe()
3146 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3147 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3149 mdp->cd->register_type); in sh_eth_drv_probe()
3153 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3156 if (mdp->cd->tsu) in sh_eth_drv_probe()
3164 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3175 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3178 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); in sh_eth_drv_probe()
3179 if (IS_ERR(mdp->tsu_addr)) { in sh_eth_drv_probe()
3180 ret = PTR_ERR(mdp->tsu_addr); in sh_eth_drv_probe()
3183 mdp->port = devno % 2; in sh_eth_drv_probe()
3189 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3190 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3192 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3194 sh_eth_tsu_init(mdp); in sh_eth_drv_probe()
3198 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3202 ret = sh_mdio_init(mdp, pd); in sh_eth_drv_probe()
3208 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); in sh_eth_drv_probe()
3225 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3226 sh_mdio_release(mdp); in sh_eth_drv_probe()
3241 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_drv_remove() local
3244 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3245 sh_mdio_release(mdp); in sh_eth_drv_remove()