Lines matching refs: mdp — uses of struct sh_eth_private *mdp in the sh_eth driver (each entry: source line number, code excerpt, enclosing function)

407 static bool sh_eth_is_gether(struct sh_eth_private *mdp)  in sh_eth_is_gether()  argument
409 return mdp->reg_offset == sh_eth_offset_gigabit; in sh_eth_is_gether()
412 static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp) in sh_eth_is_rz_fast_ether() argument
414 return mdp->reg_offset == sh_eth_offset_fast_rz; in sh_eth_is_rz_fast_ether()
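
The two predicates above identify the hardware variant purely by which static register-offset table mdp->reg_offset points at, so the check is a single pointer comparison. A minimal userspace sketch of that pointer-identity idiom (offsets_gigabit, dev_is_gigabit and the other names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static const int offsets_gigabit[] = { 0x00, 0x04, 0x08 };
static const int offsets_fast[]    = { 0x00, 0x10, 0x20 };

struct dev_priv {
        const int *reg_offset;          /* points at one static table */
};

static bool dev_is_gigabit(const struct dev_priv *p)
{
        /* variant check = pointer identity, no per-register data */
        return p->reg_offset == offsets_gigabit;
}

int main(void)
{
        struct dev_priv p = { .reg_offset = offsets_fast };
        printf("gigabit? %d\n", dev_is_gigabit(&p));    /* 0 */
        return 0;
}
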
420 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_select_mii() local
422 switch (mdp->phy_interface) { in sh_eth_select_mii()
444 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_duplex() local
446 if (mdp->duplex) /* Full */ in sh_eth_set_duplex()
455 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_r8a777x() local
457 switch (mdp->speed) { in sh_eth_set_rate_r8a777x()
520 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7724() local
522 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
560 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7757() local
562 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
626 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_giga() local
628 switch (mdp->speed) { in sh_eth_set_rate_giga()
676 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset() local
679 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); in sh_eth_chip_reset()
685 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_gether() local
687 switch (mdp->speed) { in sh_eth_set_rate_gether()
761 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset_r8a7740() local
764 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); in sh_eth_chip_reset_r8a7740()
896 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_reset() local
899 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { in sh_eth_reset()
919 if (mdp->cd->hw_crc) in sh_eth_reset()
923 if (mdp->cd->select_mii) in sh_eth_reset()
946 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) in cpu_to_edmac() argument
948 switch (mdp->edmac_endian) { in cpu_to_edmac()
957 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) in edmac_to_cpu() argument
959 switch (mdp->edmac_endian) { in edmac_to_cpu()
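
cpu_to_edmac()/edmac_to_cpu() byte-swap 32-bit descriptor words only when the EDMAC's configured byte order differs from the CPU's, switching on mdp->edmac_endian. A hedged standalone sketch of the same idea, assuming a little-endian CPU and locally defined EDMAC_LITTLE/EDMAC_BIG constants:

#include <stdint.h>
#include <stdio.h>

enum { EDMAC_LITTLE, EDMAC_BIG };

struct priv {
        int edmac_endian;               /* EDMAC_LITTLE or EDMAC_BIG */
};

static uint32_t cpu_to_edmac(const struct priv *p, uint32_t x)
{
        /* assumption: little-endian CPU, so only a big-endian EDMAC
         * needs a swap; the driver switches on the same field */
        return p->edmac_endian == EDMAC_BIG ? __builtin_bswap32(x) : x;
}

int main(void)
{
        struct priv p = { .edmac_endian = EDMAC_BIG };
        printf("%08x\n", (unsigned)cpu_to_edmac(&p, 0x12345678));
        /* prints 78563412 */
        return 0;
}
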
999 static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) in sh_eth_get_edtrr_trns() argument
1001 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) in sh_eth_get_edtrr_trns()
1100 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_free() local
1104 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1105 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1106 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1108 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1109 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1112 if (mdp->tx_skbuff) { in sh_eth_ring_free()
1113 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_ring_free()
1114 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_ring_free()
1116 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1117 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
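
sh_eth_ring_free() follows the free-then-NULL idiom: release each element, release the backing array, then clear the pointer so a repeat call is a no-op. A userspace analogue, with free() standing in for dev_kfree_skb()/kfree():

#include <stdlib.h>

struct ring {
        void **skbuff;                  /* array of buffer pointers */
        int num;
};

static void ring_free(struct ring *r)
{
        int i;

        if (r->skbuff) {
                for (i = 0; i < r->num; i++)
                        free(r->skbuff[i]);     /* free(NULL) is safe */
                free(r->skbuff);
                r->skbuff = NULL;       /* repeat call becomes a no-op */
        }
}
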
1123 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_format() local
1128 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1129 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1130 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; in sh_eth_ring_format()
1133 mdp->cur_rx = 0; in sh_eth_ring_format()
1134 mdp->cur_tx = 0; in sh_eth_ring_format()
1135 mdp->dirty_rx = 0; in sh_eth_ring_format()
1136 mdp->dirty_tx = 0; in sh_eth_ring_format()
1138 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1141 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1143 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1150 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1152 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); in sh_eth_ring_format()
1160 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1162 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); in sh_eth_ring_format()
1166 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1167 if (sh_eth_is_gether(mdp) || in sh_eth_ring_format()
1168 sh_eth_is_rz_fast_ether(mdp)) in sh_eth_ring_format()
1169 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1173 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1176 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); in sh_eth_ring_format()
1178 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1181 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1182 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1183 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1184 txdesc->status = cpu_to_edmac(mdp, TD_TFP); in sh_eth_ring_format()
1188 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1189 if (sh_eth_is_gether(mdp) || in sh_eth_ring_format()
1190 sh_eth_is_rz_fast_ether(mdp)) in sh_eth_ring_format()
1191 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
1195 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); in sh_eth_ring_format()
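
sh_eth_ring_format() hands every descriptor to the hardware and flags the last one as the list end so the EDMAC wraps back to the head (the RD_RDEL/TD_TDLE bits above). A compact model of that loop; the DESC_* bit values are illustrative, not the real RD_*/TD_* encodings:

#include <stdint.h>

#define DESC_ACT 0x80000000u            /* owned by hardware */
#define DESC_DLE 0x40000000u            /* descriptor list end */

struct desc {
        uint32_t status;
        uint32_t buffer_length;
};

static void ring_format(struct desc *ring, int n, uint32_t buf_sz)
{
        int i;

        for (i = 0; i < n; i++) {
                ring[i].buffer_length = (buf_sz + 15) & ~15u; /* ALIGN(sz, 16) */
                ring[i].status = DESC_ACT;
        }
        ring[n - 1].status |= DESC_DLE; /* DMA engine wraps to head here */
}
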
1201 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_init() local
1209 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1211 if (mdp->cd->rpadir) in sh_eth_ring_init()
1212 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1215 mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring, in sh_eth_ring_init()
1216 sizeof(*mdp->rx_skbuff), GFP_KERNEL); in sh_eth_ring_init()
1217 if (!mdp->rx_skbuff) { in sh_eth_ring_init()
1222 mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring, in sh_eth_ring_init()
1223 sizeof(*mdp->tx_skbuff), GFP_KERNEL); in sh_eth_ring_init()
1224 if (!mdp->tx_skbuff) { in sh_eth_ring_init()
1230 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1231 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, in sh_eth_ring_init()
1233 if (!mdp->rx_ring) { in sh_eth_ring_init()
1238 mdp->dirty_rx = 0; in sh_eth_ring_init()
1241 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1242 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, in sh_eth_ring_init()
1244 if (!mdp->tx_ring) { in sh_eth_ring_init()
1252 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); in sh_eth_ring_init()
1257 mdp->tx_ring = NULL; in sh_eth_ring_init()
1258 mdp->rx_ring = NULL; in sh_eth_ring_init()
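
sh_eth_ring_init() allocates the two skbuff arrays and the two coherent descriptor rings in sequence, and its error paths unwind whatever succeeded before returning (the final lines above NULL the ring pointers). A sketch of that goto-based rollback pattern, with malloc() standing in for kmalloc_array()/dma_alloc_coherent() and the label names invented here:

#include <stdlib.h>

struct rings {
        void *rx_skb, *tx_skb, *rx_ring, *tx_ring;
};

static int ring_init(struct rings *r, size_t rxsz, size_t txsz)
{
        if (!(r->rx_skb = malloc(rxsz)))
                goto err;
        if (!(r->tx_skb = malloc(txsz)))
                goto err_free_rx_skb;
        if (!(r->rx_ring = malloc(rxsz)))
                goto err_free_tx_skb;
        if (!(r->tx_ring = malloc(txsz)))
                goto err_free_rx_ring;
        return 0;

err_free_rx_ring:                       /* unwind in reverse order */
        free(r->rx_ring);
        r->rx_ring = NULL;
err_free_tx_skb:
        free(r->tx_skb);
        r->tx_skb = NULL;
err_free_rx_skb:
        free(r->rx_skb);
        r->rx_skb = NULL;
err:
        return -1;                      /* -ENOMEM in the driver */
}
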
1263 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp) in sh_eth_free_dma_buffer() argument
1267 if (mdp->rx_ring) { in sh_eth_free_dma_buffer()
1268 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_free_dma_buffer()
1269 dma_free_coherent(NULL, ringsize, mdp->rx_ring, in sh_eth_free_dma_buffer()
1270 mdp->rx_desc_dma); in sh_eth_free_dma_buffer()
1271 mdp->rx_ring = NULL; in sh_eth_free_dma_buffer()
1274 if (mdp->tx_ring) { in sh_eth_free_dma_buffer()
1275 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_free_dma_buffer()
1276 dma_free_coherent(NULL, ringsize, mdp->tx_ring, in sh_eth_free_dma_buffer()
1277 mdp->tx_desc_dma); in sh_eth_free_dma_buffer()
1278 mdp->tx_ring = NULL; in sh_eth_free_dma_buffer()
1285 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_init() local
1293 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1298 if (mdp->cd->rpadir) in sh_eth_dev_init()
1299 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); in sh_eth_dev_init()
1305 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1312 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1318 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1320 if (mdp->cd->bculr) in sh_eth_dev_init()
1323 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1325 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1334 mdp->irq_enabled = true; in sh_eth_dev_init()
1335 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1340 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; in sh_eth_dev_init()
1344 if (mdp->cd->set_rate) in sh_eth_dev_init()
1345 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1348 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1352 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1358 if (mdp->cd->apr) in sh_eth_dev_init()
1360 if (mdp->cd->mpr) in sh_eth_dev_init()
1362 if (mdp->cd->tpauser) in sh_eth_dev_init()
1377 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_exit() local
1383 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1384 mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); in sh_eth_dev_exit()
1408 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_txfree() local
1413 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_txfree()
1414 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_txfree()
1415 txdesc = &mdp->tx_ring[entry]; in sh_eth_txfree()
1416 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) in sh_eth_txfree()
1420 netif_info(mdp, tx_done, ndev, in sh_eth_txfree()
1422 entry, edmac_to_cpu(mdp, txdesc->status)); in sh_eth_txfree()
1424 if (mdp->tx_skbuff[entry]) { in sh_eth_txfree()
1427 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_txfree()
1428 mdp->tx_skbuff[entry] = NULL; in sh_eth_txfree()
1431 txdesc->status = cpu_to_edmac(mdp, TD_TFP); in sh_eth_txfree()
1432 if (entry >= mdp->num_tx_ring - 1) in sh_eth_txfree()
1433 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); in sh_eth_txfree()
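
The reclaim loop in sh_eth_txfree() relies on cur_tx and dirty_tx being free-running u32 counters: (cur_tx - dirty_tx) is the in-flight count even across wraparound, and (counter % num_tx_ring) selects the slot. A runnable demonstration of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t cur = 0xfffffffeu, dirty = 0xfffffffcu; /* near wrap */
        unsigned ring = 64;

        for (; cur - dirty > 0; dirty++)        /* two entries pending */
                printf("reclaim slot %u\n", (unsigned)(dirty % ring));
        cur += 3;                               /* counter wraps past 0 */
        printf("in flight: %u\n", (unsigned)(cur - dirty)); /* 3 */
        return 0;
}
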
1444 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_rx() local
1447 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1448 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1453 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; in sh_eth_rx()
1458 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1459 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { in sh_eth_rx()
1462 desc_status = edmac_to_cpu(mdp, rxdesc->status); in sh_eth_rx()
1468 netif_info(mdp, rx_status, ndev, in sh_eth_rx()
1481 if (mdp->cd->shift_rd0) in sh_eth_rx()
1484 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1501 if (!mdp->cd->hw_swap) in sh_eth_rx()
1505 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1506 if (mdp->cd->rpadir) in sh_eth_rx()
1509 ALIGN(mdp->rx_buf_sz, 16), in sh_eth_rx()
1519 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1520 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1524 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1525 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1526 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1528 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); in sh_eth_rx()
1530 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1542 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1548 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1550 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); in sh_eth_rx()
1553 cpu_to_edmac(mdp, RD_RACT | RD_RFP); in sh_eth_rx()
1561 mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) { in sh_eth_rx()
1565 mdp->cur_rx = count; in sh_eth_rx()
1566 mdp->dirty_rx = count; in sh_eth_rx()
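
sh_eth_rx() bounds its work twice: by the NAPI quota and by boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx, the count of descriptors that can be outstanding before a refill is required. A sketch of the combined budget (the per-descriptor RACT status check is elided):

#include <stdio.h>

static unsigned rx_poll(unsigned cur_rx, unsigned dirty_rx,
                        unsigned ring, int quota)
{
        int boguscnt = (int)((dirty_rx + ring) - cur_rx);
        unsigned handled = 0;

        while (boguscnt-- > 0 && quota-- > 0) {
                /* ...check descriptor cur_rx % ring, pass the skb up... */
                handled++;
                cur_rx++;
        }
        return handled;
}

int main(void)
{
        printf("%u\n", rx_poll(100, 70, 64, 16)); /* min(34, 16) = 16 */
        return 0;
}
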
1593 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_error() local
1605 if (mdp->cd->no_psr || mdp->no_ether_link) { in sh_eth_error()
1609 if (mdp->ether_link_active_low) in sh_eth_error()
1634 netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); in sh_eth_error()
1649 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); in sh_eth_error()
1655 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); in sh_eth_error()
1668 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1671 netif_err(mdp, tx_err, ndev, "Address Error\n"); in sh_eth_error()
1675 if (mdp->cd->no_ade) in sh_eth_error()
1683 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1689 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { in sh_eth_error()
1691 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); in sh_eth_error()
1701 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_interrupt() local
1702 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1706 spin_lock(&mdp->lock); in sh_eth_interrupt()
1722 if (!likely(mdp->irq_enabled)) { in sh_eth_interrupt()
1728 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1732 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1757 spin_unlock(&mdp->lock); in sh_eth_interrupt()
1764 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, in sh_eth_poll() local
1784 if (mdp->irq_enabled) in sh_eth_poll()
1785 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1793 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_adjust_link() local
1794 struct phy_device *phydev = mdp->phydev; in sh_eth_adjust_link()
1798 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1800 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1801 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1802 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1805 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1807 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1808 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1809 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1811 if (!mdp->link) { in sh_eth_adjust_link()
1816 mdp->link = phydev->link; in sh_eth_adjust_link()
1817 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1820 } else if (mdp->link) { in sh_eth_adjust_link()
1822 mdp->link = 0; in sh_eth_adjust_link()
1823 mdp->speed = 0; in sh_eth_adjust_link()
1824 mdp->duplex = -1; in sh_eth_adjust_link()
1825 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1829 if (new_state && netif_msg_link(mdp)) in sh_eth_adjust_link()
1837 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_init() local
1840 mdp->link = 0; in sh_eth_phy_init()
1841 mdp->speed = 0; in sh_eth_phy_init()
1842 mdp->duplex = -1; in sh_eth_phy_init()
1851 mdp->phy_interface); in sh_eth_phy_init()
1859 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
1862 mdp->phy_interface); in sh_eth_phy_init()
1873 mdp->phydev = phydev; in sh_eth_phy_init()
1881 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_start() local
1888 phy_start(mdp->phydev); in sh_eth_phy_start()
1896 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_settings() local
1900 if (!mdp->phydev) in sh_eth_get_settings()
1903 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_get_settings()
1904 ret = phy_ethtool_gset(mdp->phydev, ecmd); in sh_eth_get_settings()
1905 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_get_settings()
1913 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_settings() local
1917 if (!mdp->phydev) in sh_eth_set_settings()
1920 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_settings()
1925 ret = phy_ethtool_sset(mdp->phydev, ecmd); in sh_eth_set_settings()
1930 mdp->duplex = 1; in sh_eth_set_settings()
1932 mdp->duplex = 0; in sh_eth_set_settings()
1934 if (mdp->cd->set_duplex) in sh_eth_set_settings()
1935 mdp->cd->set_duplex(ndev); in sh_eth_set_settings()
1943 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_settings()
1958 struct sh_eth_private *mdp = netdev_priv(ndev); in __sh_eth_get_regs() local
1959 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
1983 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
1992 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) in __sh_eth_get_regs()
2091 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) { in __sh_eth_get_regs()
2101 mdp->tsu_addr + in __sh_eth_get_regs()
2102 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2125 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_regs() local
2129 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2131 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2136 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_nway_reset() local
2140 if (!mdp->phydev) in sh_eth_nway_reset()
2143 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_nway_reset()
2144 ret = phy_start_aneg(mdp->phydev); in sh_eth_nway_reset()
2145 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_nway_reset()
2152 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_msglevel() local
2153 return mdp->msg_enable; in sh_eth_get_msglevel()
2158 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_msglevel() local
2159 mdp->msg_enable = value; in sh_eth_set_msglevel()
2181 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ethtool_stats() local
2185 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2186 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2187 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2188 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2204 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ringparam() local
2208 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2209 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2215 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_ringparam() local
2235 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2237 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2245 sh_eth_free_dma_buffer(mdp); in sh_eth_set_ringparam()
2249 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2250 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2266 mdp->irq_enabled = true; in sh_eth_set_ringparam()
2267 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_set_ringparam()
2296 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_open() local
2298 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2300 napi_enable(&mdp->napi); in sh_eth_open()
2303 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2324 mdp->is_opened = 1; in sh_eth_open()
2331 napi_disable(&mdp->napi); in sh_eth_open()
2332 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2339 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_timeout() local
2345 netif_err(mdp, timer, ndev, in sh_eth_tx_timeout()
2353 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2354 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2357 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2358 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2360 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2361 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2362 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2372 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_start_xmit() local
2377 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2378 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2380 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); in sh_eth_start_xmit()
2382 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2386 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2391 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2392 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2393 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2395 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2407 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2408 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); in sh_eth_start_xmit()
2410 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); in sh_eth_start_xmit()
2412 mdp->cur_tx++; in sh_eth_start_xmit()
2414 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) in sh_eth_start_xmit()
2415 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); in sh_eth_start_xmit()
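
sh_eth_start_xmit() applies backpressure before the ring is completely full: once (mdp->cur_tx - mdp->dirty_tx) >= mdp->num_tx_ring - 4 it stops the queue, keeping a few slots of headroom while reclaim catches up. A standalone version of that check:

#include <stdbool.h>
#include <stdio.h>

static bool tx_ring_congested(unsigned cur_tx, unsigned dirty_tx,
                              unsigned num_tx_ring)
{
        return (cur_tx - dirty_tx) >= num_tx_ring - 4;
}

int main(void)
{
        printf("%d\n", tx_ring_congested(59, 0, 64)); /* 0: keep going */
        printf("%d\n", tx_ring_congested(60, 0, 64)); /* 1: stop queue */
        return 0;
}
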
2438 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_stats() local
2440 if (sh_eth_is_rz_fast_ether(mdp)) in sh_eth_get_stats()
2443 if (!mdp->is_opened) in sh_eth_get_stats()
2450 if (sh_eth_is_gether(mdp)) { in sh_eth_get_stats()
2466 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_close() local
2474 mdp->irq_enabled = false; in sh_eth_close()
2476 napi_disable(&mdp->napi); in sh_eth_close()
2482 if (mdp->phydev) { in sh_eth_close()
2483 phy_stop(mdp->phydev); in sh_eth_close()
2484 phy_disconnect(mdp->phydev); in sh_eth_close()
2485 mdp->phydev = NULL; in sh_eth_close()
2494 sh_eth_free_dma_buffer(mdp); in sh_eth_close()
2496 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_close()
2498 mdp->is_opened = 0; in sh_eth_close()
2506 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_do_ioctl() local
2507 struct phy_device *phydev = mdp->phydev; in sh_eth_do_ioctl()
2519 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, in sh_eth_tsu_get_post_reg_offset() argument
2522 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); in sh_eth_tsu_get_post_reg_offset()
2530 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) in sh_eth_tsu_get_post_bit() argument
2532 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
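
sh_eth_tsu_get_post_bit() packs eight CAM entries into each 32-bit POST register, four bits per entry; within a nibble, port 0 owns bit 3 (0x08) and port 1 owns bit 1 (0x08 >> 2). A sketch reproducing the bit layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t post_bit(int port, int entry)
{
        /* same expression as the driver's helper */
        return (0x08u >> (port << 1)) << (28 - ((entry % 8) * 4));
}

int main(void)
{
        printf("%08x\n", (unsigned)post_bit(0, 0)); /* 80000000 */
        printf("%08x\n", (unsigned)post_bit(1, 0)); /* 20000000 */
        printf("%08x\n", (unsigned)post_bit(0, 7)); /* 00000008 */
        return 0;
}
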
2538 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_enable_cam_entry_post() local
2542 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); in sh_eth_tsu_enable_cam_entry_post()
2544 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); in sh_eth_tsu_enable_cam_entry_post()
2550 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_post() local
2554 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); in sh_eth_tsu_disable_cam_entry_post()
2556 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; in sh_eth_tsu_disable_cam_entry_post()
2568 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_busy() local
2570 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { in sh_eth_tsu_busy()
2617 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_find_entry() local
2618 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_find_entry()
2644 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_table() local
2645 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_disable_cam_entry_table()
2649 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & in sh_eth_tsu_disable_cam_entry_table()
2661 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_add_entry() local
2662 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_add_entry()
2665 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2679 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | in sh_eth_tsu_add_entry()
2691 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_del_entry() local
2694 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2714 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_all() local
2717 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2735 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_mcast() local
2737 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_purge_mcast()
2740 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2753 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_mode() local
2758 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2763 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2779 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2798 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2801 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) in sh_eth_get_vtag_index() argument
2803 if (!mdp->port) in sh_eth_get_vtag_index()
2812 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_add_vid() local
2813 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_add_vid()
2815 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2822 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2827 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2829 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_add_vid()
2833 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), in sh_eth_vlan_rx_add_vid()
2842 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_kill_vid() local
2843 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_kill_vid()
2845 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2852 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
2853 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_kill_vid()
2859 static void sh_eth_tsu_init(struct sh_eth_private *mdp) in sh_eth_tsu_init() argument
2861 if (sh_eth_is_rz_fast_ether(mdp)) { in sh_eth_tsu_init()
2862 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
2866 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
2867 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
2868 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
2869 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); in sh_eth_tsu_init()
2870 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); in sh_eth_tsu_init()
2871 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); in sh_eth_tsu_init()
2872 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); in sh_eth_tsu_init()
2873 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); in sh_eth_tsu_init()
2874 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); in sh_eth_tsu_init()
2875 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); in sh_eth_tsu_init()
2876 if (sh_eth_is_gether(mdp)) { in sh_eth_tsu_init()
2877 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
2878 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
2880 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
2881 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
2883 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ in sh_eth_tsu_init()
2884 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ in sh_eth_tsu_init()
2885 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
2886 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
2887 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
2888 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
2889 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
2893 static int sh_mdio_release(struct sh_eth_private *mdp) in sh_mdio_release() argument
2896 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
2899 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
2905 static int sh_mdio_init(struct sh_eth_private *mdp, in sh_mdio_init() argument
2910 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
2911 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
2919 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
2928 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
2929 if (!mdp->mii_bus) in sh_mdio_init()
2933 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
2934 mdp->mii_bus->parent = dev; in sh_mdio_init()
2935 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
2939 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), in sh_mdio_init()
2941 if (!mdp->mii_bus->irq) { in sh_mdio_init()
2948 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
2951 mdp->mii_bus->irq[i] = PHY_POLL; in sh_mdio_init()
2953 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
2955 ret = mdiobus_register(mdp->mii_bus); in sh_mdio_init()
2964 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
3072 struct sh_eth_private *mdp = NULL; in sh_eth_drv_probe() local
3100 mdp = netdev_priv(ndev); in sh_eth_drv_probe()
3101 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3102 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3103 mdp->addr = devm_ioremap_resource(&pdev->dev, res); in sh_eth_drv_probe()
3104 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3105 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3111 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3112 mdp->pdev = pdev; in sh_eth_drv_probe()
3123 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3124 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3126 mdp->edmac_endian = pd->edmac_endian; in sh_eth_drv_probe()
3127 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3128 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3132 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3138 mdp->cd = (struct sh_eth_cpu_data *)match->data; in sh_eth_drv_probe()
3140 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3141 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3143 mdp->cd->register_type); in sh_eth_drv_probe()
3147 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3150 if (mdp->cd->tsu) in sh_eth_drv_probe()
3158 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3169 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3172 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); in sh_eth_drv_probe()
3173 if (IS_ERR(mdp->tsu_addr)) { in sh_eth_drv_probe()
3174 ret = PTR_ERR(mdp->tsu_addr); in sh_eth_drv_probe()
3177 mdp->port = devno % 2; in sh_eth_drv_probe()
3183 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3184 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3186 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3188 sh_eth_tsu_init(mdp); in sh_eth_drv_probe()
3192 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3196 ret = sh_mdio_init(mdp, pd); in sh_eth_drv_probe()
3202 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); in sh_eth_drv_probe()
3219 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3220 sh_mdio_release(mdp); in sh_eth_drv_probe()
3235 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_drv_remove() local
3238 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3239 sh_mdio_release(mdp); in sh_eth_drv_remove()