Lines matching refs: mp
434 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) in rdl() argument
436 return readl(mp->shared->base + offset); in rdl()
439 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) in rdlp() argument
441 return readl(mp->base + offset); in rdlp()
444 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) in wrl() argument
446 writel(data, mp->shared->base + offset); in wrl()
449 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) in wrlp() argument
451 writel(data, mp->base + offset); in wrlp()
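Note: the four accessors above split register I/O into two windows. rdl()/wrl() address the controller-wide block behind mp->shared->base, while rdlp()/wrlp() address the per-port block behind mp->base (which probe computes below as mp->shared->base + 0x0400 + (port_number << 10)). Every later entry in this listing is one of these four calls plus an offset, so the choice of accessor tells you whether a register is per-controller or per-port. A minimal user-space sketch of the same two-window pattern, with plain arrays standing in for the ioremap'd MMIO regions and array indexing standing in for readl()/writel():

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t shared_window[256];      /* controller-wide registers */
        static uint32_t port_window[256];        /* one such window per port  */

        struct eth_private {
                uint32_t *shared_base;           /* stands in for mp->shared->base */
                uint32_t *base;                  /* stands in for mp->base         */
        };

        /* rdl/wrl: controller-wide block (offset in bytes, as in the driver) */
        static inline uint32_t rdl(struct eth_private *mp, int offset)
        {
                return mp->shared_base[offset / 4];
        }

        static inline void wrl(struct eth_private *mp, int offset, uint32_t data)
        {
                mp->shared_base[offset / 4] = data;
        }

        /* rdlp/wrlp: per-port block */
        static inline uint32_t rdlp(struct eth_private *mp, int offset)
        {
                return mp->base[offset / 4];
        }

        static inline void wrlp(struct eth_private *mp, int offset, uint32_t data)
        {
                mp->base[offset / 4] = data;
        }

        int main(void)
        {
                struct eth_private mp = { shared_window, port_window };

                wrl(&mp, 0x10, 0x11111111);      /* lands in the shared block   */
                wrlp(&mp, 0x10, 0x22222222);     /* same offset, per-port block */
                printf("%08x %08x\n",
                       (unsigned)rdl(&mp, 0x10), (unsigned)rdlp(&mp, 0x10));
                return 0;
        }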
468 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_enable() local
469 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); in rxq_enable()
474 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_disable() local
477 wrlp(mp, RXQ_COMMAND, mask << 8); in rxq_disable()
478 while (rdlp(mp, RXQ_COMMAND) & mask) in rxq_disable()
484 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr() local
489 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); in txq_reset_hw_ptr()
494 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_enable() local
495 wrlp(mp, TXQ_COMMAND, 1 << txq->index); in txq_enable()
500 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_disable() local
503 wrlp(mp, TXQ_COMMAND, mask << 8); in txq_disable()
504 while (rdlp(mp, TXQ_COMMAND) & mask) in txq_disable()
510 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_maybe_wake() local
511 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_maybe_wake()
523 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_process() local
524 struct net_device_stats *stats = &mp->dev->stats; in rxq_process()
548 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, in rxq_process()
553 mp->work_rx_refill |= 1 << rxq->index; in rxq_process()
585 skb->protocol = eth_type_trans(skb, mp->dev); in rxq_process()
587 napi_gro_receive(&mp->napi, skb); in rxq_process()
597 netdev_err(mp->dev, in rxq_process()
608 mp->work_rx &= ~(1 << rxq->index); in rxq_process()
615 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_refill() local
625 skb = netdev_alloc_skb(mp->dev, mp->skb_size); in rxq_refill()
628 mp->oom = 1; in rxq_refill()
645 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, in rxq_refill()
663 mp->work_rx_refill &= ~(1 << rxq->index); in rxq_refill()
690 static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, in skb_tx_csum() argument
706 if (length - hdr_len > mp->shared->tx_csum_limit || in skb_tx_csum()
784 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_put_hdr_tso() local
795 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length); in txq_put_hdr_tso()
817 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_tso() local
864 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_tso()
880 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_frag_skb() local
910 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, in txq_submit_frag_skb()
919 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_skb() local
936 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); in txq_submit_skb()
957 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, in txq_submit_skb()
969 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_skb()
982 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_xmit() local
988 txq = mp->txq + queue; in mv643xx_eth_xmit()
1021 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_kick() local
1022 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_kick()
1028 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) in txq_kick()
1031 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); in txq_kick()
1041 mp->work_tx_end &= ~(1 << txq->index); in txq_kick()
1046 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reclaim() local
1047 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_reclaim()
1081 dma_unmap_page(mp->dev->dev.parent, in txq_reclaim()
1086 dma_unmap_single(mp->dev->dev.parent, in txq_reclaim()
1100 netdev_info(mp->dev, "tx error\n"); in txq_reclaim()
1101 mp->dev->stats.tx_errors++; in txq_reclaim()
1109 mp->work_tx &= ~(1 << txq->index); in txq_reclaim()
1120 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) in tx_set_rate() argument
1126 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); in tx_set_rate()
1130 mtu = (mp->dev->mtu + 255) >> 8; in tx_set_rate()
1138 switch (mp->shared->tx_bw_control) { in tx_set_rate()
1140 wrlp(mp, TX_BW_RATE, token_rate); in tx_set_rate()
1141 wrlp(mp, TX_BW_MTU, mtu); in tx_set_rate()
1142 wrlp(mp, TX_BW_BURST, bucket_size); in tx_set_rate()
1145 wrlp(mp, TX_BW_RATE_MOVED, token_rate); in tx_set_rate()
1146 wrlp(mp, TX_BW_MTU_MOVED, mtu); in tx_set_rate()
1147 wrlp(mp, TX_BW_BURST_MOVED, bucket_size); in tx_set_rate()
1154 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_rate() local
1158 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); in txq_set_rate()
1166 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); in txq_set_rate()
1167 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); in txq_set_rate()
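Note: tx_set_rate() and txq_set_rate() above program a token-bucket shaper. token_rate scales the requested bit rate by 64/t_clk (dividing both operands by 1000 first to avoid 32-bit overflow), and mtu is expressed in 256-byte units via the +255 round-up and >>8. The burst-to-bucket conversion and the register-field clamps below (1023, 63, 65535) are not visible in this listing and are assumptions taken from the driver; the visible arithmetic is reproduced verbatim:

        #include <stdio.h>

        int main(void)
        {
                unsigned int rate = 1000000000;  /* 1 Gbit/s, as port_start() passes  */
                unsigned int burst = 16777216;   /* burst value, also from port_start() */
                unsigned int mtu_bytes = 1500;
                unsigned int t_clk = 133000000;  /* probe's 133 MHz fallback clock    */

                /* tokens per shaper tick; /1000 on both sides avoids overflow */
                unsigned int token_rate = ((rate / 1000) * 64) / (t_clk / 1000);
                if (token_rate > 1023)           /* assumed 10-bit register field */
                        token_rate = 1023;

                unsigned int mtu = (mtu_bytes + 255) >> 8;   /* 256-byte units */
                if (mtu > 63)                    /* assumed 6-bit field */
                        mtu = 63;

                unsigned int bucket_size = (burst + 255) >> 8;
                if (bucket_size > 65535)         /* assumed 16-bit field */
                        bucket_size = 65535;

                printf("token_rate=%u mtu=%u bucket_size=%u\n",
                       token_rate, mtu, bucket_size);
                return 0;
        }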
1172 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_fixed_prio_mode() local
1180 switch (mp->shared->tx_bw_control) { in txq_set_fixed_prio_mode()
1190 val = rdlp(mp, off); in txq_set_fixed_prio_mode()
1192 wrlp(mp, off, val); in txq_set_fixed_prio_mode()
1200 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_adjust_link() local
1201 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); in mv643xx_eth_adjust_link()
1207 if (mp->phy->autoneg == AUTONEG_ENABLE) { in mv643xx_eth_adjust_link()
1215 if (mp->phy->speed == SPEED_1000) { in mv643xx_eth_adjust_link()
1224 if (mp->phy->speed == SPEED_100) in mv643xx_eth_adjust_link()
1229 if (mp->phy->duplex == DUPLEX_FULL) in mv643xx_eth_adjust_link()
1235 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in mv643xx_eth_adjust_link()
1241 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_stats() local
1248 for (i = 0; i < mp->txq_count; i++) { in mv643xx_eth_get_stats()
1249 struct tx_queue *txq = mp->txq + i; in mv643xx_eth_get_stats()
1263 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) in mib_read() argument
1265 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); in mib_read()
1268 static void mib_counters_clear(struct mv643xx_eth_private *mp) in mib_counters_clear() argument
1273 mib_read(mp, i); in mib_counters_clear()
1276 rdlp(mp, RX_DISCARD_FRAME_CNT); in mib_counters_clear()
1277 rdlp(mp, RX_OVERRUN_FRAME_CNT); in mib_counters_clear()
1280 static void mib_counters_update(struct mv643xx_eth_private *mp) in mib_counters_update() argument
1282 struct mib_counters *p = &mp->mib_counters; in mib_counters_update()
1284 spin_lock_bh(&mp->mib_counters_lock); in mib_counters_update()
1285 p->good_octets_received += mib_read(mp, 0x00); in mib_counters_update()
1286 p->bad_octets_received += mib_read(mp, 0x08); in mib_counters_update()
1287 p->internal_mac_transmit_err += mib_read(mp, 0x0c); in mib_counters_update()
1288 p->good_frames_received += mib_read(mp, 0x10); in mib_counters_update()
1289 p->bad_frames_received += mib_read(mp, 0x14); in mib_counters_update()
1290 p->broadcast_frames_received += mib_read(mp, 0x18); in mib_counters_update()
1291 p->multicast_frames_received += mib_read(mp, 0x1c); in mib_counters_update()
1292 p->frames_64_octets += mib_read(mp, 0x20); in mib_counters_update()
1293 p->frames_65_to_127_octets += mib_read(mp, 0x24); in mib_counters_update()
1294 p->frames_128_to_255_octets += mib_read(mp, 0x28); in mib_counters_update()
1295 p->frames_256_to_511_octets += mib_read(mp, 0x2c); in mib_counters_update()
1296 p->frames_512_to_1023_octets += mib_read(mp, 0x30); in mib_counters_update()
1297 p->frames_1024_to_max_octets += mib_read(mp, 0x34); in mib_counters_update()
1298 p->good_octets_sent += mib_read(mp, 0x38); in mib_counters_update()
1299 p->good_frames_sent += mib_read(mp, 0x40); in mib_counters_update()
1300 p->excessive_collision += mib_read(mp, 0x44); in mib_counters_update()
1301 p->multicast_frames_sent += mib_read(mp, 0x48); in mib_counters_update()
1302 p->broadcast_frames_sent += mib_read(mp, 0x4c); in mib_counters_update()
1303 p->unrec_mac_control_received += mib_read(mp, 0x50); in mib_counters_update()
1304 p->fc_sent += mib_read(mp, 0x54); in mib_counters_update()
1305 p->good_fc_received += mib_read(mp, 0x58); in mib_counters_update()
1306 p->bad_fc_received += mib_read(mp, 0x5c); in mib_counters_update()
1307 p->undersize_received += mib_read(mp, 0x60); in mib_counters_update()
1308 p->fragments_received += mib_read(mp, 0x64); in mib_counters_update()
1309 p->oversize_received += mib_read(mp, 0x68); in mib_counters_update()
1310 p->jabber_received += mib_read(mp, 0x6c); in mib_counters_update()
1311 p->mac_receive_error += mib_read(mp, 0x70); in mib_counters_update()
1312 p->bad_crc_event += mib_read(mp, 0x74); in mib_counters_update()
1313 p->collision += mib_read(mp, 0x78); in mib_counters_update()
1314 p->late_collision += mib_read(mp, 0x7c); in mib_counters_update()
1316 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); in mib_counters_update()
1317 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); in mib_counters_update()
1318 spin_unlock_bh(&mp->mib_counters_lock); in mib_counters_update()
1323 struct mv643xx_eth_private *mp = (void *)_mp; in mib_counters_timer_wrapper() local
1324 mib_counters_update(mp); in mib_counters_timer_wrapper()
1325 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); in mib_counters_timer_wrapper()
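Note: the MIB block behaves as clear-on-read counters, which is why mib_counters_clear() simply reads every offset (and the two RX_DISCARD/RX_OVERRUN registers) to zero the hardware, and why mib_counters_update() folds each 32-bit read into software totals under mib_counters_lock. The 30-second timer above is presumably sized so a counter cannot wrap unseen: a 32-bit octet counter at gigabit line rate (125 MB/s) wraps in roughly 34 seconds. A small sketch of the accumulate-on-read idea, with the clear-on-read behaviour simulated:

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t hw_counter;              /* simulated MIB register */

        static uint32_t mib_read_sim(void)
        {
                uint32_t val = hw_counter;
                hw_counter = 0;                  /* real MIB counters clear on read */
                return val;
        }

        int main(void)
        {
                uint64_t good_octets_received = 0;   /* software accumulator */

                /* Two "timer ticks", each harvesting the hardware count
                 * before the 32-bit register can wrap. */
                hw_counter = 4000000000u;
                good_octets_received += mib_read_sim();
                hw_counter = 3000000000u;
                good_octets_received += mib_read_sim();

                printf("%llu octets\n", (unsigned long long)good_octets_received);
                return 0;
        }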
1341 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) in get_rx_coal() argument
1343 u32 val = rdlp(mp, SDMA_CONFIG); in get_rx_coal()
1346 if (mp->shared->extended_rx_coal_limit) in get_rx_coal()
1352 do_div(temp, mp->t_clk); in get_rx_coal()
1357 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) in set_rx_coal() argument
1362 temp = (u64)usec * mp->t_clk; in set_rx_coal()
1366 val = rdlp(mp, SDMA_CONFIG); in set_rx_coal()
1367 if (mp->shared->extended_rx_coal_limit) { in set_rx_coal()
1379 wrlp(mp, SDMA_CONFIG, val); in set_rx_coal()
1382 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) in get_tx_coal() argument
1386 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; in get_tx_coal()
1388 do_div(temp, mp->t_clk); in get_tx_coal()
1393 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) in set_tx_coal() argument
1397 temp = (u64)usec * mp->t_clk; in set_tx_coal()
1404 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); in set_tx_coal()
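Note: the coalescing helpers above convert between microseconds and a hardware tick count: the setters compute temp = (u64)usec * mp->t_clk and then do_div() by a fixed constant, and the getters reverse it. The 64,000,000 divisor used below (a 64-cycle prescaler times the 10^6 usec-per-second factor) does not appear in the fragments above and is taken from the driver as an assumption; the driver also rounds up before dividing, which this sketch omits:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned int usec = 250;         /* probe's default rx coalescing */
                uint64_t t_clk = 133000000;      /* 133 MHz fallback clock */

                /* set path: microseconds -> register tick count */
                uint64_t ticks = (uint64_t)usec * t_clk / 64000000;

                /* get path: tick count -> microseconds */
                uint64_t back = ticks * 64000000 / t_clk;

                printf("%u usec -> %llu ticks -> %llu usec\n", usec,
                       (unsigned long long)ticks, (unsigned long long)back);
                return 0;
        }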
1468 mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, in mv643xx_eth_get_settings_phy() argument
1473 err = phy_read_status(mp->phy); in mv643xx_eth_get_settings_phy()
1475 err = phy_ethtool_gset(mp->phy, cmd); in mv643xx_eth_get_settings_phy()
1487 mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, in mv643xx_eth_get_settings_phyless() argument
1492 port_status = rdlp(mp, PORT_STATUS); in mv643xx_eth_get_settings_phyless()
1524 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_wol() local
1527 if (mp->phy) in mv643xx_eth_get_wol()
1528 phy_ethtool_get_wol(mp->phy, wol); in mv643xx_eth_get_wol()
1534 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_wol() local
1537 if (mp->phy == NULL) in mv643xx_eth_set_wol()
1540 err = phy_ethtool_set_wol(mp->phy, wol); in mv643xx_eth_set_wol()
1552 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_settings() local
1554 if (mp->phy != NULL) in mv643xx_eth_get_settings()
1555 return mv643xx_eth_get_settings_phy(mp, cmd); in mv643xx_eth_get_settings()
1557 return mv643xx_eth_get_settings_phyless(mp, cmd); in mv643xx_eth_get_settings()
1563 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_settings() local
1566 if (mp->phy == NULL) in mv643xx_eth_set_settings()
1574 ret = phy_ethtool_sset(mp->phy, cmd); in mv643xx_eth_set_settings()
1594 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_nway_reset() local
1596 if (mp->phy == NULL) in mv643xx_eth_nway_reset()
1599 return genphy_restart_aneg(mp->phy); in mv643xx_eth_nway_reset()
1605 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_coalesce() local
1607 ec->rx_coalesce_usecs = get_rx_coal(mp); in mv643xx_eth_get_coalesce()
1608 ec->tx_coalesce_usecs = get_tx_coal(mp); in mv643xx_eth_get_coalesce()
1616 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_coalesce() local
1618 set_rx_coal(mp, ec->rx_coalesce_usecs); in mv643xx_eth_set_coalesce()
1619 set_tx_coal(mp, ec->tx_coalesce_usecs); in mv643xx_eth_set_coalesce()
1627 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_ringparam() local
1632 er->rx_pending = mp->rx_ring_size; in mv643xx_eth_get_ringparam()
1633 er->tx_pending = mp->tx_ring_size; in mv643xx_eth_get_ringparam()
1639 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_ringparam() local
1644 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; in mv643xx_eth_set_ringparam()
1645 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, in mv643xx_eth_set_ringparam()
1647 if (mp->tx_ring_size != er->tx_pending) in mv643xx_eth_set_ringparam()
1649 mp->tx_ring_size, er->tx_pending); in mv643xx_eth_set_ringparam()
1667 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_features() local
1670 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); in mv643xx_eth_set_features()
1693 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_ethtool_stats() local
1697 mib_counters_update(mp); in mv643xx_eth_get_ethtool_stats()
1706 p = ((void *)mp->dev) + stat->netdev_off; in mv643xx_eth_get_ethtool_stats()
1708 p = ((void *)mp) + stat->mp_off; in mv643xx_eth_get_ethtool_stats()
1743 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) in uc_addr_get() argument
1745 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); in uc_addr_get()
1746 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); in uc_addr_get()
1756 static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) in uc_addr_set() argument
1758 wrlp(mp, MAC_ADDR_HIGH, in uc_addr_set()
1760 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); in uc_addr_set()
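Note: uc_addr_set() and uc_addr_get() split the station MAC across two registers: the first four octets packed into MAC_ADDR_HIGH and the last two into the low 16 bits of MAC_ADDR_LOW. Only the low-register write survives in the listing; the high-register packing below is reconstructed to mirror the visible low half and is an assumption to that extent:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                const uint8_t addr[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };

                uint32_t mac_h = ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
                                 ((uint32_t)addr[2] << 8)  |  (uint32_t)addr[3];
                uint32_t mac_l = ((uint32_t)addr[4] << 8)  |  (uint32_t)addr[5];

                printf("MAC_ADDR_HIGH=%08x MAC_ADDR_LOW=%04x\n",
                       (unsigned)mac_h, (unsigned)mac_l);
                return 0;
        }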
1786 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_program_unicast_filter() local
1791 uc_addr_set(mp, dev->dev_addr); in mv643xx_eth_program_unicast_filter()
1793 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; in mv643xx_eth_program_unicast_filter()
1802 int off = UNICAST_TABLE(mp->port_num) + i; in mv643xx_eth_program_unicast_filter()
1816 wrl(mp, off, v); in mv643xx_eth_program_unicast_filter()
1819 wrlp(mp, PORT_CONFIG, port_config); in mv643xx_eth_program_unicast_filter()
1842 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_program_multicast_filter() local
1853 port_num = mp->port_num; in mv643xx_eth_program_multicast_filter()
1856 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); in mv643xx_eth_program_multicast_filter()
1857 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); in mv643xx_eth_program_multicast_filter()
1887 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); in mv643xx_eth_program_multicast_filter()
1888 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); in mv643xx_eth_program_multicast_filter()
1918 static int rxq_init(struct mv643xx_eth_private *mp, int index) in rxq_init() argument
1920 struct rx_queue *rxq = mp->rxq + index; in rxq_init()
1927 rxq->rx_ring_size = mp->rx_ring_size; in rxq_init()
1935 if (index == 0 && size <= mp->rx_desc_sram_size) { in rxq_init()
1936 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, in rxq_init()
1937 mp->rx_desc_sram_size); in rxq_init()
1938 rxq->rx_desc_dma = mp->rx_desc_sram_addr; in rxq_init()
1940 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in rxq_init()
1946 netdev_err(mp->dev, in rxq_init()
1974 if (index == 0 && size <= mp->rx_desc_sram_size) in rxq_init()
1977 dma_free_coherent(mp->dev->dev.parent, size, in rxq_init()
1987 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_deinit() local
2000 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", in rxq_deinit()
2005 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) in rxq_deinit()
2008 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, in rxq_deinit()
2014 static int txq_init(struct mv643xx_eth_private *mp, int index) in txq_init() argument
2016 struct tx_queue *txq = mp->txq + index; in txq_init()
2024 txq->tx_ring_size = mp->tx_ring_size; in txq_init()
2039 if (index == 0 && size <= mp->tx_desc_sram_size) { in txq_init()
2040 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, in txq_init()
2041 mp->tx_desc_sram_size); in txq_init()
2042 txq->tx_desc_dma = mp->tx_desc_sram_addr; in txq_init()
2044 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2050 netdev_err(mp->dev, in txq_init()
2080 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2094 if (index == 0 && size <= mp->tx_desc_sram_size) in txq_init()
2097 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_init()
2104 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_deinit() local
2112 txq->tx_desc_area_size <= mp->tx_desc_sram_size) in txq_deinit()
2115 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_deinit()
2120 dma_free_coherent(mp->dev->dev.parent, in txq_deinit()
2127 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) in mv643xx_eth_collect_events() argument
2132 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; in mv643xx_eth_collect_events()
2139 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); in mv643xx_eth_collect_events()
2143 wrlp(mp, INT_CAUSE, ~int_cause); in mv643xx_eth_collect_events()
2144 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & in mv643xx_eth_collect_events()
2145 ~(rdlp(mp, TXQ_COMMAND) & 0xff); in mv643xx_eth_collect_events()
2146 mp->work_rx |= (int_cause & INT_RX) >> 2; in mv643xx_eth_collect_events()
2151 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); in mv643xx_eth_collect_events()
2153 mp->work_link = 1; in mv643xx_eth_collect_events()
2154 mp->work_tx |= int_cause_ext & INT_EXT_TX; in mv643xx_eth_collect_events()
2163 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_irq() local
2165 if (unlikely(!mv643xx_eth_collect_events(mp))) in mv643xx_eth_irq()
2168 wrlp(mp, INT_MASK, 0); in mv643xx_eth_irq()
2169 napi_schedule(&mp->napi); in mv643xx_eth_irq()
2174 static void handle_link_event(struct mv643xx_eth_private *mp) in handle_link_event() argument
2176 struct net_device *dev = mp->dev; in handle_link_event()
2182 port_status = rdlp(mp, PORT_STATUS); in handle_link_event()
2191 for (i = 0; i < mp->txq_count; i++) { in handle_link_event()
2192 struct tx_queue *txq = mp->txq + i; in handle_link_event()
2227 struct mv643xx_eth_private *mp; in mv643xx_eth_poll() local
2230 mp = container_of(napi, struct mv643xx_eth_private, napi); in mv643xx_eth_poll()
2232 if (unlikely(mp->oom)) { in mv643xx_eth_poll()
2233 mp->oom = 0; in mv643xx_eth_poll()
2234 del_timer(&mp->rx_oom); in mv643xx_eth_poll()
2243 if (mp->work_link) { in mv643xx_eth_poll()
2244 mp->work_link = 0; in mv643xx_eth_poll()
2245 handle_link_event(mp); in mv643xx_eth_poll()
2250 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; in mv643xx_eth_poll()
2251 if (likely(!mp->oom)) in mv643xx_eth_poll()
2252 queue_mask |= mp->work_rx_refill; in mv643xx_eth_poll()
2255 if (mv643xx_eth_collect_events(mp)) in mv643xx_eth_poll()
2267 if (mp->work_tx_end & queue_mask) { in mv643xx_eth_poll()
2268 txq_kick(mp->txq + queue); in mv643xx_eth_poll()
2269 } else if (mp->work_tx & queue_mask) { in mv643xx_eth_poll()
2270 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); in mv643xx_eth_poll()
2271 txq_maybe_wake(mp->txq + queue); in mv643xx_eth_poll()
2272 } else if (mp->work_rx & queue_mask) { in mv643xx_eth_poll()
2273 work_done += rxq_process(mp->rxq + queue, work_tbd); in mv643xx_eth_poll()
2274 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { in mv643xx_eth_poll()
2275 work_done += rxq_refill(mp->rxq + queue, work_tbd); in mv643xx_eth_poll()
2282 if (mp->oom) in mv643xx_eth_poll()
2283 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); in mv643xx_eth_poll()
2285 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_poll()
2293 struct mv643xx_eth_private *mp = (void *)data; in oom_timer_wrapper() local
2295 napi_schedule(&mp->napi); in oom_timer_wrapper()
2298 static void port_start(struct mv643xx_eth_private *mp) in port_start() argument
2306 if (mp->phy != NULL) { in port_start()
2309 mv643xx_eth_get_settings(mp->dev, &cmd); in port_start()
2310 phy_init_hw(mp->phy); in port_start()
2311 mv643xx_eth_set_settings(mp->dev, &cmd); in port_start()
2312 phy_start(mp->phy); in port_start()
2318 pscr = rdlp(mp, PORT_SERIAL_CONTROL); in port_start()
2321 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in port_start()
2324 if (mp->phy == NULL) in port_start()
2326 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in port_start()
2331 tx_set_rate(mp, 1000000000, 16777216); in port_start()
2332 for (i = 0; i < mp->txq_count; i++) { in port_start()
2333 struct tx_queue *txq = mp->txq + i; in port_start()
2345 mv643xx_eth_set_features(mp->dev, mp->dev->features); in port_start()
2350 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); in port_start()
2355 mv643xx_eth_program_unicast_filter(mp->dev); in port_start()
2360 for (i = 0; i < mp->rxq_count; i++) { in port_start()
2361 struct rx_queue *rxq = mp->rxq + i; in port_start()
2366 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); in port_start()
2372 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) in mv643xx_eth_recalc_skb_size() argument
2382 skb_size = mp->dev->mtu + 36; in mv643xx_eth_recalc_skb_size()
2389 mp->skb_size = (skb_size + 7) & ~7; in mv643xx_eth_recalc_skb_size()
2397 mp->skb_size += SKB_DMA_REALIGN; in mv643xx_eth_recalc_skb_size()
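Note: mv643xx_eth_recalc_skb_size() sizes receive buffers as mtu + 36 (per the driver's comment this covers hardware-prepended padding, the 14-byte ethernet header, VLAN tag room, and the FCS; that breakdown is stated here as an assumption), then rounds up to a multiple of 8 because the descriptor's buffer-size field ignores its low three bits. The rounding idiom, demonstrated on an aligned and an unaligned MTU:

        #include <stdio.h>

        int main(void)
        {
                int mtus[] = { 1500, 1499 };

                for (int i = 0; i < 2; i++) {
                        int skb_size = mtus[i] + 36;     /* header/padding/FCS overhead */

                        skb_size = (skb_size + 7) & ~7;  /* round up to 8-byte multiple */
                        printf("mtu=%d -> skb_size=%d\n", mtus[i], skb_size);
                }
                return 0;
        }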
2402 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_open() local
2406 wrlp(mp, INT_CAUSE, 0); in mv643xx_eth_open()
2407 wrlp(mp, INT_CAUSE_EXT, 0); in mv643xx_eth_open()
2408 rdlp(mp, INT_CAUSE_EXT); in mv643xx_eth_open()
2417 mv643xx_eth_recalc_skb_size(mp); in mv643xx_eth_open()
2419 napi_enable(&mp->napi); in mv643xx_eth_open()
2421 mp->int_mask = INT_EXT; in mv643xx_eth_open()
2423 for (i = 0; i < mp->rxq_count; i++) { in mv643xx_eth_open()
2424 err = rxq_init(mp, i); in mv643xx_eth_open()
2427 rxq_deinit(mp->rxq + i); in mv643xx_eth_open()
2431 rxq_refill(mp->rxq + i, INT_MAX); in mv643xx_eth_open()
2432 mp->int_mask |= INT_RX_0 << i; in mv643xx_eth_open()
2435 if (mp->oom) { in mv643xx_eth_open()
2436 mp->rx_oom.expires = jiffies + (HZ / 10); in mv643xx_eth_open()
2437 add_timer(&mp->rx_oom); in mv643xx_eth_open()
2440 for (i = 0; i < mp->txq_count; i++) { in mv643xx_eth_open()
2441 err = txq_init(mp, i); in mv643xx_eth_open()
2444 txq_deinit(mp->txq + i); in mv643xx_eth_open()
2447 mp->int_mask |= INT_TX_END_0 << i; in mv643xx_eth_open()
2450 add_timer(&mp->mib_counters_timer); in mv643xx_eth_open()
2451 port_start(mp); in mv643xx_eth_open()
2453 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); in mv643xx_eth_open()
2454 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_open()
2460 for (i = 0; i < mp->rxq_count; i++) in mv643xx_eth_open()
2461 rxq_deinit(mp->rxq + i); in mv643xx_eth_open()
2468 static void port_reset(struct mv643xx_eth_private *mp) in port_reset() argument
2473 for (i = 0; i < mp->rxq_count; i++) in port_reset()
2474 rxq_disable(mp->rxq + i); in port_reset()
2475 for (i = 0; i < mp->txq_count; i++) in port_reset()
2476 txq_disable(mp->txq + i); in port_reset()
2479 u32 ps = rdlp(mp, PORT_STATUS); in port_reset()
2487 data = rdlp(mp, PORT_SERIAL_CONTROL); in port_reset()
2491 wrlp(mp, PORT_SERIAL_CONTROL, data); in port_reset()
2496 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_stop() local
2499 wrlp(mp, INT_MASK_EXT, 0x00000000); in mv643xx_eth_stop()
2500 wrlp(mp, INT_MASK, 0x00000000); in mv643xx_eth_stop()
2501 rdlp(mp, INT_MASK); in mv643xx_eth_stop()
2503 napi_disable(&mp->napi); in mv643xx_eth_stop()
2505 del_timer_sync(&mp->rx_oom); in mv643xx_eth_stop()
2508 if (mp->phy) in mv643xx_eth_stop()
2509 phy_stop(mp->phy); in mv643xx_eth_stop()
2512 port_reset(mp); in mv643xx_eth_stop()
2514 mib_counters_update(mp); in mv643xx_eth_stop()
2515 del_timer_sync(&mp->mib_counters_timer); in mv643xx_eth_stop()
2517 for (i = 0; i < mp->rxq_count; i++) in mv643xx_eth_stop()
2518 rxq_deinit(mp->rxq + i); in mv643xx_eth_stop()
2519 for (i = 0; i < mp->txq_count; i++) in mv643xx_eth_stop()
2520 txq_deinit(mp->txq + i); in mv643xx_eth_stop()
2527 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_ioctl() local
2530 if (mp->phy == NULL) in mv643xx_eth_ioctl()
2533 ret = phy_mii_ioctl(mp->phy, ifr, cmd); in mv643xx_eth_ioctl()
2541 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_change_mtu() local
2547 mv643xx_eth_recalc_skb_size(mp); in mv643xx_eth_change_mtu()
2548 tx_set_rate(mp, 1000000000, 16777216); in mv643xx_eth_change_mtu()
2570 struct mv643xx_eth_private *mp; in tx_timeout_task() local
2572 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); in tx_timeout_task()
2573 if (netif_running(mp->dev)) { in tx_timeout_task()
2574 netif_tx_stop_all_queues(mp->dev); in tx_timeout_task()
2575 port_reset(mp); in tx_timeout_task()
2576 port_start(mp); in tx_timeout_task()
2577 netif_tx_wake_all_queues(mp->dev); in tx_timeout_task()
2583 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_tx_timeout() local
2587 schedule_work(&mp->tx_timeout_task); in mv643xx_eth_tx_timeout()
2593 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_netpoll() local
2595 wrlp(mp, INT_MASK, 0x00000000); in mv643xx_eth_netpoll()
2596 rdlp(mp, INT_MASK); in mv643xx_eth_netpoll()
2600 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_netpoll()
2885 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) in phy_addr_set() argument
2887 int addr_shift = 5 * mp->port_num; in phy_addr_set()
2890 data = rdl(mp, PHY_ADDR); in phy_addr_set()
2893 wrl(mp, PHY_ADDR, data); in phy_addr_set()
2896 static int phy_addr_get(struct mv643xx_eth_private *mp) in phy_addr_get() argument
2900 data = rdl(mp, PHY_ADDR); in phy_addr_get()
2902 return (data >> (5 * mp->port_num)) & 0x1f; in phy_addr_get()
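Note: phy_addr_set() and phy_addr_get() treat the shared PHY_ADDR register as an array of 5-bit fields, one per port, at bit position 5 * port_num. The read-modify-write in the setter (clearing the port's field before OR-ing in the new address) falls between the visible fragments above and is reconstructed here as an assumption:

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t phy_addr_reg;            /* simulated shared PHY_ADDR */

        static void phy_addr_set_sim(int port_num, int phy_addr)
        {
                int shift = 5 * port_num;

                phy_addr_reg &= ~(0x1fu << shift);   /* clear this port's field */
                phy_addr_reg |= (uint32_t)(phy_addr & 0x1f) << shift;
        }

        static int phy_addr_get_sim(int port_num)
        {
                return (phy_addr_reg >> (5 * port_num)) & 0x1f;
        }

        int main(void)
        {
                phy_addr_set_sim(0, 8);
                phy_addr_set_sim(1, 9);
                phy_addr_set_sim(2, 10);
                printf("port0=%d port1=%d port2=%d\n",
                       phy_addr_get_sim(0), phy_addr_get_sim(1), phy_addr_get_sim(2));
                return 0;
        }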
2905 static void set_params(struct mv643xx_eth_private *mp, in set_params() argument
2908 struct net_device *dev = mp->dev; in set_params()
2914 uc_addr_get(mp, dev->dev_addr); in set_params()
2916 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; in set_params()
2918 mp->rx_ring_size = pd->rx_queue_size; in set_params()
2919 mp->rx_desc_sram_addr = pd->rx_sram_addr; in set_params()
2920 mp->rx_desc_sram_size = pd->rx_sram_size; in set_params()
2922 mp->rxq_count = pd->rx_queue_count ? : 1; in set_params()
2928 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, in set_params()
2930 if (mp->tx_ring_size != tx_ring_size) in set_params()
2932 mp->tx_ring_size, tx_ring_size); in set_params()
2934 mp->tx_desc_sram_addr = pd->tx_sram_addr; in set_params()
2935 mp->tx_desc_sram_size = pd->tx_sram_size; in set_params()
2937 mp->txq_count = pd->tx_queue_count ? : 1; in set_params()
2940 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, in phy_scan() argument
2950 start = phy_addr_get(mp) & 0x1f; in phy_scan()
2965 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, in phy_scan()
2968 phy_addr_set(mp, addr); in phy_scan()
2976 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) in phy_init() argument
2978 struct phy_device *phy = mp->phy; in phy_init()
2994 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) in init_pscr() argument
2998 pscr = rdlp(mp, PORT_SERIAL_CONTROL); in init_pscr()
3001 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in init_pscr()
3005 if (mp->phy == NULL) { in init_pscr()
3019 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in init_pscr()
3042 struct mv643xx_eth_private *mp; in mv643xx_eth_probe() local
3062 mp = netdev_priv(dev); in mv643xx_eth_probe()
3063 platform_set_drvdata(pdev, mp); in mv643xx_eth_probe()
3065 mp->shared = platform_get_drvdata(pd->shared); in mv643xx_eth_probe()
3066 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); in mv643xx_eth_probe()
3067 mp->port_num = pd->port_number; in mv643xx_eth_probe()
3069 mp->dev = dev; in mv643xx_eth_probe()
3077 wrlp(mp, PORT_SERIAL_CONTROL1, in mv643xx_eth_probe()
3078 rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN); in mv643xx_eth_probe()
3084 mp->t_clk = 133000000; in mv643xx_eth_probe()
3085 mp->clk = devm_clk_get(&pdev->dev, NULL); in mv643xx_eth_probe()
3086 if (!IS_ERR(mp->clk)) { in mv643xx_eth_probe()
3087 clk_prepare_enable(mp->clk); in mv643xx_eth_probe()
3088 mp->t_clk = clk_get_rate(mp->clk); in mv643xx_eth_probe()
3089 } else if (!IS_ERR(mp->shared->clk)) { in mv643xx_eth_probe()
3090 mp->t_clk = clk_get_rate(mp->shared->clk); in mv643xx_eth_probe()
3093 set_params(mp, pd); in mv643xx_eth_probe()
3094 netif_set_real_num_tx_queues(dev, mp->txq_count); in mv643xx_eth_probe()
3095 netif_set_real_num_rx_queues(dev, mp->rxq_count); in mv643xx_eth_probe()
3099 mp->phy = of_phy_connect(mp->dev, pd->phy_node, in mv643xx_eth_probe()
3102 if (!mp->phy) in mv643xx_eth_probe()
3105 phy_addr_set(mp, mp->phy->addr); in mv643xx_eth_probe()
3107 mp->phy = phy_scan(mp, pd->phy_addr); in mv643xx_eth_probe()
3109 if (IS_ERR(mp->phy)) in mv643xx_eth_probe()
3110 err = PTR_ERR(mp->phy); in mv643xx_eth_probe()
3112 phy_init(mp, pd->speed, pd->duplex); in mv643xx_eth_probe()
3123 init_pscr(mp, pd->speed, pd->duplex); in mv643xx_eth_probe()
3126 mib_counters_clear(mp); in mv643xx_eth_probe()
3128 init_timer(&mp->mib_counters_timer); in mv643xx_eth_probe()
3129 mp->mib_counters_timer.data = (unsigned long)mp; in mv643xx_eth_probe()
3130 mp->mib_counters_timer.function = mib_counters_timer_wrapper; in mv643xx_eth_probe()
3131 mp->mib_counters_timer.expires = jiffies + 30 * HZ; in mv643xx_eth_probe()
3133 spin_lock_init(&mp->mib_counters_lock); in mv643xx_eth_probe()
3135 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); in mv643xx_eth_probe()
3137 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); in mv643xx_eth_probe()
3139 init_timer(&mp->rx_oom); in mv643xx_eth_probe()
3140 mp->rx_oom.data = (unsigned long)mp; in mv643xx_eth_probe()
3141 mp->rx_oom.function = oom_timer_wrapper; in mv643xx_eth_probe()
3164 if (mp->shared->win_protect) in mv643xx_eth_probe()
3165 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); in mv643xx_eth_probe()
3169 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); in mv643xx_eth_probe()
3171 set_rx_coal(mp, 250); in mv643xx_eth_probe()
3172 set_tx_coal(mp, 0); in mv643xx_eth_probe()
3179 mp->port_num, dev->dev_addr); in mv643xx_eth_probe()
3181 if (mp->tx_desc_sram_size > 0) in mv643xx_eth_probe()
3187 if (!IS_ERR(mp->clk)) in mv643xx_eth_probe()
3188 clk_disable_unprepare(mp->clk); in mv643xx_eth_probe()
3196 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); in mv643xx_eth_remove() local
3198 unregister_netdev(mp->dev); in mv643xx_eth_remove()
3199 if (mp->phy != NULL) in mv643xx_eth_remove()
3200 phy_disconnect(mp->phy); in mv643xx_eth_remove()
3201 cancel_work_sync(&mp->tx_timeout_task); in mv643xx_eth_remove()
3203 if (!IS_ERR(mp->clk)) in mv643xx_eth_remove()
3204 clk_disable_unprepare(mp->clk); in mv643xx_eth_remove()
3206 free_netdev(mp->dev); in mv643xx_eth_remove()
3213 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); in mv643xx_eth_shutdown() local
3216 wrlp(mp, INT_MASK, 0); in mv643xx_eth_shutdown()
3217 rdlp(mp, INT_MASK); in mv643xx_eth_shutdown()
3219 if (netif_running(mp->dev)) in mv643xx_eth_shutdown()
3220 port_reset(mp); in mv643xx_eth_shutdown()