Lines matching refs: priv
58 (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
60 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
67 #define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
69 #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
72 #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
75 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, in dmadesc_set_length_status() argument
81 static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, in dmadesc_get_length_status() argument
87 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, in dmadesc_set_addr() argument
98 if (priv->hw_params->flags & GENET_HAS_40BITS) in dmadesc_set_addr()
104 static inline void dmadesc_set(struct bcmgenet_priv *priv, in dmadesc_set() argument
107 dmadesc_set_length_status(priv, d, val); in dmadesc_set()
108 dmadesc_set_addr(priv, d, addr); in dmadesc_set()
111 static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, in dmadesc_get_addr() argument
123 if (priv->hw_params->flags & GENET_HAS_40BITS) in dmadesc_get_addr()
134 static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) in bcmgenet_rbuf_ctrl_get() argument
136 if (GENET_IS_V1(priv)) in bcmgenet_rbuf_ctrl_get()
137 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); in bcmgenet_rbuf_ctrl_get()
139 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); in bcmgenet_rbuf_ctrl_get()
142 static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) in bcmgenet_rbuf_ctrl_set() argument
144 if (GENET_IS_V1(priv)) in bcmgenet_rbuf_ctrl_set()
145 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); in bcmgenet_rbuf_ctrl_set()
147 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); in bcmgenet_rbuf_ctrl_set()
154 static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) in bcmgenet_tbuf_ctrl_get() argument
156 if (GENET_IS_V1(priv)) in bcmgenet_tbuf_ctrl_get()
157 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); in bcmgenet_tbuf_ctrl_get()
159 return __raw_readl(priv->base + in bcmgenet_tbuf_ctrl_get()
160 priv->hw_params->tbuf_offset + TBUF_CTRL); in bcmgenet_tbuf_ctrl_get()
163 static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) in bcmgenet_tbuf_ctrl_set() argument
165 if (GENET_IS_V1(priv)) in bcmgenet_tbuf_ctrl_set()
166 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); in bcmgenet_tbuf_ctrl_set()
168 __raw_writel(val, priv->base + in bcmgenet_tbuf_ctrl_set()
169 priv->hw_params->tbuf_offset + TBUF_CTRL); in bcmgenet_tbuf_ctrl_set()
172 static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) in bcmgenet_bp_mc_get() argument
174 if (GENET_IS_V1(priv)) in bcmgenet_bp_mc_get()
175 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); in bcmgenet_bp_mc_get()
177 return __raw_readl(priv->base + in bcmgenet_bp_mc_get()
178 priv->hw_params->tbuf_offset + TBUF_BP_MC); in bcmgenet_bp_mc_get()
181 static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) in bcmgenet_bp_mc_set() argument
183 if (GENET_IS_V1(priv)) in bcmgenet_bp_mc_set()
184 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); in bcmgenet_bp_mc_set()
186 __raw_writel(val, priv->base + in bcmgenet_bp_mc_set()
187 priv->hw_params->tbuf_offset + TBUF_BP_MC); in bcmgenet_bp_mc_set()
258 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, in bcmgenet_tdma_readl() argument
261 return __raw_readl(priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_readl()
265 static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, in bcmgenet_tdma_writel() argument
268 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_writel()
272 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, in bcmgenet_rdma_readl() argument
275 return __raw_readl(priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_readl()
279 static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, in bcmgenet_rdma_writel() argument
282 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_writel()
349 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, in bcmgenet_tdma_ring_readl() argument
353 return __raw_readl(priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_ring_readl()
358 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, in bcmgenet_tdma_ring_writel() argument
362 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_ring_writel()
367 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, in bcmgenet_rdma_ring_readl() argument
371 return __raw_readl(priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_ring_readl()
376 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, in bcmgenet_rdma_ring_writel() argument
380 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_ring_writel()
388 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_get_settings() local
393 if (!priv->phydev) in bcmgenet_get_settings()
396 return phy_ethtool_gset(priv->phydev, cmd); in bcmgenet_get_settings()
402 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_set_settings() local
407 if (!priv->phydev) in bcmgenet_set_settings()
410 return phy_ethtool_sset(priv->phydev, cmd); in bcmgenet_set_settings()
416 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_set_rx_csum() local
422 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); in bcmgenet_set_rx_csum()
429 priv->desc_rxchk_en = rx_csum_en; in bcmgenet_set_rx_csum()
434 if (rx_csum_en && priv->crc_fwd_en) in bcmgenet_set_rx_csum()
439 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); in bcmgenet_set_rx_csum()
447 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_set_tx_csum() local
451 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); in bcmgenet_set_tx_csum()
452 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); in bcmgenet_set_tx_csum()
464 priv->desc_64b_en = desc_64b_en; in bcmgenet_set_tx_csum()
466 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); in bcmgenet_set_tx_csum()
467 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); in bcmgenet_set_tx_csum()
489 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_get_msglevel() local
491 return priv->msg_enable; in bcmgenet_get_msglevel()
496 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_set_msglevel() local
498 priv->msg_enable = level; in bcmgenet_set_msglevel()
678 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) in bcmgenet_update_mib_counters() argument
698 val = bcmgenet_umac_readl(priv, in bcmgenet_update_mib_counters()
702 val = bcmgenet_umac_readl(priv, s->reg_offset); in bcmgenet_update_mib_counters()
705 bcmgenet_umac_writel(priv, 0, s->reg_offset); in bcmgenet_update_mib_counters()
710 p = (char *)priv + s->stat_offset; in bcmgenet_update_mib_counters()
719 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_get_ethtool_stats() local
723 bcmgenet_update_mib_counters(priv); in bcmgenet_get_ethtool_stats()
733 p = (char *)priv; in bcmgenet_get_ethtool_stats()
741 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_eee_enable_set() local
742 u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; in bcmgenet_eee_enable_set()
745 if (enable && !priv->clk_eee_enabled) { in bcmgenet_eee_enable_set()
746 clk_prepare_enable(priv->clk_eee); in bcmgenet_eee_enable_set()
747 priv->clk_eee_enabled = true; in bcmgenet_eee_enable_set()
750 reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); in bcmgenet_eee_enable_set()
755 bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); in bcmgenet_eee_enable_set()
758 reg = __raw_readl(priv->base + off); in bcmgenet_eee_enable_set()
763 __raw_writel(reg, priv->base + off); in bcmgenet_eee_enable_set()
766 reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); in bcmgenet_eee_enable_set()
771 bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); in bcmgenet_eee_enable_set()
773 if (!enable && priv->clk_eee_enabled) { in bcmgenet_eee_enable_set()
774 clk_disable_unprepare(priv->clk_eee); in bcmgenet_eee_enable_set()
775 priv->clk_eee_enabled = false; in bcmgenet_eee_enable_set()
778 priv->eee.eee_enabled = enable; in bcmgenet_eee_enable_set()
779 priv->eee.eee_active = enable; in bcmgenet_eee_enable_set()
784 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_get_eee() local
785 struct ethtool_eee *p = &priv->eee; in bcmgenet_get_eee()
787 if (GENET_IS_V1(priv)) in bcmgenet_get_eee()
792 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); in bcmgenet_get_eee()
794 return phy_ethtool_get_eee(priv->phydev, e); in bcmgenet_get_eee()
799 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_set_eee() local
800 struct ethtool_eee *p = &priv->eee; in bcmgenet_set_eee()
803 if (GENET_IS_V1(priv)) in bcmgenet_set_eee()
811 ret = phy_init_eee(priv->phydev, 0); in bcmgenet_set_eee()
813 netif_err(priv, hw, dev, "EEE initialization failed\n"); in bcmgenet_set_eee()
817 bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); in bcmgenet_set_eee()
821 return phy_ethtool_set_eee(priv->phydev, e); in bcmgenet_set_eee()
826 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_nway_reset() local
828 return genphy_restart_aneg(priv->phydev); in bcmgenet_nway_reset()
850 static int bcmgenet_power_down(struct bcmgenet_priv *priv, in bcmgenet_power_down() argument
858 phy_detach(priv->phydev); in bcmgenet_power_down()
862 ret = bcmgenet_wol_power_down_cfg(priv, mode); in bcmgenet_power_down()
867 if (priv->hw_params->flags & GENET_HAS_EXT) { in bcmgenet_power_down()
868 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); in bcmgenet_power_down()
871 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); in bcmgenet_power_down()
873 bcmgenet_phy_power_set(priv->dev, false); in bcmgenet_power_down()
883 static void bcmgenet_power_up(struct bcmgenet_priv *priv, in bcmgenet_power_up() argument
888 if (!(priv->hw_params->flags & GENET_HAS_EXT)) in bcmgenet_power_up()
891 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); in bcmgenet_power_up()
903 bcmgenet_wol_power_up_cfg(priv, mode); in bcmgenet_power_up()
909 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); in bcmgenet_power_up()
912 bcmgenet_mii_reset(priv->dev); in bcmgenet_power_up()
918 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_ioctl() local
928 if (!priv->phydev) in bcmgenet_ioctl()
931 val = phy_mii_ioctl(priv->phydev, rq, cmd); in bcmgenet_ioctl()
942 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, in bcmgenet_get_txcb() argument
969 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, in bcmgenet_rx_ring16_int_disable()
975 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, in bcmgenet_rx_ring16_int_enable()
981 bcmgenet_intrl2_1_writel(ring->priv, in bcmgenet_rx_ring_int_disable()
988 bcmgenet_intrl2_1_writel(ring->priv, in bcmgenet_rx_ring_int_enable()
995 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, in bcmgenet_tx_ring16_int_disable()
1001 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, in bcmgenet_tx_ring16_int_enable()
1007 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, in bcmgenet_tx_ring_int_enable()
1013 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, in bcmgenet_tx_ring_int_disable()
1021 struct bcmgenet_priv *priv = netdev_priv(dev); in __bcmgenet_tx_reclaim() local
1030 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); in __bcmgenet_tx_reclaim()
1038 netif_dbg(priv, tx_done, dev, in __bcmgenet_tx_reclaim()
1044 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; in __bcmgenet_tx_reclaim()
1102 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); in bcmgenet_tx_poll()
1116 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_tx_reclaim_all() local
1120 for (i = 0; i < priv->hw_params->tx_queues; i++) in bcmgenet_tx_reclaim_all()
1121 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); in bcmgenet_tx_reclaim_all()
1124 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); in bcmgenet_tx_reclaim_all()
1135 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_xmit_single() local
1136 struct device *kdev = &priv->pdev->dev; in bcmgenet_xmit_single()
1143 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); in bcmgenet_xmit_single()
1155 priv->mib.tx_dma_failed++; in bcmgenet_xmit_single()
1156 netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); in bcmgenet_xmit_single()
1164 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | in bcmgenet_xmit_single()
1170 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); in bcmgenet_xmit_single()
1181 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_xmit_frag() local
1182 struct device *kdev = &priv->pdev->dev; in bcmgenet_xmit_frag()
1187 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); in bcmgenet_xmit_frag()
1197 priv->mib.tx_dma_failed++; in bcmgenet_xmit_frag()
1198 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", in bcmgenet_xmit_frag()
1206 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, in bcmgenet_xmit_frag()
1208 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); in bcmgenet_xmit_frag()
1279 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_xmit() local
1302 ring = &priv->tx_rings[index]; in bcmgenet_xmit()
1320 if (priv->desc_64b_en) { in bcmgenet_xmit()
1363 bcmgenet_tdma_ring_writel(priv, ring->index, in bcmgenet_xmit()
1371 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, in bcmgenet_rx_refill() argument
1374 struct device *kdev = &priv->pdev->dev; in bcmgenet_rx_refill()
1380 skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); in bcmgenet_rx_refill()
1382 priv->mib.alloc_rx_buff_failed++; in bcmgenet_rx_refill()
1383 netif_err(priv, rx_err, priv->dev, in bcmgenet_rx_refill()
1389 mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, in bcmgenet_rx_refill()
1392 priv->mib.rx_dma_failed++; in bcmgenet_rx_refill()
1394 netif_err(priv, rx_err, priv->dev, in bcmgenet_rx_refill()
1403 priv->rx_buf_len, DMA_FROM_DEVICE); in bcmgenet_rx_refill()
1408 dmadesc_set_addr(priv, cb->bd_addr, mapping); in bcmgenet_rx_refill()
1420 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_desc_rx() local
1421 struct net_device *dev = priv->dev; in bcmgenet_desc_rx()
1432 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); in bcmgenet_desc_rx()
1445 bcmgenet_rdma_ring_writel(priv, ring->index, 0, in bcmgenet_desc_rx()
1458 netif_dbg(priv, rx_status, dev, in bcmgenet_desc_rx()
1463 cb = &priv->rx_cbs[ring->read_ptr]; in bcmgenet_desc_rx()
1464 skb = bcmgenet_rx_refill(priv, cb); in bcmgenet_desc_rx()
1472 if (!priv->desc_64b_en) { in bcmgenet_desc_rx()
1474 dmadesc_get_length_status(priv, cb->bd_addr); in bcmgenet_desc_rx()
1488 netif_dbg(priv, rx_status, dev, in bcmgenet_desc_rx()
1494 netif_err(priv, rx_status, dev, in bcmgenet_desc_rx()
1508 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", in bcmgenet_desc_rx()
1524 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && in bcmgenet_desc_rx()
1525 priv->desc_rxchk_en; in bcmgenet_desc_rx()
1528 if (priv->desc_64b_en) { in bcmgenet_desc_rx()
1540 if (priv->crc_fwd_en) { in bcmgenet_desc_rx()
1546 skb->protocol = eth_type_trans(skb, priv->dev); in bcmgenet_desc_rx()
1554 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); in bcmgenet_desc_rx()
1564 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); in bcmgenet_desc_rx()
1588 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, in bcmgenet_alloc_rx_buffers() argument
1595 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); in bcmgenet_alloc_rx_buffers()
1600 skb = bcmgenet_rx_refill(priv, cb); in bcmgenet_alloc_rx_buffers()
1610 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) in bcmgenet_free_rx_buffers() argument
1615 for (i = 0; i < priv->num_rx_bds; i++) { in bcmgenet_free_rx_buffers()
1616 cb = &priv->rx_cbs[i]; in bcmgenet_free_rx_buffers()
1619 dma_unmap_single(&priv->dev->dev, in bcmgenet_free_rx_buffers()
1621 priv->rx_buf_len, DMA_FROM_DEVICE); in bcmgenet_free_rx_buffers()
1630 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) in umac_enable_set() argument
1634 reg = bcmgenet_umac_readl(priv, UMAC_CMD); in umac_enable_set()
1639 bcmgenet_umac_writel(priv, reg, UMAC_CMD); in umac_enable_set()
1648 static int reset_umac(struct bcmgenet_priv *priv) in reset_umac() argument
1650 struct device *kdev = &priv->pdev->dev; in reset_umac()
1655 bcmgenet_rbuf_ctrl_set(priv, 0); in reset_umac()
1659 bcmgenet_umac_writel(priv, 0, UMAC_CMD); in reset_umac()
1662 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); in reset_umac()
1664 reg = bcmgenet_umac_readl(priv, UMAC_CMD); in reset_umac()
1680 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) in bcmgenet_intr_disable() argument
1683 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); in bcmgenet_intr_disable()
1684 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); in bcmgenet_intr_disable()
1685 bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); in bcmgenet_intr_disable()
1686 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); in bcmgenet_intr_disable()
1687 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); in bcmgenet_intr_disable()
1688 bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); in bcmgenet_intr_disable()
1691 static int init_umac(struct bcmgenet_priv *priv) in init_umac() argument
1693 struct device *kdev = &priv->pdev->dev; in init_umac()
1700 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); in init_umac()
1702 ret = reset_umac(priv); in init_umac()
1706 bcmgenet_umac_writel(priv, 0, UMAC_CMD); in init_umac()
1708 bcmgenet_umac_writel(priv, in init_umac()
1711 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); in init_umac()
1713 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); in init_umac()
1716 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); in init_umac()
1718 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); in init_umac()
1720 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) in init_umac()
1721 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); in init_umac()
1723 bcmgenet_intr_disable(priv); in init_umac()
1732 if (phy_is_internal(priv->phydev)) { in init_umac()
1734 } else if (priv->ext_phy) { in init_umac()
1736 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { in init_umac()
1737 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) in init_umac()
1740 reg = bcmgenet_bp_mc_get(priv); in init_umac()
1741 reg |= BIT(priv->hw_params->bp_in_en_shift); in init_umac()
1744 if (netif_is_multiqueue(priv->dev)) in init_umac()
1745 reg |= priv->hw_params->bp_in_mask; in init_umac()
1747 reg &= ~priv->hw_params->bp_in_mask; in init_umac()
1748 bcmgenet_bp_mc_set(priv, reg); in init_umac()
1752 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) in init_umac()
1756 for (i = 0; i < priv->hw_params->rx_queues; ++i) in init_umac()
1760 for (i = 0; i < priv->hw_params->tx_queues; ++i) in init_umac()
1763 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); in init_umac()
1764 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); in init_umac()
1773 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, in bcmgenet_init_tx_ring() argument
1777 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; in bcmgenet_init_tx_ring()
1778 u32 words_per_bd = WORDS_PER_BD(priv); in bcmgenet_init_tx_ring()
1782 ring->priv = priv; in bcmgenet_init_tx_ring()
1793 ring->cbs = priv->tx_cbs + start_ptr; in bcmgenet_init_tx_ring()
1807 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); in bcmgenet_init_tx_ring()
1808 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); in bcmgenet_init_tx_ring()
1809 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); in bcmgenet_init_tx_ring()
1811 bcmgenet_tdma_ring_writel(priv, index, flow_period_val, in bcmgenet_init_tx_ring()
1813 bcmgenet_tdma_ring_writel(priv, index, in bcmgenet_init_tx_ring()
1818 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, in bcmgenet_init_tx_ring()
1820 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, in bcmgenet_init_tx_ring()
1822 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, in bcmgenet_init_tx_ring()
1824 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, in bcmgenet_init_tx_ring()
1829 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, in bcmgenet_init_rx_ring() argument
1833 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; in bcmgenet_init_rx_ring()
1834 u32 words_per_bd = WORDS_PER_BD(priv); in bcmgenet_init_rx_ring()
1837 ring->priv = priv; in bcmgenet_init_rx_ring()
1846 ring->cbs = priv->rx_cbs + start_ptr; in bcmgenet_init_rx_ring()
1853 ret = bcmgenet_alloc_rx_buffers(priv, ring); in bcmgenet_init_rx_ring()
1857 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); in bcmgenet_init_rx_ring()
1858 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); in bcmgenet_init_rx_ring()
1859 bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); in bcmgenet_init_rx_ring()
1860 bcmgenet_rdma_ring_writel(priv, index, in bcmgenet_init_rx_ring()
1863 bcmgenet_rdma_ring_writel(priv, index, in bcmgenet_init_rx_ring()
1869 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, in bcmgenet_init_rx_ring()
1871 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, in bcmgenet_init_rx_ring()
1873 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, in bcmgenet_init_rx_ring()
1875 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, in bcmgenet_init_rx_ring()
1881 static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) in bcmgenet_init_tx_napi() argument
1886 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_init_tx_napi()
1887 ring = &priv->tx_rings[i]; in bcmgenet_init_tx_napi()
1888 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); in bcmgenet_init_tx_napi()
1891 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_init_tx_napi()
1892 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); in bcmgenet_init_tx_napi()
1895 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) in bcmgenet_enable_tx_napi() argument
1900 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_enable_tx_napi()
1901 ring = &priv->tx_rings[i]; in bcmgenet_enable_tx_napi()
1905 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_enable_tx_napi()
1909 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) in bcmgenet_disable_tx_napi() argument
1914 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_disable_tx_napi()
1915 ring = &priv->tx_rings[i]; in bcmgenet_disable_tx_napi()
1919 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_disable_tx_napi()
1923 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) in bcmgenet_fini_tx_napi() argument
1928 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_fini_tx_napi()
1929 ring = &priv->tx_rings[i]; in bcmgenet_fini_tx_napi()
1933 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_fini_tx_napi()
1954 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_init_tx_queues() local
1959 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); in bcmgenet_init_tx_queues()
1962 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); in bcmgenet_init_tx_queues()
1968 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); in bcmgenet_init_tx_queues()
1971 for (i = 0; i < priv->hw_params->tx_queues; i++) { in bcmgenet_init_tx_queues()
1972 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, in bcmgenet_init_tx_queues()
1973 i * priv->hw_params->tx_bds_per_q, in bcmgenet_init_tx_queues()
1974 (i + 1) * priv->hw_params->tx_bds_per_q); in bcmgenet_init_tx_queues()
1982 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, in bcmgenet_init_tx_queues()
1983 priv->hw_params->tx_queues * in bcmgenet_init_tx_queues()
1984 priv->hw_params->tx_bds_per_q, in bcmgenet_init_tx_queues()
1989 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << in bcmgenet_init_tx_queues()
1993 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); in bcmgenet_init_tx_queues()
1994 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); in bcmgenet_init_tx_queues()
1995 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); in bcmgenet_init_tx_queues()
1998 bcmgenet_init_tx_napi(priv); in bcmgenet_init_tx_queues()
2001 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); in bcmgenet_init_tx_queues()
2006 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); in bcmgenet_init_tx_queues()
2009 static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) in bcmgenet_init_rx_napi() argument
2014 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_init_rx_napi()
2015 ring = &priv->rx_rings[i]; in bcmgenet_init_rx_napi()
2016 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); in bcmgenet_init_rx_napi()
2019 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_init_rx_napi()
2020 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); in bcmgenet_init_rx_napi()
2023 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) in bcmgenet_enable_rx_napi() argument
2028 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_enable_rx_napi()
2029 ring = &priv->rx_rings[i]; in bcmgenet_enable_rx_napi()
2033 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_enable_rx_napi()
2037 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) in bcmgenet_disable_rx_napi() argument
2042 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_disable_rx_napi()
2043 ring = &priv->rx_rings[i]; in bcmgenet_disable_rx_napi()
2047 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_disable_rx_napi()
2051 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) in bcmgenet_fini_rx_napi() argument
2056 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_fini_rx_napi()
2057 ring = &priv->rx_rings[i]; in bcmgenet_fini_rx_napi()
2061 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_fini_rx_napi()
2074 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_init_rx_queues() local
2081 dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); in bcmgenet_init_rx_queues()
2084 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); in bcmgenet_init_rx_queues()
2090 for (i = 0; i < priv->hw_params->rx_queues; i++) { in bcmgenet_init_rx_queues()
2091 ret = bcmgenet_init_rx_ring(priv, i, in bcmgenet_init_rx_queues()
2092 priv->hw_params->rx_bds_per_q, in bcmgenet_init_rx_queues()
2093 i * priv->hw_params->rx_bds_per_q, in bcmgenet_init_rx_queues()
2095 priv->hw_params->rx_bds_per_q); in bcmgenet_init_rx_queues()
2104 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, in bcmgenet_init_rx_queues()
2105 priv->hw_params->rx_queues * in bcmgenet_init_rx_queues()
2106 priv->hw_params->rx_bds_per_q, in bcmgenet_init_rx_queues()
2115 bcmgenet_init_rx_napi(priv); in bcmgenet_init_rx_queues()
2118 bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); in bcmgenet_init_rx_queues()
2123 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); in bcmgenet_init_rx_queues()
2128 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) in bcmgenet_dma_teardown() argument
2135 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); in bcmgenet_dma_teardown()
2137 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); in bcmgenet_dma_teardown()
2141 reg = bcmgenet_tdma_readl(priv, DMA_STATUS); in bcmgenet_dma_teardown()
2149 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); in bcmgenet_dma_teardown()
2157 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); in bcmgenet_dma_teardown()
2159 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); in bcmgenet_dma_teardown()
2164 reg = bcmgenet_rdma_readl(priv, DMA_STATUS); in bcmgenet_dma_teardown()
2172 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); in bcmgenet_dma_teardown()
2179 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) in bcmgenet_fini_dma() argument
2183 bcmgenet_fini_rx_napi(priv); in bcmgenet_fini_dma()
2184 bcmgenet_fini_tx_napi(priv); in bcmgenet_fini_dma()
2187 bcmgenet_dma_teardown(priv); in bcmgenet_fini_dma()
2189 for (i = 0; i < priv->num_tx_bds; i++) { in bcmgenet_fini_dma()
2190 if (priv->tx_cbs[i].skb != NULL) { in bcmgenet_fini_dma()
2191 dev_kfree_skb(priv->tx_cbs[i].skb); in bcmgenet_fini_dma()
2192 priv->tx_cbs[i].skb = NULL; in bcmgenet_fini_dma()
2196 bcmgenet_free_rx_buffers(priv); in bcmgenet_fini_dma()
2197 kfree(priv->rx_cbs); in bcmgenet_fini_dma()
2198 kfree(priv->tx_cbs); in bcmgenet_fini_dma()
2202 static int bcmgenet_init_dma(struct bcmgenet_priv *priv) in bcmgenet_init_dma() argument
2208 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); in bcmgenet_init_dma()
2211 priv->rx_bds = priv->base + priv->hw_params->rdma_offset; in bcmgenet_init_dma()
2212 priv->num_rx_bds = TOTAL_DESC; in bcmgenet_init_dma()
2213 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), in bcmgenet_init_dma()
2215 if (!priv->rx_cbs) in bcmgenet_init_dma()
2218 for (i = 0; i < priv->num_rx_bds; i++) { in bcmgenet_init_dma()
2219 cb = priv->rx_cbs + i; in bcmgenet_init_dma()
2220 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; in bcmgenet_init_dma()
2224 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; in bcmgenet_init_dma()
2225 priv->num_tx_bds = TOTAL_DESC; in bcmgenet_init_dma()
2226 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), in bcmgenet_init_dma()
2228 if (!priv->tx_cbs) { in bcmgenet_init_dma()
2229 kfree(priv->rx_cbs); in bcmgenet_init_dma()
2233 for (i = 0; i < priv->num_tx_bds; i++) { in bcmgenet_init_dma()
2234 cb = priv->tx_cbs + i; in bcmgenet_init_dma()
2235 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; in bcmgenet_init_dma()
2239 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); in bcmgenet_init_dma()
2242 ret = bcmgenet_init_rx_queues(priv->dev); in bcmgenet_init_dma()
2244 netdev_err(priv->dev, "failed to initialize Rx queues\n"); in bcmgenet_init_dma()
2245 bcmgenet_free_rx_buffers(priv); in bcmgenet_init_dma()
2246 kfree(priv->rx_cbs); in bcmgenet_init_dma()
2247 kfree(priv->tx_cbs); in bcmgenet_init_dma()
2252 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); in bcmgenet_init_dma()
2255 bcmgenet_init_tx_queues(priv->dev); in bcmgenet_init_dma()
2263 struct bcmgenet_priv *priv = container_of( in bcmgenet_irq_task() local
2266 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); in bcmgenet_irq_task()
2268 if (priv->irq0_stat & UMAC_IRQ_MPD_R) { in bcmgenet_irq_task()
2269 priv->irq0_stat &= ~UMAC_IRQ_MPD_R; in bcmgenet_irq_task()
2270 netif_dbg(priv, wol, priv->dev, in bcmgenet_irq_task()
2272 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); in bcmgenet_irq_task()
2276 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && in bcmgenet_irq_task()
2277 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) { in bcmgenet_irq_task()
2278 phy_mac_interrupt(priv->phydev, in bcmgenet_irq_task()
2279 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); in bcmgenet_irq_task()
2280 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; in bcmgenet_irq_task()
2287 struct bcmgenet_priv *priv = dev_id; in bcmgenet_isr1() local
2293 priv->irq1_stat = in bcmgenet_isr1()
2294 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & in bcmgenet_isr1()
2295 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); in bcmgenet_isr1()
2298 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); in bcmgenet_isr1()
2300 netif_dbg(priv, intr, priv->dev, in bcmgenet_isr1()
2301 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); in bcmgenet_isr1()
2304 for (index = 0; index < priv->hw_params->rx_queues; index++) { in bcmgenet_isr1()
2305 if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) in bcmgenet_isr1()
2308 rx_ring = &priv->rx_rings[index]; in bcmgenet_isr1()
2317 for (index = 0; index < priv->hw_params->tx_queues; index++) { in bcmgenet_isr1()
2318 if (!(priv->irq1_stat & BIT(index))) in bcmgenet_isr1()
2321 tx_ring = &priv->tx_rings[index]; in bcmgenet_isr1()
2335 struct bcmgenet_priv *priv = dev_id; in bcmgenet_isr0() local
2340 priv->irq0_stat = in bcmgenet_isr0()
2341 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & in bcmgenet_isr0()
2342 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); in bcmgenet_isr0()
2345 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); in bcmgenet_isr0()
2347 netif_dbg(priv, intr, priv->dev, in bcmgenet_isr0()
2348 "IRQ=0x%x\n", priv->irq0_stat); in bcmgenet_isr0()
2350 if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { in bcmgenet_isr0()
2351 rx_ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_isr0()
2359 if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { in bcmgenet_isr0()
2360 tx_ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_isr0()
2368 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | in bcmgenet_isr0()
2375 schedule_work(&priv->bcmgenet_irq_work); in bcmgenet_isr0()
2378 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && in bcmgenet_isr0()
2379 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { in bcmgenet_isr0()
2380 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); in bcmgenet_isr0()
2381 wake_up(&priv->wq); in bcmgenet_isr0()
2389 struct bcmgenet_priv *priv = dev_id; in bcmgenet_wol_isr() local
2391 pm_wakeup_event(&priv->pdev->dev, 0); in bcmgenet_wol_isr()
2396 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) in bcmgenet_umac_reset() argument
2400 reg = bcmgenet_rbuf_ctrl_get(priv); in bcmgenet_umac_reset()
2402 bcmgenet_rbuf_ctrl_set(priv, reg); in bcmgenet_umac_reset()
2406 bcmgenet_rbuf_ctrl_set(priv, reg); in bcmgenet_umac_reset()
2410 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, in bcmgenet_set_hw_addr() argument
2413 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | in bcmgenet_set_hw_addr()
2415 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); in bcmgenet_set_hw_addr()
2419 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) in bcmgenet_dma_disable() argument
2426 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); in bcmgenet_dma_disable()
2428 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); in bcmgenet_dma_disable()
2430 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); in bcmgenet_dma_disable()
2432 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); in bcmgenet_dma_disable()
2434 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); in bcmgenet_dma_disable()
2436 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); in bcmgenet_dma_disable()
2441 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) in bcmgenet_enable_dma() argument
2445 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); in bcmgenet_enable_dma()
2447 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); in bcmgenet_enable_dma()
2449 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); in bcmgenet_enable_dma()
2451 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); in bcmgenet_enable_dma()
2454 static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, in bcmgenet_hfb_is_filter_enabled() argument
2461 reg = bcmgenet_hfb_reg_readl(priv, offset); in bcmgenet_hfb_is_filter_enabled()
2465 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) in bcmgenet_hfb_enable_filter() argument
2471 reg = bcmgenet_hfb_reg_readl(priv, offset); in bcmgenet_hfb_enable_filter()
2473 bcmgenet_hfb_reg_writel(priv, reg, offset); in bcmgenet_hfb_enable_filter()
2476 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, in bcmgenet_hfb_set_filter_rx_queue_mapping() argument
2483 reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); in bcmgenet_hfb_set_filter_rx_queue_mapping()
2486 bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); in bcmgenet_hfb_set_filter_rx_queue_mapping()
2489 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, in bcmgenet_hfb_set_filter_length() argument
2496 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * in bcmgenet_hfb_set_filter_length()
2498 reg = bcmgenet_hfb_reg_readl(priv, offset); in bcmgenet_hfb_set_filter_length()
2501 bcmgenet_hfb_reg_writel(priv, reg, offset); in bcmgenet_hfb_set_filter_length()
2504 static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) in bcmgenet_hfb_find_unused_filter() argument
2508 for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) in bcmgenet_hfb_find_unused_filter()
2509 if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) in bcmgenet_hfb_find_unused_filter()
2550 int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, in bcmgenet_hfb_add_filter() argument
2556 f_index = bcmgenet_hfb_find_unused_filter(priv); in bcmgenet_hfb_add_filter()
2560 if (f_length > priv->hw_params->hfb_filter_size) in bcmgenet_hfb_add_filter()
2564 bcmgenet_hfb_writel(priv, f_data[i], in bcmgenet_hfb_add_filter()
2565 (f_index * priv->hw_params->hfb_filter_size + i) * in bcmgenet_hfb_add_filter()
2568 bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); in bcmgenet_hfb_add_filter()
2569 bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); in bcmgenet_hfb_add_filter()
2570 bcmgenet_hfb_enable_filter(priv, f_index); in bcmgenet_hfb_add_filter()
2571 bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); in bcmgenet_hfb_add_filter()
2580 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) in bcmgenet_hfb_clear() argument
2584 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); in bcmgenet_hfb_clear()
2585 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); in bcmgenet_hfb_clear()
2586 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); in bcmgenet_hfb_clear()
2589 bcmgenet_rdma_writel(priv, 0x0, i); in bcmgenet_hfb_clear()
2591 for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) in bcmgenet_hfb_clear()
2592 bcmgenet_hfb_reg_writel(priv, 0x0, in bcmgenet_hfb_clear()
2595 for (i = 0; i < priv->hw_params->hfb_filter_cnt * in bcmgenet_hfb_clear()
2596 priv->hw_params->hfb_filter_size; i++) in bcmgenet_hfb_clear()
2597 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); in bcmgenet_hfb_clear()
2600 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) in bcmgenet_hfb_init() argument
2602 if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) in bcmgenet_hfb_init()
2605 bcmgenet_hfb_clear(priv); in bcmgenet_hfb_init()
2610 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_netif_start() local
2613 bcmgenet_enable_rx_napi(priv); in bcmgenet_netif_start()
2614 bcmgenet_enable_tx_napi(priv); in bcmgenet_netif_start()
2616 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); in bcmgenet_netif_start()
2620 phy_start(priv->phydev); in bcmgenet_netif_start()
2625 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_open() local
2630 netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); in bcmgenet_open()
2633 if (!IS_ERR(priv->clk)) in bcmgenet_open()
2634 clk_prepare_enable(priv->clk); in bcmgenet_open()
2639 if (phy_is_internal(priv->phydev)) in bcmgenet_open()
2640 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); in bcmgenet_open()
2643 bcmgenet_umac_reset(priv); in bcmgenet_open()
2645 ret = init_umac(priv); in bcmgenet_open()
2650 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); in bcmgenet_open()
2653 reg = bcmgenet_umac_readl(priv, UMAC_CMD); in bcmgenet_open()
2654 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); in bcmgenet_open()
2656 bcmgenet_set_hw_addr(priv, dev->dev_addr); in bcmgenet_open()
2658 if (phy_is_internal(priv->phydev)) { in bcmgenet_open()
2659 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); in bcmgenet_open()
2661 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); in bcmgenet_open()
2665 dma_ctrl = bcmgenet_dma_disable(priv); in bcmgenet_open()
2668 ret = bcmgenet_init_dma(priv); in bcmgenet_open()
2675 bcmgenet_enable_dma(priv, dma_ctrl); in bcmgenet_open()
2678 bcmgenet_hfb_init(priv); in bcmgenet_open()
2680 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, in bcmgenet_open()
2681 dev->name, priv); in bcmgenet_open()
2683 netdev_err(dev, "can't request IRQ %d\n", priv->irq0); in bcmgenet_open()
2687 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, in bcmgenet_open()
2688 dev->name, priv); in bcmgenet_open()
2690 netdev_err(dev, "can't request IRQ %d\n", priv->irq1); in bcmgenet_open()
2695 bcmgenet_mii_config(priv->dev, false); in bcmgenet_open()
2697 phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, in bcmgenet_open()
2698 priv->phy_interface); in bcmgenet_open()
2705 free_irq(priv->irq0, dev); in bcmgenet_open()
2707 bcmgenet_fini_dma(priv); in bcmgenet_open()
2709 if (!IS_ERR(priv->clk)) in bcmgenet_open()
2710 clk_disable_unprepare(priv->clk); in bcmgenet_open()
2716 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_netif_stop() local
2719 phy_stop(priv->phydev); in bcmgenet_netif_stop()
2720 bcmgenet_intr_disable(priv); in bcmgenet_netif_stop()
2721 bcmgenet_disable_rx_napi(priv); in bcmgenet_netif_stop()
2722 bcmgenet_disable_tx_napi(priv); in bcmgenet_netif_stop()
2727 cancel_work_sync(&priv->bcmgenet_irq_work); in bcmgenet_netif_stop()
2729 priv->old_link = -1; in bcmgenet_netif_stop()
2730 priv->old_speed = -1; in bcmgenet_netif_stop()
2731 priv->old_duplex = -1; in bcmgenet_netif_stop()
2732 priv->old_pause = -1; in bcmgenet_netif_stop()
2737 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_close() local
2740 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); in bcmgenet_close()
2745 phy_disconnect(priv->phydev); in bcmgenet_close()
2748 umac_enable_set(priv, CMD_RX_EN, false); in bcmgenet_close()
2750 ret = bcmgenet_dma_teardown(priv); in bcmgenet_close()
2755 umac_enable_set(priv, CMD_TX_EN, false); in bcmgenet_close()
2759 bcmgenet_fini_dma(priv); in bcmgenet_close()
2761 free_irq(priv->irq0, priv); in bcmgenet_close()
2762 free_irq(priv->irq1, priv); in bcmgenet_close()
2764 if (phy_is_internal(priv->phydev)) in bcmgenet_close()
2765 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); in bcmgenet_close()
2767 if (!IS_ERR(priv->clk)) in bcmgenet_close()
2768 clk_disable_unprepare(priv->clk); in bcmgenet_close()
2775 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_timeout() local
2777 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); in bcmgenet_timeout()
2788 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, in bcmgenet_set_mdf_addr() argument
2795 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], in bcmgenet_set_mdf_addr()
2797 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | in bcmgenet_set_mdf_addr()
2800 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); in bcmgenet_set_mdf_addr()
2802 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); in bcmgenet_set_mdf_addr()
2809 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_set_rx_mode() local
2814 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); in bcmgenet_set_rx_mode()
2817 reg = bcmgenet_umac_readl(priv, UMAC_CMD); in bcmgenet_set_rx_mode()
2820 bcmgenet_umac_writel(priv, reg, UMAC_CMD); in bcmgenet_set_rx_mode()
2821 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); in bcmgenet_set_rx_mode()
2825 bcmgenet_umac_writel(priv, reg, UMAC_CMD); in bcmgenet_set_rx_mode()
2838 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); in bcmgenet_set_rx_mode()
2840 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); in bcmgenet_set_rx_mode()
2847 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); in bcmgenet_set_rx_mode()
2853 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); in bcmgenet_set_rx_mode()
2957 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) in bcmgenet_set_hw_params() argument
2964 if (GENET_IS_V4(priv)) { in bcmgenet_set_hw_params()
2967 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; in bcmgenet_set_hw_params()
2968 priv->version = GENET_V4; in bcmgenet_set_hw_params()
2969 } else if (GENET_IS_V3(priv)) { in bcmgenet_set_hw_params()
2972 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; in bcmgenet_set_hw_params()
2973 priv->version = GENET_V3; in bcmgenet_set_hw_params()
2974 } else if (GENET_IS_V2(priv)) { in bcmgenet_set_hw_params()
2977 priv->dma_rx_chk_bit = DMA_RX_CHK_V12; in bcmgenet_set_hw_params()
2978 priv->version = GENET_V2; in bcmgenet_set_hw_params()
2979 } else if (GENET_IS_V1(priv)) { in bcmgenet_set_hw_params()
2982 priv->dma_rx_chk_bit = DMA_RX_CHK_V12; in bcmgenet_set_hw_params()
2983 priv->version = GENET_V1; in bcmgenet_set_hw_params()
2987 priv->hw_params = &bcmgenet_hw_params[priv->version]; in bcmgenet_set_hw_params()
2988 params = priv->hw_params; in bcmgenet_set_hw_params()
2991 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); in bcmgenet_set_hw_params()
2997 if (major != priv->version) { in bcmgenet_set_hw_params()
2998 dev_err(&priv->pdev->dev, in bcmgenet_set_hw_params()
3000 major, priv->version); in bcmgenet_set_hw_params()
3004 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, in bcmgenet_set_hw_params()
3023 priv->gphy_rev = gphy_rev << 8; in bcmgenet_set_hw_params()
3027 priv->gphy_rev = gphy_rev; in bcmgenet_set_hw_params()
3047 priv->version, in bcmgenet_set_hw_params()
3071 struct bcmgenet_priv *priv; in bcmgenet_probe() local
3078 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, in bcmgenet_probe()
3091 priv = netdev_priv(dev); in bcmgenet_probe()
3092 priv->irq0 = platform_get_irq(pdev, 0); in bcmgenet_probe()
3093 priv->irq1 = platform_get_irq(pdev, 1); in bcmgenet_probe()
3094 priv->wol_irq = platform_get_irq(pdev, 2); in bcmgenet_probe()
3095 if (!priv->irq0 || !priv->irq1) { in bcmgenet_probe()
3113 priv->base = devm_ioremap_resource(&pdev->dev, r); in bcmgenet_probe()
3114 if (IS_ERR(priv->base)) { in bcmgenet_probe()
3115 err = PTR_ERR(priv->base); in bcmgenet_probe()
3126 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); in bcmgenet_probe()
3133 priv->wol_irq_disabled = true; in bcmgenet_probe()
3134 err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, in bcmgenet_probe()
3135 dev->name, priv); in bcmgenet_probe()
3146 priv->dev = dev; in bcmgenet_probe()
3147 priv->pdev = pdev; in bcmgenet_probe()
3149 priv->version = (enum bcmgenet_version)of_id->data; in bcmgenet_probe()
3151 priv->version = pd->genet_version; in bcmgenet_probe()
3153 priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); in bcmgenet_probe()
3154 if (IS_ERR(priv->clk)) in bcmgenet_probe()
3155 dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); in bcmgenet_probe()
3157 if (!IS_ERR(priv->clk)) in bcmgenet_probe()
3158 clk_prepare_enable(priv->clk); in bcmgenet_probe()
3160 bcmgenet_set_hw_params(priv); in bcmgenet_probe()
3163 init_waitqueue_head(&priv->wq); in bcmgenet_probe()
3165 priv->rx_buf_len = RX_BUF_LENGTH; in bcmgenet_probe()
3166 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); in bcmgenet_probe()
3168 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); in bcmgenet_probe()
3169 if (IS_ERR(priv->clk_wol)) in bcmgenet_probe()
3170 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); in bcmgenet_probe()
3172 priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); in bcmgenet_probe()
3173 if (IS_ERR(priv->clk_eee)) { in bcmgenet_probe()
3174 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); in bcmgenet_probe()
3175 priv->clk_eee = NULL; in bcmgenet_probe()
3178 err = reset_umac(priv); in bcmgenet_probe()
3189 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); in bcmgenet_probe()
3190 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); in bcmgenet_probe()
3196 if (!IS_ERR(priv->clk)) in bcmgenet_probe()
3197 clk_disable_unprepare(priv->clk); in bcmgenet_probe()
3206 if (!IS_ERR(priv->clk)) in bcmgenet_probe()
3207 clk_disable_unprepare(priv->clk); in bcmgenet_probe()
3215 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); in bcmgenet_remove() local
3218 unregister_netdev(priv->dev); in bcmgenet_remove()
3219 bcmgenet_mii_exit(priv->dev); in bcmgenet_remove()
3220 free_netdev(priv->dev); in bcmgenet_remove()
3229 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_suspend() local
3237 phy_suspend(priv->phydev); in bcmgenet_suspend()
3242 umac_enable_set(priv, CMD_RX_EN, false); in bcmgenet_suspend()
3244 ret = bcmgenet_dma_teardown(priv); in bcmgenet_suspend()
3249 umac_enable_set(priv, CMD_TX_EN, false); in bcmgenet_suspend()
3253 bcmgenet_fini_dma(priv); in bcmgenet_suspend()
3256 if (device_may_wakeup(d) && priv->wolopts) { in bcmgenet_suspend()
3257 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); in bcmgenet_suspend()
3258 clk_prepare_enable(priv->clk_wol); in bcmgenet_suspend()
3259 } else if (phy_is_internal(priv->phydev)) { in bcmgenet_suspend()
3260 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); in bcmgenet_suspend()
3264 clk_disable_unprepare(priv->clk); in bcmgenet_suspend()
3272 struct bcmgenet_priv *priv = netdev_priv(dev); in bcmgenet_resume() local
3281 ret = clk_prepare_enable(priv->clk); in bcmgenet_resume()
3288 if (phy_is_internal(priv->phydev)) in bcmgenet_resume()
3289 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); in bcmgenet_resume()
3291 bcmgenet_umac_reset(priv); in bcmgenet_resume()
3293 ret = init_umac(priv); in bcmgenet_resume()
3298 if (priv->wolopts) in bcmgenet_resume()
3299 clk_disable_unprepare(priv->clk_wol); in bcmgenet_resume()
3301 phy_init_hw(priv->phydev); in bcmgenet_resume()
3303 bcmgenet_mii_config(priv->dev, false); in bcmgenet_resume()
3306 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); in bcmgenet_resume()
3308 bcmgenet_set_hw_addr(priv, dev->dev_addr); in bcmgenet_resume()
3310 if (phy_is_internal(priv->phydev)) { in bcmgenet_resume()
3311 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); in bcmgenet_resume()
3313 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); in bcmgenet_resume()
3316 if (priv->wolopts) in bcmgenet_resume()
3317 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); in bcmgenet_resume()
3320 dma_ctrl = bcmgenet_dma_disable(priv); in bcmgenet_resume()
3323 ret = bcmgenet_init_dma(priv); in bcmgenet_resume()
3330 bcmgenet_enable_dma(priv, dma_ctrl); in bcmgenet_resume()
3334 phy_resume(priv->phydev); in bcmgenet_resume()
3336 if (priv->eee.eee_enabled) in bcmgenet_resume()
3344 clk_disable_unprepare(priv->clk); in bcmgenet_resume()
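
Nearly every reference above follows the same shape: recover priv from the net_device with netdev_priv(), then read, modify, and write a block register through one of the bcmgenet_*_readl()/_writel() accessors. A minimal sketch of that pattern, modeled on umac_enable_set() (lines 1630-1639 above); the function name example_umac_rmw is illustrative only, and the sketch assumes the driver's own bcmgenet.h for struct bcmgenet_priv, UMAC_CMD and the UMAC accessors:

	/* Illustrative sketch of the read-modify-write pattern seen in the
	 * references above; not part of the driver itself.
	 */
	#include <linux/netdevice.h>
	#include "bcmgenet.h"

	static void example_umac_rmw(struct net_device *dev, u32 mask, bool enable)
	{
		/* priv is always recovered from the net_device via netdev_priv() */
		struct bcmgenet_priv *priv = netdev_priv(dev);
		u32 reg;

		/* read-modify-write UMAC_CMD through the per-block accessor */
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

Callers pass the bits to toggle as the mask, e.g. CMD_TX_EN | CMD_RX_EN as umac_enable_set() receives at lines 2616 and 2650 above.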