Lines Matching refs:priv

226 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_dump_regs() local
232 printk("%s: reg[%p]:", dev->name, priv->regs + i); in cpmac_dump_regs()
234 printk(" %08x", cpmac_read(priv->regs, i)); in cpmac_dump_regs()
251 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_dump_all_desc() local
252 struct cpmac_desc *dump = priv->rx_head; in cpmac_dump_all_desc()
257 } while (dump != priv->rx_head); in cpmac_dump_all_desc()
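Both dump helpers above share the same walk-and-print shape: cpmac_dump_regs() iterates the register window, cpmac_dump_all_desc() laps the rx ring once. A minimal reconstruction of the latter from these fragments; cpmac_dump_desc() is assumed to be the per-descriptor printer referenced later in this listing.

static void cpmac_dump_all_desc(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *dump = priv->rx_head;

        do {
                cpmac_dump_desc(dev, dump);     /* print one descriptor */
                dump = dump->next;              /* follow the sw ring */
        } while (dump != priv->rx_head);        /* stop after one full lap */
}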
280 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) in cpmac_mdio_read()
282 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | in cpmac_mdio_read()
284 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) in cpmac_mdio_read()
293 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) in cpmac_mdio_write()
295 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | in cpmac_mdio_write()
311 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | in cpmac_mdio_reset()
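All three MDIO callbacks busy-wait on MDIO_BUSY before touching the ACCESS register; bus->priv is the ioremapped MDIO register base set up in cpmac_init() at the end of this listing. A hedged reconstruction of the read path: the cpu_relax() calls and the MDIO_PHY()/MDIO_DATA() field macros are assumptions consistent with the fragments.

static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
        u32 val;

        /* wait for any previous transaction to complete */
        while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
                cpu_relax();
        /* kick off the read; the GO bit doubles as BUSY */
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY |
                    MDIO_REG(reg) | MDIO_PHY(phy_id));
        /* poll until the result is latched */
        while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
                cpu_relax();

        return MDIO_DATA(val);          /* assumed data-field macro */
}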
326 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_set_multicast_list() local
328 mbp = cpmac_read(priv->regs, CPMAC_MBP); in cpmac_set_multicast_list()
330 cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | in cpmac_set_multicast_list()
333 cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); in cpmac_set_multicast_list()
336 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); in cpmac_set_multicast_list()
337 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); in cpmac_set_multicast_list()
360 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); in cpmac_set_multicast_list()
361 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); in cpmac_set_multicast_list()
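cpmac_set_multicast_list() is a read-modify-write of the MBP (buffer/promiscuity) register plus the two 32-bit hash words; IFF_ALLMULTI simply floods both hash registers. A sketch under those assumptions, with the per-address hash computation elided:

static void cpmac_set_multicast_list(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        u32 mbp;

        mbp = cpmac_read(priv->regs, CPMAC_MBP);
        if (dev->flags & IFF_PROMISC) {
                /* enable promiscuous rx (field macros as in the fragments) */
                cpmac_write(priv->regs, CPMAC_MBP,
                            (mbp & ~MBP_PROMISCCHAN(0)) | MBP_RXPROMISC);
        } else {
                cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
                if (dev->flags & IFF_ALLMULTI) {
                        /* accept every multicast: flood both hash words */
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
                } else {
                        u32 hash[2] = { 0, };

                        /* per-address hash computation elided; the result
                         * lands in the same two registers */
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
                }
        }
}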
366 static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, in cpmac_rx_one() argument
371 if (unlikely(netif_msg_hw(priv))) in cpmac_rx_one()
372 cpmac_dump_desc(priv->dev, desc); in cpmac_rx_one()
373 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); in cpmac_rx_one()
375 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_rx_one()
376 netdev_warn(priv->dev, "rx: spurious interrupt\n"); in cpmac_rx_one()
381 skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); in cpmac_rx_one()
384 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); in cpmac_rx_one()
386 priv->dev->stats.rx_packets++; in cpmac_rx_one()
387 priv->dev->stats.rx_bytes += desc->datalen; in cpmac_rx_one()
389 dma_unmap_single(&priv->dev->dev, desc->data_mapping, in cpmac_rx_one()
392 desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, in cpmac_rx_one()
396 if (unlikely(netif_msg_pktdata(priv))) { in cpmac_rx_one()
397 netdev_dbg(priv->dev, "received packet:\n"); in cpmac_rx_one()
398 cpmac_dump_skb(priv->dev, result); in cpmac_rx_one()
401 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_rx_one()
402 netdev_warn(priv->dev, in cpmac_rx_one()
405 priv->dev->stats.rx_dropped++; in cpmac_rx_one()
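The core of cpmac_rx_one() is a recycle-in-place pattern: ack the descriptor back to the MAC, hand the filled skb up the stack, and map a freshly allocated buffer into the same slot. A condensed sketch; the skb_put() length step is an assumption consistent with the datalen fragment, and the spurious-interrupt check is elided.

static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                                    struct cpmac_desc *desc)
{
        struct sk_buff *skb, *result = NULL;

        /* ack the descriptor so the MAC can reuse this slot */
        cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);

        skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
        if (likely(skb)) {
                /* hand the filled buffer up the stack ... */
                skb_put(desc->skb, desc->datalen);      /* length: assumed */
                desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
                result = desc->skb;
                priv->dev->stats.rx_packets++;
                priv->dev->stats.rx_bytes += desc->datalen;

                /* ... and map the replacement into the same slot */
                dma_unmap_single(&priv->dev->dev, desc->data_mapping,
                                 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
                desc->data_mapping = dma_map_single(&priv->dev->dev,
                                                    skb->data, CPMAC_SKB_SIZE,
                                                    DMA_FROM_DEVICE);
                desc->skb = skb;
        } else {
                priv->dev->stats.rx_dropped++;  /* keep the old buffer */
        }

        return result;
}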
418 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); in cpmac_poll() local
421 spin_lock(&priv->rx_lock); in cpmac_poll()
422 if (unlikely(!priv->rx_head)) { in cpmac_poll()
423 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_poll()
424 netdev_warn(priv->dev, "rx: polling, but no queue\n"); in cpmac_poll()
426 spin_unlock(&priv->rx_lock); in cpmac_poll()
431 desc = priv->rx_head; in cpmac_poll()
443 if (netif_msg_rx_err(priv)) in cpmac_poll()
444 netdev_err(priv->dev, "poll found a" in cpmac_poll()
453 skb = cpmac_rx_one(priv, desc); in cpmac_poll()
461 if (desc != priv->rx_head) { in cpmac_poll()
466 priv->rx_head->prev->hw_next = priv->rx_head->mapping; in cpmac_poll()
478 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) in cpmac_poll()
480 (priv->rx_head->dataflags & CPMAC_OWN) != 0) { in cpmac_poll()
484 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; in cpmac_poll()
485 restart = priv->rx_head; in cpmac_poll()
489 priv->dev->stats.rx_errors++; in cpmac_poll()
490 priv->dev->stats.rx_fifo_errors++; in cpmac_poll()
491 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_poll()
492 netdev_warn(priv->dev, "rx dma ring overrun\n"); in cpmac_poll()
495 if (netif_msg_drv(priv)) in cpmac_poll()
496 netdev_err(priv->dev, "cpmac_poll is trying " in cpmac_poll()
502 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); in cpmac_poll()
505 priv->rx_head = desc; in cpmac_poll()
506 spin_unlock(&priv->rx_lock); in cpmac_poll()
507 if (unlikely(netif_msg_rx_status(priv))) in cpmac_poll()
508 netdev_dbg(priv->dev, "poll processed %d packets\n", received); in cpmac_poll()
515 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); in cpmac_poll()
525 if (netif_msg_drv(priv)) { in cpmac_poll()
526 netdev_err(priv->dev, "cpmac_poll is confused. " in cpmac_poll()
528 cpmac_dump_all_desc(priv->dev); in cpmac_poll()
529 netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", in cpmac_poll()
530 cpmac_read(priv->regs, CPMAC_RX_PTR(0)), in cpmac_poll()
531 cpmac_read(priv->regs, CPMAC_RX_ACK(0))); in cpmac_poll()
534 spin_unlock(&priv->rx_lock); in cpmac_poll()
536 netif_tx_stop_all_queues(priv->dev); in cpmac_poll()
537 napi_disable(&priv->napi); in cpmac_poll()
539 atomic_inc(&priv->reset_pending); in cpmac_poll()
540 cpmac_hw_stop(priv->dev); in cpmac_poll()
541 if (!schedule_work(&priv->reset_work)) in cpmac_poll()
542 atomic_dec(&priv->reset_pending); in cpmac_poll()
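The tail of cpmac_poll() decides whether receive DMA must be restarted: if the descriptor before the head carries EOQ while the head is still owned by the MAC, the receiver stalled on a full ring, so EOQ is cleared and RX_PTR(0) rewritten; an inconsistent ring instead parks the device and defers to the reset worker. A condensed sketch of both outcomes, with the enclosing processing loop and statistics updates elided:

        struct cpmac_desc *restart = NULL;

        /* overrun case: ring filled up while we were processing */
        if ((priv->rx_head->prev->dataflags & (CPMAC_OWN | CPMAC_EOQ))
                        == CPMAC_EOQ &&
            (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
                priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
                restart = priv->rx_head;
        }
        if (restart)
                cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);

        /* fatal case: park the device and hand recovery to the worker;
         * undo the pending count if the work item was already queued */
        netif_tx_stop_all_queues(priv->dev);
        napi_disable(&priv->napi);
        atomic_inc(&priv->reset_pending);
        cpmac_hw_stop(priv->dev);
        if (!schedule_work(&priv->reset_work))
                atomic_dec(&priv->reset_pending);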
552 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_start_xmit() local
554 if (unlikely(atomic_read(&priv->reset_pending))) in cpmac_start_xmit()
564 desc = &priv->desc_ring[queue]; in cpmac_start_xmit()
566 if (netif_msg_tx_err(priv) && net_ratelimit()) in cpmac_start_xmit()
572 spin_lock(&priv->lock); in cpmac_start_xmit()
573 spin_unlock(&priv->lock); in cpmac_start_xmit()
581 if (unlikely(netif_msg_tx_queued(priv))) in cpmac_start_xmit()
583 if (unlikely(netif_msg_hw(priv))) in cpmac_start_xmit()
585 if (unlikely(netif_msg_pktdata(priv))) in cpmac_start_xmit()
587 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); in cpmac_start_xmit()
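Each TX queue owns exactly one descriptor in desc_ring[0..CPMAC_QUEUES-1], and transmission starts by writing that descriptor's bus address to the queue's TX_PTR register. A hedged sketch of the submit path; the queue-mapping call is an assumption, the empty spin_lock()/spin_unlock() pair at source lines 572-573 (apparently only a synchronization point against cpmac_end_xmit()) is elided, as is the DMA mapping of the payload.

static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;
        int queue;

        if (unlikely(atomic_read(&priv->reset_pending)))
                return NETDEV_TX_BUSY;          /* reset worker owns the hw */

        queue = skb_get_queue_mapping(skb);     /* assumed queue selection */
        desc = &priv->desc_ring[queue];         /* one descriptor per queue */
        if (unlikely(desc->dataflags & CPMAC_OWN)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        netdev_warn(dev, "tx dma ring full\n");
                return NETDEV_TX_BUSY;          /* still owned by the MAC */
        }

        /* fill skb pointer, DMA mapping and SOP|EOP|OWN flags (elided),
         * then start the queue by writing the descriptor's bus address */
        cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

        return NETDEV_TX_OK;
}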
595 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_end_xmit() local
597 desc = &priv->desc_ring[queue]; in cpmac_end_xmit()
598 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); in cpmac_end_xmit()
600 spin_lock(&priv->lock); in cpmac_end_xmit()
603 spin_unlock(&priv->lock); in cpmac_end_xmit()
607 if (unlikely(netif_msg_tx_done(priv))) in cpmac_end_xmit()
616 if (netif_msg_tx_err(priv) && net_ratelimit()) in cpmac_end_xmit()
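cpmac_end_xmit() is the completion half: ack the descriptor, account the packet under priv->lock, unmap and free the skb, then wake the subqueue. A sketch; the stats fields and the subqueue-wake guard are assumptions consistent with the fragments.

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;

        desc = &priv->desc_ring[queue];
        /* ack completion so the MAC releases the descriptor */
        cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
        if (likely(desc->skb)) {
                spin_lock(&priv->lock);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += desc->skb->len;
                spin_unlock(&priv->lock);
                dma_unmap_single(&dev->dev, desc->data_mapping,
                                 desc->skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL;
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
        /* else: spurious interrupt, rate-limited warning (elided) */
}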
626 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_hw_stop() local
627 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); in cpmac_hw_stop()
630 cpmac_write(priv->regs, CPMAC_RX_CONTROL, in cpmac_hw_stop()
631 cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); in cpmac_hw_stop()
632 cpmac_write(priv->regs, CPMAC_TX_CONTROL, in cpmac_hw_stop()
633 cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); in cpmac_hw_stop()
635 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); in cpmac_hw_stop()
636 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); in cpmac_hw_stop()
638 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); in cpmac_hw_stop()
639 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); in cpmac_hw_stop()
640 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); in cpmac_hw_stop()
641 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); in cpmac_hw_stop()
642 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, in cpmac_hw_stop()
643 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); in cpmac_hw_stop()
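cpmac_hw_stop() follows a fixed disable order: clear the run bit (bit 0) of both DMA CONTROL registers, zero every queue pointer, clear all pending interrupts, then drop MAC_MII. A sketch; the eight-queue loop bound is an assumption (the fragments show only the loop body), and the ar7 reset-line handling via pdata is elided.

static void cpmac_hw_stop(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);

        cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
        cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
        for (i = 0; i < 8; i++) {               /* 8 queues: assumed */
                cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
                cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
        }
        cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}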
649 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_hw_start() local
650 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); in cpmac_hw_start()
654 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); in cpmac_hw_start()
655 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); in cpmac_hw_start()
657 cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); in cpmac_hw_start()
659 cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | in cpmac_hw_start()
661 cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); in cpmac_hw_start()
663 cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); in cpmac_hw_start()
664 cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); in cpmac_hw_start()
665 cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | in cpmac_hw_start()
668 cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); in cpmac_hw_start()
669 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); in cpmac_hw_start()
670 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); in cpmac_hw_start()
671 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); in cpmac_hw_start()
672 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); in cpmac_hw_start()
673 cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); in cpmac_hw_start()
674 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); in cpmac_hw_start()
675 cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); in cpmac_hw_start()
676 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); in cpmac_hw_start()
678 cpmac_write(priv->regs, CPMAC_RX_CONTROL, in cpmac_hw_start()
679 cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); in cpmac_hw_start()
680 cpmac_write(priv->regs, CPMAC_TX_CONTROL, in cpmac_hw_start()
681 cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); in cpmac_hw_start()
682 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, in cpmac_hw_start()
683 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | in cpmac_hw_start()
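The interesting detail in cpmac_hw_start() is the MAC address layout: ADDR_LO is per-channel and takes the last octet, ADDR_MID takes octet 4, and ADDR_HI packs octets 0 through 3. A sketch of that step; the shift amounts beyond the fragment and the channel count are assumptions.

        for (i = 0; i < 8; i++)         /* one ADDR_LO per channel: assumed */
                cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i),
                            dev->dev_addr[5]);
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
                    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
                    (dev->dev_addr[3] << 24));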
689 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_clear_rx() local
693 if (unlikely(!priv->rx_head)) in cpmac_clear_rx()
695 desc = priv->rx_head; in cpmac_clear_rx()
696 for (i = 0; i < priv->ring_size; i++) { in cpmac_clear_rx()
698 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_clear_rx()
700 if (unlikely(netif_msg_hw(priv))) in cpmac_clear_rx()
708 priv->rx_head->prev->hw_next = 0; in cpmac_clear_rx()
713 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_clear_tx() local
716 if (unlikely(!priv->desc_ring)) in cpmac_clear_tx()
719 priv->desc_ring[i].dataflags = 0; in cpmac_clear_tx()
720 if (priv->desc_ring[i].skb) { in cpmac_clear_tx()
721 dev_kfree_skb_any(priv->desc_ring[i].skb); in cpmac_clear_tx()
722 priv->desc_ring[i].skb = NULL; in cpmac_clear_tx()
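The two clear helpers reset software state after an error: cpmac_clear_rx() re-owns every rx descriptor to the MAC and breaks the hardware chain at the tail, while cpmac_clear_tx() drops any in-flight tx skbs. A reconstruction of the latter from the fragments; only the loop bound is assumed.

static void cpmac_clear_tx(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        int i;

        if (unlikely(!priv->desc_ring))
                return;
        for (i = 0; i < CPMAC_QUEUES; i++) {
                priv->desc_ring[i].dataflags = 0;       /* drop OWN et al. */
                if (priv->desc_ring[i].skb) {
                        dev_kfree_skb_any(priv->desc_ring[i].skb);
                        priv->desc_ring[i].skb = NULL;
                }
        }
}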
729 struct cpmac_priv *priv = in cpmac_hw_error() local
732 spin_lock(&priv->rx_lock); in cpmac_hw_error()
733 cpmac_clear_rx(priv->dev); in cpmac_hw_error()
734 spin_unlock(&priv->rx_lock); in cpmac_hw_error()
735 cpmac_clear_tx(priv->dev); in cpmac_hw_error()
736 cpmac_hw_start(priv->dev); in cpmac_hw_error()
738 atomic_dec(&priv->reset_pending); in cpmac_hw_error()
740 netif_tx_wake_all_queues(priv->dev); in cpmac_hw_error()
741 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); in cpmac_hw_error()
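cpmac_hw_error() is the deferred reset worker queued from cpmac_poll() and cpmac_check_status(): it clears both rings under the appropriate locks, restarts the hardware, and only then releases reset_pending and re-enables the host-error interrupts. A sketch; the barrier() placement is an assumption.

static void cpmac_hw_error(struct work_struct *work)
{
        struct cpmac_priv *priv =
                container_of(work, struct cpmac_priv, reset_work);

        spin_lock(&priv->rx_lock);
        cpmac_clear_rx(priv->dev);
        spin_unlock(&priv->rx_lock);
        cpmac_clear_tx(priv->dev);
        cpmac_hw_start(priv->dev);
        barrier();                      /* assumed: restart before unflag */
        atomic_dec(&priv->reset_pending);

        netif_tx_wake_all_queues(priv->dev);
        cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}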
746 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_check_status() local
748 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); in cpmac_check_status()
755 if (netif_msg_drv(priv) && net_ratelimit()) { in cpmac_check_status()
771 if (schedule_work(&priv->reset_work)) in cpmac_check_status()
772 atomic_inc(&priv->reset_pending); in cpmac_check_status()
773 if (unlikely(netif_msg_hw(priv))) in cpmac_check_status()
776 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); in cpmac_check_status()
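cpmac_check_status() decodes MAC_STATUS into per-channel rx/tx error codes and, on any host error, stops the queues and schedules the same reset worker; note that reset_pending is incremented only if schedule_work() actually queued the item. A sketch; the bit-field positions are assumptions.

static void cpmac_check_status(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
        int rx_code = (macstatus >> 12) & 15;   /* assumed field layout */
        int tx_code = (macstatus >> 20) & 15;

        if (rx_code || tx_code) {
                /* rate-limited logging of the raw codes (elided) */
                netif_tx_stop_all_queues(dev);
                cpmac_hw_stop(dev);
                if (schedule_work(&priv->reset_work))
                        atomic_inc(&priv->reset_pending);
        }
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}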
782 struct cpmac_priv *priv; in cpmac_irq() local
786 priv = netdev_priv(dev); in cpmac_irq()
788 status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); in cpmac_irq()
790 if (unlikely(netif_msg_intr(priv))) in cpmac_irq()
798 if (napi_schedule_prep(&priv->napi)) { in cpmac_irq()
799 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); in cpmac_irq()
800 __napi_schedule(&priv->napi); in cpmac_irq()
804 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); in cpmac_irq()
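cpmac_irq() demultiplexes MAC_INT_VECTOR: tx completions are handled inline, rx work is pushed to NAPI with that queue's rx interrupt masked until the poll runs, and the path ends with an EOI write. A reconstruction; the MAC_INT_TX/MAC_INT_RX masks and the queue extraction are assumptions consistent with the 1 << queue fragment.

static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct cpmac_priv *priv = netdev_priv(dev);
        u32 status;

        status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

        if (status & MAC_INT_TX)
                cpmac_end_xmit(dev, (status & 7));      /* masks: assumed */

        if (status & MAC_INT_RX) {
                int queue = (status >> 8) & 7;

                if (napi_schedule_prep(&priv->napi)) {
                        /* mask this queue's rx irq until the poll runs */
                        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR,
                                    1 << queue);
                        __napi_schedule(&priv->napi);
                }
        }

        cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

        return IRQ_HANDLED;
}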
814 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_tx_timeout() local
816 spin_lock(&priv->lock); in cpmac_tx_timeout()
818 spin_unlock(&priv->lock); in cpmac_tx_timeout()
819 if (netif_msg_tx_err(priv) && net_ratelimit()) in cpmac_tx_timeout()
822 atomic_inc(&priv->reset_pending); in cpmac_tx_timeout()
826 atomic_dec(&priv->reset_pending); in cpmac_tx_timeout()
828 netif_tx_wake_all_queues(priv->dev); in cpmac_tx_timeout()
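cpmac_tx_timeout() performs a lightweight recovery without the full worker: it bumps the error count under priv->lock, clears the tx ring while reset_pending holds off new transmissions, then wakes the queues. A sketch; the cpmac_clear_tx() call between the atomic operations is an assumption.

static void cpmac_tx_timeout(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);
        dev->stats.tx_errors++;
        spin_unlock(&priv->lock);
        if (netif_msg_tx_err(priv) && net_ratelimit())
                netdev_warn(dev, "transmit timeout\n");

        atomic_inc(&priv->reset_pending);       /* fence off start_xmit */
        cpmac_clear_tx(dev);                    /* assumed recovery step */
        atomic_dec(&priv->reset_pending);

        netif_tx_wake_all_queues(priv->dev);
}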
833 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_ioctl() local
837 if (!priv->phy) in cpmac_ioctl()
840 return phy_mii_ioctl(priv->phy, ifr, cmd); in cpmac_ioctl()
845 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_get_settings() local
847 if (priv->phy) in cpmac_get_settings()
848 return phy_ethtool_gset(priv->phy, cmd); in cpmac_get_settings()
855 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_set_settings() local
860 if (priv->phy) in cpmac_set_settings()
861 return phy_ethtool_sset(priv->phy, cmd); in cpmac_set_settings()
869 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_get_ringparam() local
876 ring->rx_pending = priv->ring_size; in cpmac_get_ringparam()
885 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_set_ringparam() local
889 priv->ring_size = ring->rx_pending; in cpmac_set_ringparam()
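The ioctl and ethtool get/set entry points are thin delegations to phylib whenever a PHY is bound; cpmac_set_ringparam() only records the new size because the ring is rebuilt on the next open. A sketch of the latter; the -EBUSY guard is an assumption.

static int cpmac_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ring)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (netif_running(dev))
                return -EBUSY;          /* assumed: resize only while down */
        priv->ring_size = ring->rx_pending;

        return 0;
}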
914 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_adjust_link() local
917 spin_lock(&priv->lock); in cpmac_adjust_link()
918 if (priv->phy->link) { in cpmac_adjust_link()
920 if (priv->phy->duplex != priv->oldduplex) { in cpmac_adjust_link()
922 priv->oldduplex = priv->phy->duplex; in cpmac_adjust_link()
925 if (priv->phy->speed != priv->oldspeed) { in cpmac_adjust_link()
927 priv->oldspeed = priv->phy->speed; in cpmac_adjust_link()
930 if (!priv->oldlink) { in cpmac_adjust_link()
932 priv->oldlink = 1; in cpmac_adjust_link()
934 } else if (priv->oldlink) { in cpmac_adjust_link()
936 priv->oldlink = 0; in cpmac_adjust_link()
937 priv->oldspeed = 0; in cpmac_adjust_link()
938 priv->oldduplex = -1; in cpmac_adjust_link()
941 if (new_state && netif_msg_link(priv) && net_ratelimit()) in cpmac_adjust_link()
942 phy_print_status(priv->phy); in cpmac_adjust_link()
944 spin_unlock(&priv->lock); in cpmac_adjust_link()
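cpmac_adjust_link() is the standard phylib callback shape: under priv->lock it compares the PHY's current duplex, speed, and link against the cached old* values, flags new_state on any change, and reports via phy_print_status(). A reconstruction; the netif_tx_start_all_queues() call on link-up is an assumption.

static void cpmac_adjust_link(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        int new_state = 0;

        spin_lock(&priv->lock);
        if (priv->phy->link) {
                netif_tx_start_all_queues(dev); /* assumed on link-up */
                if (priv->phy->duplex != priv->oldduplex) {
                        new_state = 1;
                        priv->oldduplex = priv->phy->duplex;
                }
                if (priv->phy->speed != priv->oldspeed) {
                        new_state = 1;
                        priv->oldspeed = priv->phy->speed;
                }
                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv) && net_ratelimit())
                phy_print_status(priv->phy);

        spin_unlock(&priv->lock);
}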
950 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_open() local
955 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); in cpmac_open()
957 if (netif_msg_drv(priv)) in cpmac_open()
964 priv->regs = ioremap(mem->start, resource_size(mem)); in cpmac_open()
965 if (!priv->regs) { in cpmac_open()
966 if (netif_msg_drv(priv)) in cpmac_open()
973 size = priv->ring_size + CPMAC_QUEUES; in cpmac_open()
974 priv->desc_ring = dma_alloc_coherent(&dev->dev, in cpmac_open()
976 &priv->dma_ring, in cpmac_open()
978 if (!priv->desc_ring) { in cpmac_open()
984 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; in cpmac_open()
986 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; in cpmac_open()
987 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { in cpmac_open()
1000 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; in cpmac_open()
1005 priv->rx_head->prev->hw_next = (u32)0; in cpmac_open()
1009 if (netif_msg_drv(priv)) in cpmac_open()
1015 atomic_set(&priv->reset_pending, 0); in cpmac_open()
1016 INIT_WORK(&priv->reset_work, cpmac_hw_error); in cpmac_open()
1019 napi_enable(&priv->napi); in cpmac_open()
1020 priv->phy->state = PHY_CHANGELINK; in cpmac_open()
1021 phy_start(priv->phy); in cpmac_open()
1027 for (i = 0; i < priv->ring_size; i++) { in cpmac_open()
1028 if (priv->rx_head[i].skb) { in cpmac_open()
1030 priv->rx_head[i].data_mapping, in cpmac_open()
1033 kfree_skb(priv->rx_head[i].skb); in cpmac_open()
1037 kfree(priv->desc_ring); in cpmac_open()
1038 iounmap(priv->regs); in cpmac_open()
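cpmac_open() lays out one coherent DMA allocation holding CPMAC_QUEUES tx descriptors followed by ring_size rx descriptors, then links the rx portion into a circular software list whose hardware chain is broken at the tail. A condensed sketch of that setup; buffer allocation, error unwinding, and the irq/NAPI/PHY start steps are elided.

        size = priv->ring_size + CPMAC_QUEUES;
        priv->desc_ring = dma_alloc_coherent(&dev->dev,
                                             sizeof(struct cpmac_desc) * size,
                                             &priv->dma_ring, GFP_KERNEL);
        if (!priv->desc_ring)
                return -ENOMEM;

        /* each descriptor learns its own bus address */
        for (i = 0; i < size; i++)
                priv->desc_ring[i].mapping =
                        priv->dma_ring + sizeof(*desc) * i;

        priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
        for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
                /* allocate and DMA-map one receive buffer here (elided) */
                desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
                desc->next->prev = desc;
                desc->hw_next = (u32)desc->next->mapping;
        }
        priv->rx_head->prev->hw_next = (u32)0;  /* hw chain ends at tail */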
1050 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_stop() local
1055 cancel_work_sync(&priv->reset_work); in cpmac_stop()
1056 napi_disable(&priv->napi); in cpmac_stop()
1057 phy_stop(priv->phy); in cpmac_stop()
1062 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); in cpmac_stop()
1063 cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); in cpmac_stop()
1064 cpmac_write(priv->regs, CPMAC_MBP, 0); in cpmac_stop()
1067 iounmap(priv->regs); in cpmac_stop()
1068 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); in cpmac_stop()
1070 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; in cpmac_stop()
1071 for (i = 0; i < priv->ring_size; i++) { in cpmac_stop()
1072 if (priv->rx_head[i].skb) { in cpmac_stop()
1074 priv->rx_head[i].data_mapping, in cpmac_stop()
1077 kfree_skb(priv->rx_head[i].skb); in cpmac_stop()
1082 (CPMAC_QUEUES + priv->ring_size), in cpmac_stop()
1083 priv->desc_ring, priv->dma_ring); in cpmac_stop()
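cpmac_stop() tears down in roughly the reverse order of open; the ordering rule worth noting is that cancel_work_sync() runs before anything is freed, so the reset worker cannot race the teardown. A condensed sketch with the intermediate register writes and skb cleanup elided:

        cancel_work_sync(&priv->reset_work);    /* no reset may race this */
        napi_disable(&priv->napi);
        phy_stop(priv->phy);
        cpmac_hw_stop(dev);
        /* ... unmap and free the rx skbs (elided) ... */
        dma_free_coherent(&dev->dev,
                          sizeof(struct cpmac_desc) *
                          (CPMAC_QUEUES + priv->ring_size),
                          priv->desc_ring, priv->dma_ring);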
1107 struct cpmac_priv *priv; in cpmac_probe() local
1135 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); in cpmac_probe()
1140 priv = netdev_priv(dev); in cpmac_probe()
1142 priv->pdev = pdev; in cpmac_probe()
1154 netif_napi_add(dev, &priv->napi, cpmac_poll, 64); in cpmac_probe()
1156 spin_lock_init(&priv->lock); in cpmac_probe()
1157 spin_lock_init(&priv->rx_lock); in cpmac_probe()
1158 priv->dev = dev; in cpmac_probe()
1159 priv->ring_size = 64; in cpmac_probe()
1160 priv->msg_enable = netif_msg_init(debug_level, 0xff); in cpmac_probe()
1163 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, in cpmac_probe()
1166 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, in cpmac_probe()
1169 if (IS_ERR(priv->phy)) { in cpmac_probe()
1170 if (netif_msg_drv(priv)) in cpmac_probe()
1173 rc = PTR_ERR(priv->phy); in cpmac_probe()
1183 if (netif_msg_probe(priv)) { in cpmac_probe()
1186 priv->phy_name, dev->dev_addr); in cpmac_probe()
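cpmac_probe() allocates the multiqueue netdev around cpmac_priv and binds the PHY through phylib; the PHY name is formatted with PHY_ID_FMT against the shared MDIO bus. A sketch of the attach step; the bus-id/phy-id locals and the fail label are assumptions.

        snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
                 mdio_bus_id, phy_id);          /* both ids: assumed locals */

        priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
                                PHY_INTERFACE_MODE_MII);
        if (IS_ERR(priv->phy)) {
                if (netif_msg_drv(priv))
                        dev_err(&pdev->dev, "Could not attach to PHY\n");
                rc = PTR_ERR(priv->phy);
                goto fail;                      /* assumed error label */
        }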
1230 cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); in cpmac_init()
1232 if (!cpmac_mii->priv) { in cpmac_init()
1248 mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); in cpmac_init()
1278 iounmap(cpmac_mii->priv); in cpmac_init()
1290 iounmap(cpmac_mii->priv); in cpmac_exit()
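Finally, the module-wide mii_bus stores the ioremapped MDIO register window in its ->priv field, which is what every cpmac_mdio_* callback above reads and writes. A sketch of setup and teardown; the error label is an assumption.

        /* cpmac_init(): map the MDIO block once for the whole bus */
        cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
        if (!cpmac_mii->priv) {
                pr_err("Can't ioremap mdio registers\n");
                res = -ENXIO;
                goto fail_alloc;                /* assumed error label */
        }

        /* probe which PHYs answer before registering the bus */
        mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);

        /* cpmac_exit(): unmap on module unload */
        iounmap(cpmac_mii->priv);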