Lines Matching refs:lp
405 struct i596_private *lp = netdev_priv(dev); in i596_display_data() local
406 struct i596_dma *dma = lp->dma; in i596_display_data()
424 cmd = lp->cmd_head; in i596_display_data()
433 rfd = lp->rfd_head; in i596_display_data()
443 } while (rfd != lp->rfd_head); in i596_display_data()
444 rbd = lp->rbd_head; in i596_display_data()
453 } while (rbd != lp->rbd_head); in i596_display_data()
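The two do/while loops at lines 443 and 453 show that both descriptor lists are circular: the dump starts at lp->rfd_head or lp->rbd_head and follows v_next until it arrives back at the head. A compilable model of that walk, using a simplified stand-in for the driver's descriptor type:

#include <stdio.h>

struct rfd {
	struct rfd *v_next;   /* CPU-side link; the list closes on itself */
	int id;
};

static void display_ring(struct rfd *head)
{
	struct rfd *rfd = head;
	do {
		printf("rfd %d\n", rfd->id);
		rfd = rfd->v_next;
	} while (rfd != head);        /* circular: stop back at the head */
}

int main(void)
{
	struct rfd ring[4];
	for (int i = 0; i < 4; i++) {
		ring[i].id = i;
		ring[i].v_next = &ring[(i + 1) % 4];   /* close the loop */
	}
	display_ring(&ring[0]);
	return 0;
}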
458 #define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->… argument
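Line 458 is truncated in the listing; since lp->dma holds the CPU virtual address of the shared DMA block (line 1077) and lp->dma_addr its bus address (line 1067), the elided tail presumably closes as ((lp)->dma))): the macro converts a pointer into that block into a bus address by adding the pointer's byte offset to the bus base. A compilable userspace model, with dma_addr_t faked as uint32_t:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;          /* stand-in for the kernel type */

struct model_private {
	dma_addr_t dma_addr;          /* bus address of the DMA block */
	unsigned char *dma;           /* CPU virtual address of same  */
};

#define virt_to_dma(lp, v) \
	((lp)->dma_addr + (dma_addr_t)((unsigned long)(v) - (unsigned long)((lp)->dma)))

int main(void)
{
	static unsigned char block[4096];
	struct model_private lp = { .dma_addr = 0x00100000u, .dma = block };

	/* A field 0x80 bytes into the block sits at bus address 0x00100080. */
	printf("0x%08x\n", (unsigned)virt_to_dma(&lp, block + 0x80));
	return 0;
}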
462 struct i596_private *lp = netdev_priv(dev); in init_rx_bufs() local
463 struct i596_dma *dma = lp->dma; in init_rx_bufs()
480 rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1)); in init_rx_bufs()
481 rbd->b_addr = SWAP32(virt_to_dma(lp, rbd)); in init_rx_bufs()
487 lp->rbd_head = dma->rbds; in init_rx_bufs()
490 rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds)); in init_rx_bufs()
498 rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1)); in init_rx_bufs()
501 lp->rfd_head = dma->rfds; in init_rx_bufs()
502 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); in init_rx_bufs()
504 rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head)); in init_rx_bufs()
508 rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds)); in init_rx_bufs()
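The loop bodies around lines 480-508 build circular rings on two levels at once: each receive descriptor is linked by CPU pointer for the driver and by bus address (via virt_to_dma and SWAP32) for the chip, and the final descriptor at lines 490 and 508 is pointed back at the first. A compilable sketch of the rbd half, assuming simplified types, an identity SWAP32 for a little-endian host, and an illustrative ring size:

#include <stdint.h>

typedef uint32_t dma_addr_t;
#define SWAP32(x) ((uint32_t)(x))     /* byte-swap elided in this model */
#define RX_RING 8                     /* illustrative ring size */

struct rbd {
	uint32_t b_next;        /* bus address of the next rbd, for the chip */
	uint32_t b_addr;        /* bus address of this rbd itself            */
	struct rbd *v_next;     /* CPU pointer, for the driver               */
};

static void build_rbd_ring(struct rbd *rbds, dma_addr_t bus_base)
{
	for (int i = 0; i < RX_RING; i++) {
		int next = (i + 1) % RX_RING;          /* last entry wraps to 0 */
		rbds[i].v_next = &rbds[next];
		rbds[i].b_next = SWAP32(bus_base + next * sizeof(struct rbd));
		rbds[i].b_addr = SWAP32(bus_base + i * sizeof(struct rbd));
	}
}

int main(void)
{
	static struct rbd rbds[RX_RING];
	build_rbd_ring(rbds, 0x00200000u);
	return 0;
}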
517 struct i596_private *lp = netdev_priv(dev); in remove_rx_bufs() local
521 for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) { in remove_rx_bufs()
534 struct i596_private *lp = netdev_priv(dev); in rebuild_rx_bufs() local
535 struct i596_dma *dma = lp->dma; in rebuild_rx_bufs()
545 lp->rfd_head = dma->rfds; in rebuild_rx_bufs()
546 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); in rebuild_rx_bufs()
547 lp->rbd_head = dma->rbds; in rebuild_rx_bufs()
548 dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds)); in rebuild_rx_bufs()
556 struct i596_private *lp = netdev_priv(dev); in init_i596_mem() local
557 struct i596_dma *dma = lp->dma; in init_i596_mem()
565 lp->last_cmd = jiffies; in init_i596_mem()
568 dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp))); in init_i596_mem()
569 dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb))); in init_i596_mem()
571 lp->cmd_backlog = 0; in init_i596_mem()
573 lp->cmd_head = NULL; in init_i596_mem()
582 mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp)); in init_i596_mem()
619 spin_lock_irqsave (&lp->lock, flags); in init_i596_mem()
622 spin_unlock_irqrestore (&lp->lock, flags); in init_i596_mem()
627 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); in init_i596_mem()
632 spin_unlock_irqrestore (&lp->lock, flags); in init_i596_mem()
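Lines 568-582 show the 82596's bootstrap chain: the SCP holds the bus address of the ISCP, the ISCP holds the bus address of the SCB, and the chip is handed the SCP's bus address through a port write (PORT_ALTSCP). A minimal sketch of that chain, assuming simplified structure layouts, an identity SWAP32, a three-argument bus_of() helper in place of virt_to_dma(), a stub mpu_port(), and an illustrative PORT_ALTSCP value:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;
#define SWAP32(x) ((uint32_t)(x))              /* byte-swap elided */

struct scb  { uint16_t status, command; uint32_t cmd, rfd; };
struct iscp { uint32_t stat; uint32_t scb; };
struct scp  { uint32_t sysbus; uint32_t pad; uint32_t iscp; };

struct dma_block { struct scp scp; struct iscp iscp; struct scb scb; };

/* Three-argument stand-in for virt_to_dma(): bus base + byte offset. */
#define bus_of(bus, virt, v) \
	((dma_addr_t)((bus) + (dma_addr_t)((char *)(v) - (char *)(virt))))

static void mpu_port(int cmd, dma_addr_t addr)       /* platform stub */
{
	printf("port cmd %d, addr 0x%08x\n", cmd, (unsigned)addr);
}

int main(void)
{
	static struct dma_block dma;
	dma_addr_t bus = 0x00300000u;

	dma.scp.iscp = SWAP32(bus_of(bus, &dma, &dma.iscp));  /* SCP -> ISCP */
	dma.iscp.scb = SWAP32(bus_of(bus, &dma, &dma.scb));   /* ISCP -> SCB */
	mpu_port(2 /* PORT_ALTSCP, illustrative */, bus_of(bus, &dma, &dma.scp));
	return 0;
}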
650 struct i596_private *lp = netdev_priv(dev); in i596_rx() local
657 lp->rfd_head, lp->rbd_head)); in i596_rx()
660 rfd = lp->rfd_head; /* Ref next frame to check */ in i596_rx()
666 else if (rfd->rbd == lp->rbd_head->b_addr) { in i596_rx()
667 rbd = lp->rbd_head; in i596_rx()
766 lp->rbd_head = rbd->v_next; in i596_rx()
779 lp->dma->scb.rfd = rfd->b_next; in i596_rx()
780 lp->rfd_head = rfd->v_next; in i596_rx()
787 rfd = lp->rfd_head; in i596_rx()
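The receive path (lines 657-787) walks the rfd ring starting at lp->rfd_head, matches each completed frame's rbd field against the bus address of the current rbd ring head (line 666), then advances scb.rfd, rfd_head, and rbd_head past the consumed descriptors. A stripped-down model of that loop shape; STAT_C and the descriptor layouts are simplified stand-ins, and all skb and status handling is elided:

#include <stdint.h>

#define STAT_C    0x8000u          /* "complete" bit, illustrative value */
#define I596_NULL 0xffffffffu

struct rbd { uint32_t b_addr; struct rbd *v_next; };
struct rfd {
	uint16_t stat;
	uint32_t rbd;              /* bus address of this frame's buffer */
	uint32_t b_next;
	struct rfd *v_next;
};

struct priv {
	struct rfd *rfd_head;
	struct rbd *rbd_head;
	uint32_t scb_rfd;          /* models lp->dma->scb.rfd */
};

static void rx_loop(struct priv *lp)
{
	struct rfd *rfd = lp->rfd_head;       /* next frame to check */

	while (rfd->stat & STAT_C) {          /* frame complete? */
		if (rfd->rbd != I596_NULL &&
		    rfd->rbd == lp->rbd_head->b_addr)
			lp->rbd_head = lp->rbd_head->v_next;  /* buffer consumed */
		rfd->stat = 0;                    /* hand the rfd back */
		lp->scb_rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}
}

int main(void)
{
	static struct rbd rbd0 = { .b_addr = 0x100 };
	static struct rfd r1, r0 = { .stat = STAT_C, .rbd = 0x100,
	                             .b_next = 0x40, .v_next = &r1 };
	struct priv lp = { .rfd_head = &r0, .rbd_head = &rbd0 };

	rbd0.v_next = &rbd0;       /* one-entry ring for the demo */
	r1.v_next = &r0;           /* r1.stat stays 0, ending the loop */
	rx_loop(&lp);
	return 0;
}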
797 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) in i596_cleanup_cmd() argument
801 while (lp->cmd_head != NULL) { in i596_cleanup_cmd()
802 ptr = lp->cmd_head; in i596_cleanup_cmd()
803 lp->cmd_head = ptr->v_next; in i596_cleanup_cmd()
804 lp->cmd_backlog--; in i596_cleanup_cmd()
832 wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out"); in i596_cleanup_cmd()
833 lp->dma->scb.cmd = I596_NULL; in i596_cleanup_cmd()
834 DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); in i596_cleanup_cmd()
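i596_cleanup_cmd() (lines 801-834) drains the pending command queue: it pops cmd_head link by link, keeps cmd_backlog in step, and finally parks scb.cmd at I596_NULL after waiting out the chip. A minimal model of the drain, with the DMA writeback and SCB handshake reduced to comments:

#include <stddef.h>

struct cmd { struct cmd *v_next; };

struct priv {
	struct cmd *cmd_head;
	int cmd_backlog;
};

static void cleanup_cmds(struct priv *lp)
{
	while (lp->cmd_head != NULL) {
		struct cmd *ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;     /* unlink the head */
		lp->cmd_backlog--;
		/* real driver: mark the command failed, free any skb */
	}
	/* real driver: wait_cmd(), then park scb.cmd at I596_NULL
	 * and write the SCB back for the chip (lines 832-834)     */
}

int main(void)
{
	struct cmd c2 = { NULL }, c1 = { &c2 };
	struct priv lp = { &c1, 2 };

	cleanup_cmds(&lp);
	return lp.cmd_backlog;    /* 0 once the queue is drained */
}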
838 static inline void i596_reset(struct net_device *dev, struct i596_private *lp) in i596_reset() argument
844 spin_lock_irqsave (&lp->lock, flags); in i596_reset()
846 wait_cmd(dev, lp->dma, 100, "i596_reset timed out"); in i596_reset()
851 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); in i596_reset()
852 DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); in i596_reset()
856 wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out"); in i596_reset()
857 spin_unlock_irqrestore (&lp->lock, flags); in i596_reset()
859 i596_cleanup_cmd(dev, lp); in i596_reset()
869 struct i596_private *lp = netdev_priv(dev); in i596_add_cmd() local
870 struct i596_dma *dma = lp->dma; in i596_add_cmd()
874 lp->cmd_head)); in i596_add_cmd()
882 spin_lock_irqsave (&lp->lock, flags); in i596_add_cmd()
884 if (lp->cmd_head != NULL) { in i596_add_cmd()
885 lp->cmd_tail->v_next = cmd; in i596_add_cmd()
886 lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status)); in i596_add_cmd()
887 DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd)); in i596_add_cmd()
889 lp->cmd_head = cmd; in i596_add_cmd()
891 dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status)); in i596_add_cmd()
896 lp->cmd_tail = cmd; in i596_add_cmd()
897 lp->cmd_backlog++; in i596_add_cmd()
899 spin_unlock_irqrestore (&lp->lock, flags); in i596_add_cmd()
901 if (lp->cmd_backlog > max_cmd_backlog) { in i596_add_cmd()
902 unsigned long tickssofar = jiffies - lp->last_cmd; in i596_add_cmd()
911 i596_reset(dev, lp); in i596_add_cmd()
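i596_add_cmd() (lines 869-911) appends under lp->lock: a non-empty queue is extended through the tail's v_next/b_next pair, while an empty queue makes the new command the head and aims scb.cmd at its status word; the backlog counter then feeds the staleness check at lines 901-911 that can escalate to i596_reset(). A sketch of the two enqueue branches, assuming an identity-mapped bus_addr_of() helper (hypothetical; the driver uses virt_to_dma(lp, &cmd->status)) and with locking reduced to comments:

#include <stdint.h>
#include <stddef.h>

struct cmd {
	uint16_t status;
	struct cmd *v_next;     /* CPU-side link */
	uint32_t b_next;        /* bus-side link for the chip */
};

struct priv {
	struct cmd *cmd_head, *cmd_tail;
	uint32_t scb_cmd;       /* models lp->dma->scb.cmd */
	int cmd_backlog;
};

/* Hypothetical helper; the driver uses virt_to_dma(lp, &cmd->status). */
uint32_t bus_addr_of(struct cmd *c)
{
	return (uint32_t)(uintptr_t)&c->status;   /* identity-mapped model */
}

void add_cmd(struct priv *lp, struct cmd *cmd)
{
	cmd->v_next = NULL;
	/* real driver: spin_lock_irqsave(&lp->lock, flags) here */
	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;               /* chain on the tail   */
		lp->cmd_tail->b_next = bus_addr_of(cmd);  /* and on the bus side */
	} else {
		lp->cmd_head = cmd;
		lp->scb_cmd = bus_addr_of(cmd);           /* hand it to the chip */
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;
	/* real driver: spin_unlock_irqrestore(), then the stale-backlog
	 * check (lines 901-911) that may escalate to i596_reset()       */
}

int main(void)
{
	struct priv lp = { 0 };
	struct cmd a = { 0 }, b = { 0 };

	add_cmd(&lp, &a);      /* empty queue: becomes head, kicks scb.cmd */
	add_cmd(&lp, &b);      /* non-empty: chained through the tail */
	return lp.cmd_backlog - 2;   /* 0 on success */
}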
940 struct i596_private *lp = netdev_priv(dev); in i596_tx_timeout() local
950 if (lp->last_restart == dev->stats.tx_packets) { in i596_tx_timeout()
953 i596_reset (dev, lp); in i596_tx_timeout()
957 lp->dma->scb.command = SWAP16(CUC_START | RX_START); in i596_tx_timeout()
958 DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); in i596_tx_timeout()
960 lp->last_restart = dev->stats.tx_packets; in i596_tx_timeout()
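i596_tx_timeout() (lines 950-960) restarts cautiously: if dev->stats.tx_packets has not advanced since the last recorded restart, the previous kick evidently failed and a full i596_reset() is warranted; otherwise it re-issues CUC_START | RX_START and records the counter. The heuristic in isolation, as a sketch:

#include <stdbool.h>

struct stats { unsigned long tx_packets; };
struct priv  { unsigned long last_restart; };

/* True when a full reset is needed rather than another CUC/RX kick. */
bool needs_full_reset(struct priv *lp, const struct stats *stats)
{
	if (lp->last_restart == stats->tx_packets)
		return true;                          /* last kick changed nothing */
	lp->last_restart = stats->tx_packets;     /* remember for next timeout */
	return false;
}

int main(void)
{
	struct stats s = { 5 };
	struct priv lp = { 0 };

	(void)needs_full_reset(&lp, &s);   /* first timeout: just kick */
	return needs_full_reset(&lp, &s);  /* counter stuck: reset (returns 1) */
}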
970 struct i596_private *lp = netdev_priv(dev); in i596_start_xmit() local
987 tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd; in i596_start_xmit()
988 tbd = lp->dma->tbds + lp->next_tx_cmd; in i596_start_xmit()
998 if (++lp->next_tx_cmd == TX_RING_SIZE) in i596_start_xmit()
999 lp->next_tx_cmd = 0; in i596_start_xmit()
1000 tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd)); in i596_start_xmit()
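i596_start_xmit() (lines 987-999) uses a single ring index, lp->next_tx_cmd, to pick both the transmit command block and its matching buffer descriptor, wrapping to zero at TX_RING_SIZE. A sketch of the slot arithmetic; the ring size shown is illustrative, since the listing does not give TX_RING_SIZE's value:

#define TX_RING_SIZE 32     /* illustrative; the listing doesn't show it */

struct priv { unsigned int next_tx_cmd; };

/* One index selects both dma->tx_cmds[slot] and dma->tbds[slot]. */
unsigned int claim_tx_slot(struct priv *lp)
{
	unsigned int slot = lp->next_tx_cmd;

	if (++lp->next_tx_cmd == TX_RING_SIZE)   /* wrap at the ring end */
		lp->next_tx_cmd = 0;
	return slot;
}

int main(void)
{
	struct priv lp = { TX_RING_SIZE - 1 };
	return claim_tx_slot(&lp) == TX_RING_SIZE - 1 && lp.next_tx_cmd == 0
		? 0 : 1;
}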
1051 struct i596_private *lp = netdev_priv(dev); in i82596_probe() local
1067 sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL); in i82596_probe()
1077 lp->dma = dma; in i82596_probe()
1082 spin_lock_init(&lp->lock); in i82596_probe()
1089 (void *)dma, lp->dma_addr); in i82596_probe()
1116 struct i596_private *lp; in i596_interrupt() local
1120 lp = netdev_priv(dev); in i596_interrupt()
1121 dma = lp->dma; in i596_interrupt()
1123 spin_lock (&lp->lock); in i596_interrupt()
1138 spin_unlock (&lp->lock); in i596_interrupt()
1156 while (lp->cmd_head != NULL) { in i596_interrupt()
1157 DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd)); in i596_interrupt()
1158 if (!(lp->cmd_head->status & SWAP16(STAT_C))) in i596_interrupt()
1161 ptr = lp->cmd_head; in i596_interrupt()
1166 SWAP16(lp->cmd_head->status), in i596_interrupt()
1167 SWAP16(lp->cmd_head->command))); in i596_interrupt()
1168 lp->cmd_head = ptr->v_next; in i596_interrupt()
1169 lp->cmd_backlog--; in i596_interrupt()
1240 lp->last_cmd = jiffies; in i596_interrupt()
1247 ptr = lp->cmd_head; in i596_interrupt()
1248 while ((ptr != NULL) && (ptr != lp->cmd_tail)) { in i596_interrupt()
1256 if (lp->cmd_head != NULL) in i596_interrupt()
1258 dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status)); in i596_interrupt()
1295 spin_unlock (&lp->lock); in i596_interrupt()
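The interrupt handler (lines 1156-1258) reaps completed work under lp->lock: it pops commands off cmd_head while their STAT_C bit is set, decrements cmd_backlog, and afterwards re-points scb.cmd at whatever head remains (lines 1256-1258). A model of that reaping loop, with an illustrative STAT_C value and the DMA_INV cache maintenance reduced to a comment:

#include <stdint.h>
#include <stddef.h>

#define STAT_C 0x8000u            /* illustrative "command complete" bit */

struct cmd {
	uint16_t status;
	struct cmd *v_next;
	uint32_t bus_addr;        /* models virt_to_dma(lp, &cmd->status) */
};

struct priv {
	struct cmd *cmd_head;
	int cmd_backlog;
	uint32_t scb_cmd;         /* models lp->dma->scb.cmd */
};

static void reap_completed(struct priv *lp)
{
	/* real driver: runs with lp->lock held */
	while (lp->cmd_head != NULL) {
		/* real driver: DMA_INV the descriptor before reading status */
		if (!(lp->cmd_head->status & STAT_C))
			break;                        /* chip not done with it yet */
		lp->cmd_head = lp->cmd_head->v_next;
		lp->cmd_backlog--;
	}
	if (lp->cmd_head != NULL)             /* resume at the new head */
		lp->scb_cmd = lp->cmd_head->bus_addr;
}

int main(void)
{
	static struct cmd pending = { .status = 0, .bus_addr = 0x60 };
	static struct cmd done = { .status = STAT_C, .v_next = &pending };
	struct priv lp = { .cmd_head = &done, .cmd_backlog = 2 };

	reap_completed(&lp);
	return lp.cmd_backlog - 1;   /* one command left pending */
}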
1301 struct i596_private *lp = netdev_priv(dev); in i596_close() local
1309 dev->name, SWAP16(lp->dma->scb.status))); in i596_close()
1311 spin_lock_irqsave(&lp->lock, flags); in i596_close()
1313 wait_cmd(dev, lp->dma, 100, "close1 timed out"); in i596_close()
1314 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); in i596_close()
1315 DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb)); in i596_close()
1319 wait_cmd(dev, lp->dma, 100, "close2 timed out"); in i596_close()
1320 spin_unlock_irqrestore(&lp->lock, flags); in i596_close()
1322 i596_cleanup_cmd(dev, lp); in i596_close()
1336 struct i596_private *lp = netdev_priv(dev); in set_multicast_list() local
1337 struct i596_dma *dma = lp->dma; in set_multicast_list()