drivers/net/hippi/rrunner.c


DEFINITIONS

This source file includes the following definitions.
  1. rr_init_one
  2. rr_remove_one
  3. rr_issue_cmd
  4. rr_reset
  5. rr_read_eeprom
  6. rr_read_eeprom_word
  7. write_eeprom
  8. rr_init
  9. rr_init1
  10. rr_handle_event
  11. rx_int
  12. rr_interrupt
  13. rr_raz_tx
  14. rr_raz_rx
  15. rr_timer
  16. rr_open
  17. rr_dump
  18. rr_close
  19. rr_start_xmit
  20. rr_load_firmware
  21. rr_ioctl

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
   4  *
   5  * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
   6  *
   7  * Thanks to Essential Communication for providing us with hardware
   8  * and very comprehensive documentation without which I would not have
   9  * been able to write this driver. A special thank you to John Gibbon
  10  * for sorting out the legal issues, with the NDA, allowing the code to
  11  * be released under the GPL.
  12  *
  13  * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
  14  * stupid bugs in my code.
  15  *
  16  * Softnet support and various other patches from Val Henson of
  17  * ODS/Essential.
  18  *
  19  * PCI DMA mapping code partly based on work by Francois Romieu.
  20  */
  21 
  22 
  23 #define DEBUG 1
  24 #define RX_DMA_SKBUFF 1
  25 #define PKT_COPY_THRESHOLD 512
  26 
  27 #include <linux/module.h>
  28 #include <linux/types.h>
  29 #include <linux/errno.h>
  30 #include <linux/ioport.h>
  31 #include <linux/pci.h>
  32 #include <linux/kernel.h>
  33 #include <linux/netdevice.h>
  34 #include <linux/hippidevice.h>
  35 #include <linux/skbuff.h>
  36 #include <linux/delay.h>
  37 #include <linux/mm.h>
  38 #include <linux/slab.h>
  39 #include <net/sock.h>
  40 
  41 #include <asm/cache.h>
  42 #include <asm/byteorder.h>
  43 #include <asm/io.h>
  44 #include <asm/irq.h>
  45 #include <linux/uaccess.h>
  46 
  47 #define rr_if_busy(dev)     netif_queue_stopped(dev)
  48 #define rr_if_running(dev)  netif_running(dev)
  49 
  50 #include "rrunner.h"
  51 
  52 #define RUN_AT(x) (jiffies + (x))
  53 
  54 
  55 MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
  56 MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
  57 MODULE_LICENSE("GPL");
  58 
  59 static const char version[] =
  60 "rrunner.c: v0.50 11/11/2002  Jes Sorensen (jes@wildopensource.com)\n";
  61 
  62 
  63 static const struct net_device_ops rr_netdev_ops = {
  64         .ndo_open               = rr_open,
  65         .ndo_stop               = rr_close,
  66         .ndo_do_ioctl           = rr_ioctl,
  67         .ndo_start_xmit         = rr_start_xmit,
  68         .ndo_set_mac_address    = hippi_mac_addr,
  69 };
  70 
  71 /*
  72  * Implementation notes:
  73  *
  74  * The DMA engine only allows for DMA within physical 64KB chunks of
  75  * memory. The current approach of the driver (and stack) is to use
  76  * linear blocks of memory for the skbuffs. However, since the data
  77  * block is always the first part of the skb and skbs are 2^n aligned,
  78  * we are guaranteed to get the whole block within one 64KB-aligned
  79  * chunk.
  80  *
  81  * In the long term, relying on being able to allocate 64KB linear
  82  * chunks of memory is not feasible and the skb handling code and the
  83  * stack will need to know about I/O vectors or something similar.
  84  */
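
/*
 * Illustrative sketch only, not part of the upstream driver: the
 * constraint described above amounts to requiring that a linear buffer
 * never straddles a 64KB boundary.  The helper name is hypothetical.
 */
static inline int rr_within_64kb_window(dma_addr_t addr, unsigned int len)
{
        /* true if [addr, addr + len) fits inside a single 64KB window */
        return ((addr & 0xffff) + len) <= 0x10000;
}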
  85 
  86 static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  87 {
  88         struct net_device *dev;
  89         static int version_disp;
  90         u8 pci_latency;
  91         struct rr_private *rrpriv;
  92         void *tmpptr;
  93         dma_addr_t ring_dma;
  94         int ret = -ENOMEM;
  95 
  96         dev = alloc_hippi_dev(sizeof(struct rr_private));
  97         if (!dev)
  98                 goto out3;
  99 
 100         ret = pci_enable_device(pdev);
 101         if (ret) {
 102                 ret = -ENODEV;
 103                 goto out2;
 104         }
 105 
 106         rrpriv = netdev_priv(dev);
 107 
 108         SET_NETDEV_DEV(dev, &pdev->dev);
 109 
 110         ret = pci_request_regions(pdev, "rrunner");
 111         if (ret < 0)
 112                 goto out;
 113 
 114         pci_set_drvdata(pdev, dev);
 115 
 116         rrpriv->pci_dev = pdev;
 117 
 118         spin_lock_init(&rrpriv->lock);
 119 
 120         dev->netdev_ops = &rr_netdev_ops;
 121 
 122         /* display version info if adapter is found */
 123         if (!version_disp) {
 124                 /* set display flag to TRUE so that */
 125                 /* we only display this string ONCE */
 126                 version_disp = 1;
 127                 printk(version);
 128         }
 129 
 130         pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
 131         if (pci_latency <= 0x58){
 132                 pci_latency = 0x58;
 133                 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
 134         }
 135 
 136         pci_set_master(pdev);
 137 
 138         printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
 139                "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
 140                (unsigned long long)pci_resource_start(pdev, 0),
 141                pdev->irq, pci_latency);
 142 
 143         /*
 144          * Remap the MMIO regs into kernel space.
 145          */
 146         rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
 147         if (!rrpriv->regs) {
 148                 printk(KERN_ERR "%s:  Unable to map I/O register, "
 149                         "RoadRunner will be disabled.\n", dev->name);
 150                 ret = -EIO;
 151                 goto out;
 152         }
 153 
 154         tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 155         rrpriv->tx_ring = tmpptr;
 156         rrpriv->tx_ring_dma = ring_dma;
 157 
 158         if (!tmpptr) {
 159                 ret = -ENOMEM;
 160                 goto out;
 161         }
 162 
 163         tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 164         rrpriv->rx_ring = tmpptr;
 165         rrpriv->rx_ring_dma = ring_dma;
 166 
 167         if (!tmpptr) {
 168                 ret = -ENOMEM;
 169                 goto out;
 170         }
 171 
 172         tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
 173         rrpriv->evt_ring = tmpptr;
 174         rrpriv->evt_ring_dma = ring_dma;
 175 
 176         if (!tmpptr) {
 177                 ret = -ENOMEM;
 178                 goto out;
 179         }
 180 
 181         /*
 182          * Don't access any register before this point!
 183          */
 184 #ifdef __BIG_ENDIAN
 185         writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
 186                 &rrpriv->regs->HostCtrl);
 187 #endif
 188         /*
 189          * Need to add a case for little-endian 64-bit hosts here.
 190          */
 191 
 192         rr_init(dev);
 193 
 194         ret = register_netdev(dev);
 195         if (ret)
 196                 goto out;
 197         return 0;
 198 
 199  out:
 200         if (rrpriv->evt_ring)
 201                 pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
 202                                     rrpriv->evt_ring_dma);
 203         if (rrpriv->rx_ring)
 204                 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
 205                                     rrpriv->rx_ring_dma);
 206         if (rrpriv->tx_ring)
 207                 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
 208                                     rrpriv->tx_ring_dma);
 209         if (rrpriv->regs)
 210                 pci_iounmap(pdev, rrpriv->regs);
 211         if (pdev)
 212                 pci_release_regions(pdev);
 213  out2:
 214         free_netdev(dev);
 215  out3:
 216         return ret;
 217 }
 218 
 219 static void rr_remove_one(struct pci_dev *pdev)
 220 {
 221         struct net_device *dev = pci_get_drvdata(pdev);
 222         struct rr_private *rr = netdev_priv(dev);
 223 
 224         if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
 225                 printk(KERN_ERR "%s: trying to unload running NIC\n",
 226                        dev->name);
 227                 writel(HALT_NIC, &rr->regs->HostCtrl);
 228         }
 229 
 230         unregister_netdev(dev);
 231         pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
 232                             rr->evt_ring_dma);
 233         pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
 234                             rr->rx_ring_dma);
 235         pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
 236                             rr->tx_ring_dma);
 237         pci_iounmap(pdev, rr->regs);
 238         pci_release_regions(pdev);
 239         pci_disable_device(pdev);
 240         free_netdev(dev);
 241 }
 242 
 243 
 244 /*
 245  * Commands are considered to be slow, thus there is no reason to
 246  * inline this.
 247  */
 248 static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
 249 {
 250         struct rr_regs __iomem *regs;
 251         u32 idx;
 252 
 253         regs = rrpriv->regs;
 254         /*
 255          * This is temporary - it will go away in the final version.
 256          * We probably also want to make this function inline.
 257          */
 258         if (readl(&regs->HostCtrl) & NIC_HALTED){
 259                 printk("issuing command for halted NIC, code 0x%x, "
 260                        "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
 261                 if (readl(&regs->Mode) & FATAL_ERR)
 262                         printk("error codes Fail1 %02x, Fail2 %02x\n",
 263                                readl(&regs->Fail1), readl(&regs->Fail2));
 264         }
 265 
 266         idx = rrpriv->info->cmd_ctrl.pi;
 267 
 268         writel(*(u32*)(cmd), &regs->CmdRing[idx]);
 269         wmb();
 270 
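        /*
         * The command ring producer index counts downwards; since idx is
         * unsigned, the modulo below wraps 0 back around to
         * CMD_RING_ENTRIES - 1 (rr_init1() starts pi at 15).
         */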
 271         idx = (idx - 1) % CMD_RING_ENTRIES;
 272         rrpriv->info->cmd_ctrl.pi = idx;
 273         wmb();
 274 
 275         if (readl(&regs->Mode) & FATAL_ERR)
 276                 printk("error code %02x\n", readl(&regs->Fail1));
 277 }
 278 
 279 
 280 /*
 281  * Reset the board in a sensible manner. The NIC is already halted
 282  * when we get here and a spin-lock is held.
 283  */
 284 static int rr_reset(struct net_device *dev)
 285 {
 286         struct rr_private *rrpriv;
 287         struct rr_regs __iomem *regs;
 288         u32 start_pc;
 289         int i;
 290 
 291         rrpriv = netdev_priv(dev);
 292         regs = rrpriv->regs;
 293 
 294         rr_load_firmware(dev);
 295 
 296         writel(0x01000000, &regs->TX_state);
 297         writel(0xff800000, &regs->RX_state);
 298         writel(0, &regs->AssistState);
 299         writel(CLEAR_INTA, &regs->LocalCtrl);
 300         writel(0x01, &regs->BrkPt);
 301         writel(0, &regs->Timer);
 302         writel(0, &regs->TimerRef);
 303         writel(RESET_DMA, &regs->DmaReadState);
 304         writel(RESET_DMA, &regs->DmaWriteState);
 305         writel(0, &regs->DmaWriteHostHi);
 306         writel(0, &regs->DmaWriteHostLo);
 307         writel(0, &regs->DmaReadHostHi);
 308         writel(0, &regs->DmaReadHostLo);
 309         writel(0, &regs->DmaReadLen);
 310         writel(0, &regs->DmaWriteLen);
 311         writel(0, &regs->DmaWriteLcl);
 312         writel(0, &regs->DmaWriteIPchecksum);
 313         writel(0, &regs->DmaReadLcl);
 314         writel(0, &regs->DmaReadIPchecksum);
 315         writel(0, &regs->PciState);
 316 #if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
 317         writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
 318 #elif (BITS_PER_LONG == 64)
 319         writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
 320 #else
 321         writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
 322 #endif
 323 
 324 #if 0
 325         /*
 326          * Don't worry, this is just black magic.
 327          */
 328         writel(0xdf000, &regs->RxBase);
 329         writel(0xdf000, &regs->RxPrd);
 330         writel(0xdf000, &regs->RxCon);
 331         writel(0xce000, &regs->TxBase);
 332         writel(0xce000, &regs->TxPrd);
 333         writel(0xce000, &regs->TxCon);
 334         writel(0, &regs->RxIndPro);
 335         writel(0, &regs->RxIndCon);
 336         writel(0, &regs->RxIndRef);
 337         writel(0, &regs->TxIndPro);
 338         writel(0, &regs->TxIndCon);
 339         writel(0, &regs->TxIndRef);
 340         writel(0xcc000, &regs->pad10[0]);
 341         writel(0, &regs->DrCmndPro);
 342         writel(0, &regs->DrCmndCon);
 343         writel(0, &regs->DwCmndPro);
 344         writel(0, &regs->DwCmndCon);
 345         writel(0, &regs->DwCmndRef);
 346         writel(0, &regs->DrDataPro);
 347         writel(0, &regs->DrDataCon);
 348         writel(0, &regs->DrDataRef);
 349         writel(0, &regs->DwDataPro);
 350         writel(0, &regs->DwDataCon);
 351         writel(0, &regs->DwDataRef);
 352 #endif
 353 
 354         writel(0xffffffff, &regs->MbEvent);
 355         writel(0, &regs->Event);
 356 
 357         writel(0, &regs->TxPi);
 358         writel(0, &regs->IpRxPi);
 359 
 360         writel(0, &regs->EvtCon);
 361         writel(0, &regs->EvtPrd);
 362 
 363         rrpriv->info->evt_ctrl.pi = 0;
 364 
 365         for (i = 0; i < CMD_RING_ENTRIES; i++)
 366                 writel(0, &regs->CmdRing[i]);
 367 
 368 /*
 369  * Why 32? Is this not cache line size dependent?
 370  */
 371         writel(RBURST_64|WBURST_64, &regs->PciState);
 372         wmb();
 373 
 374         start_pc = rr_read_eeprom_word(rrpriv,
 375                         offsetof(struct eeprom, rncd_info.FwStart));
 376 
 377 #if (DEBUG > 1)
 378         printk("%s: Executing firmware at address 0x%06x\n",
 379                dev->name, start_pc);
 380 #endif
 381 
 382         writel(start_pc + 0x800, &regs->Pc);
 383         wmb();
 384         udelay(5);
 385 
 386         writel(start_pc, &regs->Pc);
 387         wmb();
 388 
 389         return 0;
 390 }
 391 
 392 
 393 /*
 394  * Read a string from the EEPROM.
 395  */
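/*
 * The loop below maps byte i of the EEPROM to the window address
 * EEPROM_BASE + ((offset + i) << 3) and reads the byte back from
 * bits 31:24 of WinData.
 */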
 396 static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
 397                                 unsigned long offset,
 398                                 unsigned char *buf,
 399                                 unsigned long length)
 400 {
 401         struct rr_regs __iomem *regs = rrpriv->regs;
 402         u32 misc, io, host, i;
 403 
 404         io = readl(&regs->ExtIo);
 405         writel(0, &regs->ExtIo);
 406         misc = readl(&regs->LocalCtrl);
 407         writel(0, &regs->LocalCtrl);
 408         host = readl(&regs->HostCtrl);
 409         writel(host | HALT_NIC, &regs->HostCtrl);
 410         mb();
 411 
 412         for (i = 0; i < length; i++){
 413                 writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
 414                 mb();
 415                 buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
 416                 mb();
 417         }
 418 
 419         writel(host, &regs->HostCtrl);
 420         writel(misc, &regs->LocalCtrl);
 421         writel(io, &regs->ExtIo);
 422         mb();
 423         return i;
 424 }
 425 
 426 
 427 /*
 428  * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 429  * it to our CPU byte-order.
 430  */
 431 static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
 432                             size_t offset)
 433 {
 434         __be32 word;
 435 
 436         if ((rr_read_eeprom(rrpriv, offset,
 437                             (unsigned char *)&word, 4) == 4))
 438                 return be32_to_cpu(word);
 439         return 0;
 440 }
 441 
 442 
 443 /*
 444  * Write a string to the EEPROM.
 445  *
 446  * This is only called when the firmware is not running.
 447  */
 448 static unsigned int write_eeprom(struct rr_private *rrpriv,
 449                                  unsigned long offset,
 450                                  unsigned char *buf,
 451                                  unsigned long length)
 452 {
 453         struct rr_regs __iomem *regs = rrpriv->regs;
 454         u32 misc, io, data, i, j, ready, error = 0;
 455 
 456         io = readl(&regs->ExtIo);
 457         writel(0, &regs->ExtIo);
 458         misc = readl(&regs->LocalCtrl);
 459         writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
 460         mb();
 461 
 462         for (i = 0; i < length; i++){
 463                 writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
 464                 mb();
 465                 data = buf[i] << 24;
 466                 /*
 467                  * Only try to write the data if it is not the same
 468                  * value already.
 469                  */
 470                 if ((readl(&regs->WinData) & 0xff000000) != data){
 471                         writel(data, &regs->WinData);
 472                         ready = 0;
 473                         j = 0;
 474                         mb();
 475                         while(!ready){
 476                                 udelay(20);
 477                                 if ((readl(&regs->WinData) & 0xff000000) ==
 478                                     data)
 479                                         ready = 1;
 480                                 mb();
 481                                 if (j++ > 5000){
 482                                         printk("data mismatch: %08x, "
 483                                                "WinData %08x\n", data,
 484                                                readl(&regs->WinData));
 485                                         ready = 1;
 486                                         error = 1;
 487                                 }
 488                         }
 489                 }
 490         }
 491 
 492         writel(misc, &regs->LocalCtrl);
 493         writel(io, &regs->ExtIo);
 494         mb();
 495 
 496         return error;
 497 }
 498 
 499 
 500 static int rr_init(struct net_device *dev)
 501 {
 502         struct rr_private *rrpriv;
 503         struct rr_regs __iomem *regs;
 504         u32 sram_size, rev;
 505 
 506         rrpriv = netdev_priv(dev);
 507         regs = rrpriv->regs;
 508 
 509         rev = readl(&regs->FwRev);
 510         rrpriv->fw_rev = rev;
 511         if (rev > 0x00020024)
 512                 printk("  Firmware revision: %i.%i.%i\n", (rev >> 16),
 513                        ((rev >> 8) & 0xff), (rev & 0xff));
 514         else if (rev >= 0x00020000) {
 515                 printk("  Firmware revision: %i.%i.%i (2.0.37 or "
 516                        "later is recommended)\n", (rev >> 16),
 517                        ((rev >> 8) & 0xff), (rev & 0xff));
 518         }else{
 519                 printk("  Firmware revision too old: %i.%i.%i, please "
 520                        "upgrade to 2.0.37 or later.\n",
 521                        (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
 522         }
 523 
 524 #if (DEBUG > 2)
 525         printk("  Maximum receive rings %i\n", readl(&regs->MaxRxRng));
 526 #endif
 527 
 528         /*
 529          * Read the hardware address from the eeprom.  The HW address
 530          * is not really necessary for HIPPI but awfully convenient.
 531          * The pointer arithmetic to put it in dev_addr is ugly, but
 532          * Donald Becker does it this way for the GigE version of this
 533          * card and it's shorter and more portable than any
 534          * other method I've seen.  -VAL
 535          */
 536 
 537         *(__be16 *)(dev->dev_addr) =
 538           htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
 539         *(__be32 *)(dev->dev_addr+2) =
 540           htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
 541 
 542         printk("  MAC: %pM\n", dev->dev_addr);
 543 
 544         sram_size = rr_read_eeprom_word(rrpriv, 8);
 545         printk("  SRAM size 0x%06x\n", sram_size);
 546 
 547         return 0;
 548 }
 549 
 550 
 551 static int rr_init1(struct net_device *dev)
 552 {
 553         struct rr_private *rrpriv;
 554         struct rr_regs __iomem *regs;
 555         unsigned long myjif, flags;
 556         struct cmd cmd;
 557         u32 hostctrl;
 558         int ecode = 0;
 559         short i;
 560 
 561         rrpriv = netdev_priv(dev);
 562         regs = rrpriv->regs;
 563 
 564         spin_lock_irqsave(&rrpriv->lock, flags);
 565 
 566         hostctrl = readl(&regs->HostCtrl);
 567         writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
 568         wmb();
 569 
 570         if (hostctrl & PARITY_ERR){
 571                 printk("%s: Parity error halting NIC - this is serious!\n",
 572                        dev->name);
 573                 spin_unlock_irqrestore(&rrpriv->lock, flags);
 574                 ecode = -EFAULT;
 575                 goto error;
 576         }
 577 
 578         set_rxaddr(regs, rrpriv->rx_ctrl_dma);
 579         set_infoaddr(regs, rrpriv->info_dma);
 580 
 581         rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
 582         rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
 583         rrpriv->info->evt_ctrl.mode = 0;
 584         rrpriv->info->evt_ctrl.pi = 0;
 585         set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
 586 
 587         rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
 588         rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
 589         rrpriv->info->cmd_ctrl.mode = 0;
 590         rrpriv->info->cmd_ctrl.pi = 15;
 591 
 592         for (i = 0; i < CMD_RING_ENTRIES; i++) {
 593                 writel(0, &regs->CmdRing[i]);
 594         }
 595 
 596         for (i = 0; i < TX_RING_ENTRIES; i++) {
 597                 rrpriv->tx_ring[i].size = 0;
 598                 set_rraddr(&rrpriv->tx_ring[i].addr, 0);
 599                 rrpriv->tx_skbuff[i] = NULL;
 600         }
 601         rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
 602         rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
 603         rrpriv->info->tx_ctrl.mode = 0;
 604         rrpriv->info->tx_ctrl.pi = 0;
 605         set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
 606 
 607         /*
 608          * Set dirty_tx before we start receiving interrupts, otherwise
 609          * the interrupt handler might think it is supposed to process
 610          * tx ints before we are up and running, which may cause a null
 611          * pointer access in the int handler.
 612          */
 613         rrpriv->tx_full = 0;
 614         rrpriv->cur_rx = 0;
 615         rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
 616 
 617         rr_reset(dev);
 618 
 619         /* Tuning values */
 620         writel(0x5000, &regs->ConRetry);
 621         writel(0x100, &regs->ConRetryTmr);
 622         writel(0x500000, &regs->ConTmout);
 623         writel(0x60, &regs->IntrTmr);
 624         writel(0x500000, &regs->TxDataMvTimeout);
 625         writel(0x200000, &regs->RxDataMvTimeout);
 626         writel(0x80, &regs->WriteDmaThresh);
 627         writel(0x80, &regs->ReadDmaThresh);
 628 
 629         rrpriv->fw_running = 0;
 630         wmb();
 631 
 632         hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
 633         writel(hostctrl, &regs->HostCtrl);
 634         wmb();
 635 
 636         spin_unlock_irqrestore(&rrpriv->lock, flags);
 637 
 638         for (i = 0; i < RX_RING_ENTRIES; i++) {
 639                 struct sk_buff *skb;
 640                 dma_addr_t addr;
 641 
 642                 rrpriv->rx_ring[i].mode = 0;
 643                 skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
 644                 if (!skb) {
 645                         printk(KERN_WARNING "%s: Unable to allocate memory "
 646                                "for receive ring - halting NIC\n", dev->name);
 647                         ecode = -ENOMEM;
 648                         goto error;
 649                 }
 650                 rrpriv->rx_skbuff[i] = skb;
 651                 addr = pci_map_single(rrpriv->pci_dev, skb->data,
 652                         dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
 653                 /*
 654                  * Sanity test to see if we conflict with the DMA
 655                  * limitations of the Roadrunner.
 656                  */
 657                 if ((((unsigned long)skb->data) & 0xfff) > ~65320)
 658                         printk("skb alloc error\n");
 659 
 660                 set_rraddr(&rrpriv->rx_ring[i].addr, addr);
 661                 rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
 662         }
 663 
 664         rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
 665         rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
 666         rrpriv->rx_ctrl[4].mode = 8;
 667         rrpriv->rx_ctrl[4].pi = 0;
 668         wmb();
 669         set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
 670 
 671         udelay(1000);
 672 
 673         /*
 674          * Now start the FirmWare.
 675          */
 676         cmd.code = C_START_FW;
 677         cmd.ring = 0;
 678         cmd.index = 0;
 679 
 680         rr_issue_cmd(rrpriv, &cmd);
 681 
 682         /*
 683          * Give the FirmWare time to chew on the `get running' command.
 684          */
 685         myjif = jiffies + 5 * HZ;
 686         while (time_before(jiffies, myjif) && !rrpriv->fw_running)
 687                 cpu_relax();
 688 
 689         netif_start_queue(dev);
 690 
 691         return ecode;
 692 
 693  error:
 694         /*
 695          * We might have gotten here because we are out of memory, so
 696          * make sure we release everything we allocated before failing.
 697          */
 698         for (i = 0; i < RX_RING_ENTRIES; i++) {
 699                 struct sk_buff *skb = rrpriv->rx_skbuff[i];
 700 
 701                 if (skb) {
 702                         pci_unmap_single(rrpriv->pci_dev,
 703                                          rrpriv->rx_ring[i].addr.addrlo,
 704                                          dev->mtu + HIPPI_HLEN,
 705                                          PCI_DMA_FROMDEVICE);
 706                         rrpriv->rx_ring[i].size = 0;
 707                         set_rraddr(&rrpriv->rx_ring[i].addr, 0);
 708                         dev_kfree_skb(skb);
 709                         rrpriv->rx_skbuff[i] = NULL;
 710                 }
 711         }
 712         return ecode;
 713 }
 714 
 715 
 716 /*
 717  * All events are considered to be slow (RX/TX ints do not generate
 718  * events) and are handled here, outside the main interrupt handler,
 719  * to reduce the size of the handler.
 720  */
 721 static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
 722 {
 723         struct rr_private *rrpriv;
 724         struct rr_regs __iomem *regs;
 725         u32 tmp;
 726 
 727         rrpriv = netdev_priv(dev);
 728         regs = rrpriv->regs;
 729 
 730         while (prodidx != eidx){
 731                 switch (rrpriv->evt_ring[eidx].code){
 732                 case E_NIC_UP:
 733                         tmp = readl(&regs->FwRev);
 734                         printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
 735                                "up and running\n", dev->name,
 736                                (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
 737                         rrpriv->fw_running = 1;
 738                         writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
 739                         wmb();
 740                         break;
 741                 case E_LINK_ON:
 742                         printk(KERN_INFO "%s: Optical link ON\n", dev->name);
 743                         break;
 744                 case E_LINK_OFF:
 745                         printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
 746                         break;
 747                 case E_RX_IDLE:
 748                         printk(KERN_WARNING "%s: RX data not moving\n",
 749                                dev->name);
 750                         goto drop;
 751                 case E_WATCHDOG:
 752                         printk(KERN_INFO "%s: The watchdog is here to see "
 753                                "us\n", dev->name);
 754                         break;
 755                 case E_INTERN_ERR:
 756                         printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
 757                                dev->name);
 758                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 759                                &regs->HostCtrl);
 760                         wmb();
 761                         break;
 762                 case E_HOST_ERR:
 763                         printk(KERN_ERR "%s: Host software error\n",
 764                                dev->name);
 765                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 766                                &regs->HostCtrl);
 767                         wmb();
 768                         break;
 769                 /*
 770                  * TX events.
 771                  */
 772                 case E_CON_REJ:
 773                         printk(KERN_WARNING "%s: Connection rejected\n",
 774                                dev->name);
 775                         dev->stats.tx_aborted_errors++;
 776                         break;
 777                 case E_CON_TMOUT:
 778                         printk(KERN_WARNING "%s: Connection timeout\n",
 779                                dev->name);
 780                         break;
 781                 case E_DISC_ERR:
 782                         printk(KERN_WARNING "%s: HIPPI disconnect error\n",
 783                                dev->name);
 784                         dev->stats.tx_aborted_errors++;
 785                         break;
 786                 case E_INT_PRTY:
 787                         printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
 788                                dev->name);
 789                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 790                                &regs->HostCtrl);
 791                         wmb();
 792                         break;
 793                 case E_TX_IDLE:
 794                         printk(KERN_WARNING "%s: Transmitter idle\n",
 795                                dev->name);
 796                         break;
 797                 case E_TX_LINK_DROP:
 798                         printk(KERN_WARNING "%s: Link lost during transmit\n",
 799                                dev->name);
 800                         dev->stats.tx_aborted_errors++;
 801                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 802                                &regs->HostCtrl);
 803                         wmb();
 804                         break;
 805                 case E_TX_INV_RNG:
 806                         printk(KERN_ERR "%s: Invalid send ring block\n",
 807                                dev->name);
 808                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 809                                &regs->HostCtrl);
 810                         wmb();
 811                         break;
 812                 case E_TX_INV_BUF:
 813                         printk(KERN_ERR "%s: Invalid send buffer address\n",
 814                                dev->name);
 815                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 816                                &regs->HostCtrl);
 817                         wmb();
 818                         break;
 819                 case E_TX_INV_DSC:
 820                         printk(KERN_ERR "%s: Invalid descriptor address\n",
 821                                dev->name);
 822                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 823                                &regs->HostCtrl);
 824                         wmb();
 825                         break;
 826                 /*
 827                  * RX events.
 828                  */
 829                 case E_RX_RNG_OUT:
 830                         printk(KERN_INFO "%s: Receive ring full\n", dev->name);
 831                         break;
 832 
 833                 case E_RX_PAR_ERR:
 834                         printk(KERN_WARNING "%s: Receive parity error\n",
 835                                dev->name);
 836                         goto drop;
 837                 case E_RX_LLRC_ERR:
 838                         printk(KERN_WARNING "%s: Receive LLRC error\n",
 839                                dev->name);
 840                         goto drop;
 841                 case E_PKT_LN_ERR:
 842                         printk(KERN_WARNING "%s: Receive packet length "
 843                                "error\n", dev->name);
 844                         goto drop;
 845                 case E_DTA_CKSM_ERR:
 846                         printk(KERN_WARNING "%s: Data checksum error\n",
 847                                dev->name);
 848                         goto drop;
 849                 case E_SHT_BST:
 850                         printk(KERN_WARNING "%s: Unexpected short burst "
 851                                "error\n", dev->name);
 852                         goto drop;
 853                 case E_STATE_ERR:
 854                         printk(KERN_WARNING "%s: Recv. state transition"
 855                                " error\n", dev->name);
 856                         goto drop;
 857                 case E_UNEXP_DATA:
 858                         printk(KERN_WARNING "%s: Unexpected data error\n",
 859                                dev->name);
 860                         goto drop;
 861                 case E_LST_LNK_ERR:
 862                         printk(KERN_WARNING "%s: Link lost error\n",
 863                                dev->name);
 864                         goto drop;
 865                 case E_FRM_ERR:
 866                         printk(KERN_WARNING "%s: Framing Error\n",
 867                                dev->name);
 868                         goto drop;
 869                 case E_FLG_SYN_ERR:
 870                         printk(KERN_WARNING "%s: Flag sync. lost during "
 871                                "packet\n", dev->name);
 872                         goto drop;
 873                 case E_RX_INV_BUF:
 874                         printk(KERN_ERR "%s: Invalid receive buffer "
 875                                "address\n", dev->name);
 876                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 877                                &regs->HostCtrl);
 878                         wmb();
 879                         break;
 880                 case E_RX_INV_DSC:
 881                         printk(KERN_ERR "%s: Invalid receive descriptor "
 882                                "address\n", dev->name);
 883                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 884                                &regs->HostCtrl);
 885                         wmb();
 886                         break;
 887                 case E_RNG_BLK:
 888                         printk(KERN_ERR "%s: Invalid ring block\n",
 889                                dev->name);
 890                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
 891                                &regs->HostCtrl);
 892                         wmb();
 893                         break;
 894                 drop:
 895                         /* Label packet to be dropped.
 896                          * Actual dropping occurs in rx
 897                          * handling.
 898                          *
 899                          * The index of packet we get to drop is
 900                          * the index of the packet following
 901                          * the bad packet. -kbf
 902                          */
 903                         {
 904                                 u16 index = rrpriv->evt_ring[eidx].index;
 905                                 index = (index + (RX_RING_ENTRIES - 1)) %
 906                                         RX_RING_ENTRIES;
 907                                 rrpriv->rx_ring[index].mode |=
 908                                         (PACKET_BAD | PACKET_END);
 909                         }
 910                         break;
 911                 default:
 912                         printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
 913                                dev->name, rrpriv->evt_ring[eidx].code);
 914                 }
 915                 eidx = (eidx + 1) % EVT_RING_ENTRIES;
 916         }
 917 
 918         rrpriv->info->evt_ctrl.pi = eidx;
 919         wmb();
 920         return eidx;
 921 }
 922 
 923 
 924 static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
 925 {
 926         struct rr_private *rrpriv = netdev_priv(dev);
 927         struct rr_regs __iomem *regs = rrpriv->regs;
 928 
 929         do {
 930                 struct rx_desc *desc;
 931                 u32 pkt_len;
 932 
 933                 desc = &(rrpriv->rx_ring[index]);
 934                 pkt_len = desc->size;
 935 #if (DEBUG > 2)
 936                 printk("index %i, rxlimit %i\n", index, rxlimit);
 937                 printk("len %x, mode %x\n", pkt_len, desc->mode);
 938 #endif
 939                 if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
 940                         dev->stats.rx_dropped++;
 941                         goto defer;
 942                 }
 943 
 944                 if (pkt_len > 0){
 945                         struct sk_buff *skb, *rx_skb;
 946 
 947                         rx_skb = rrpriv->rx_skbuff[index];
 948 
 949                         if (pkt_len < PKT_COPY_THRESHOLD) {
 950                                 skb = alloc_skb(pkt_len, GFP_ATOMIC);
 951                                 if (skb == NULL){
 952                                         printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
 953                                         dev->stats.rx_dropped++;
 954                                         goto defer;
 955                                 } else {
 956                                         pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
 957                                                                     desc->addr.addrlo,
 958                                                                     pkt_len,
 959                                                                     PCI_DMA_FROMDEVICE);
 960 
 961                                         skb_put_data(skb, rx_skb->data,
 962                                                      pkt_len);
 963 
 964                                         pci_dma_sync_single_for_device(rrpriv->pci_dev,
 965                                                                        desc->addr.addrlo,
 966                                                                        pkt_len,
 967                                                                        PCI_DMA_FROMDEVICE);
 968                                 }
 969                         }else{
 970                                 struct sk_buff *newskb;
 971 
 972                                 newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
 973                                         GFP_ATOMIC);
 974                                 if (newskb){
 975                                         dma_addr_t addr;
 976 
 977                                         pci_unmap_single(rrpriv->pci_dev,
 978                                                 desc->addr.addrlo, dev->mtu +
 979                                                 HIPPI_HLEN, PCI_DMA_FROMDEVICE);
 980                                         skb = rx_skb;
 981                                         skb_put(skb, pkt_len);
 982                                         rrpriv->rx_skbuff[index] = newskb;
 983                                         addr = pci_map_single(rrpriv->pci_dev,
 984                                                 newskb->data,
 985                                                 dev->mtu + HIPPI_HLEN,
 986                                                 PCI_DMA_FROMDEVICE);
 987                                         set_rraddr(&desc->addr, addr);
 988                                 } else {
 989                                         printk("%s: Out of memory, deferring "
 990                                                "packet\n", dev->name);
 991                                         dev->stats.rx_dropped++;
 992                                         goto defer;
 993                                 }
 994                         }
 995                         skb->protocol = hippi_type_trans(skb, dev);
 996 
 997                         netif_rx(skb);          /* send it up */
 998 
 999                         dev->stats.rx_packets++;
1000                         dev->stats.rx_bytes += pkt_len;
1001                 }
1002         defer:
1003                 desc->mode = 0;
1004                 desc->size = dev->mtu + HIPPI_HLEN;
1005 
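                /*
                 * Hand receive descriptors back to the NIC in batches by
                 * bumping the RX producer index on every eighth entry.
                 */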
1006                 if ((index & 7) == 7)
1007                         writel(index, &regs->IpRxPi);
1008 
1009                 index = (index + 1) % RX_RING_ENTRIES;
1010         } while(index != rxlimit);
1011 
1012         rrpriv->cur_rx = index;
1013         wmb();
1014 }
1015 
1016 
1017 static irqreturn_t rr_interrupt(int irq, void *dev_id)
1018 {
1019         struct rr_private *rrpriv;
1020         struct rr_regs __iomem *regs;
1021         struct net_device *dev = (struct net_device *)dev_id;
1022         u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
1023 
1024         rrpriv = netdev_priv(dev);
1025         regs = rrpriv->regs;
1026 
1027         if (!(readl(&regs->HostCtrl) & RR_INT))
1028                 return IRQ_NONE;
1029 
1030         spin_lock(&rrpriv->lock);
1031 
1032         prodidx = readl(&regs->EvtPrd);
1033         txcsmr = (prodidx >> 8) & 0xff;
1034         rxlimit = (prodidx >> 16) & 0xff;
1035         prodidx &= 0xff;
1036 
1037 #if (DEBUG > 2)
1038         printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
1039                prodidx, rrpriv->info->evt_ctrl.pi);
1040 #endif
1041         /*
1042          * Order here is important.  We must handle events
1043          * before doing anything else in order to catch
1044          * such things as LLRC errors, etc -kbf
1045          */
1046 
1047         eidx = rrpriv->info->evt_ctrl.pi;
1048         if (prodidx != eidx)
1049                 eidx = rr_handle_event(dev, prodidx, eidx);
1050 
1051         rxindex = rrpriv->cur_rx;
1052         if (rxindex != rxlimit)
1053                 rx_int(dev, rxlimit, rxindex);
1054 
1055         txcon = rrpriv->dirty_tx;
1056         if (txcsmr != txcon) {
1057                 do {
1058                         /* Due to an occasional firmware TX producer/consumer
1059                          * out-of-sync error we need to check the entry in the ring -kbf
1060                          */
1061                         if(rrpriv->tx_skbuff[txcon]){
1062                                 struct tx_desc *desc;
1063                                 struct sk_buff *skb;
1064 
1065                                 desc = &(rrpriv->tx_ring[txcon]);
1066                                 skb = rrpriv->tx_skbuff[txcon];
1067 
1068                                 dev->stats.tx_packets++;
1069                                 dev->stats.tx_bytes += skb->len;
1070 
1071                                 pci_unmap_single(rrpriv->pci_dev,
1072                                                  desc->addr.addrlo, skb->len,
1073                                                  PCI_DMA_TODEVICE);
1074                                 dev_kfree_skb_irq(skb);
1075 
1076                                 rrpriv->tx_skbuff[txcon] = NULL;
1077                                 desc->size = 0;
1078                                 set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
1079                                 desc->mode = 0;
1080                         }
1081                         txcon = (txcon + 1) % TX_RING_ENTRIES;
1082                 } while (txcsmr != txcon);
1083                 wmb();
1084 
1085                 rrpriv->dirty_tx = txcon;
1086                 if (rrpriv->tx_full && rr_if_busy(dev) &&
1087                     (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
1088                      != rrpriv->dirty_tx)){
1089                         rrpriv->tx_full = 0;
1090                         netif_wake_queue(dev);
1091                 }
1092         }
1093 
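        /*
         * Acknowledge progress: EvtCon is written with the consumer
         * indices packed in the same layout as EvtPrd above, i.e. the
         * event index in bits 7:0, TX in 15:8 and RX in 23:16.
         */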
1094         eidx |= ((txcsmr << 8) | (rxlimit << 16));
1095         writel(eidx, &regs->EvtCon);
1096         wmb();
1097 
1098         spin_unlock(&rrpriv->lock);
1099         return IRQ_HANDLED;
1100 }
1101 
1102 static inline void rr_raz_tx(struct rr_private *rrpriv,
1103                              struct net_device *dev)
1104 {
1105         int i;
1106 
1107         for (i = 0; i < TX_RING_ENTRIES; i++) {
1108                 struct sk_buff *skb = rrpriv->tx_skbuff[i];
1109 
1110                 if (skb) {
1111                         struct tx_desc *desc = &(rrpriv->tx_ring[i]);
1112 
1113                         pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
1114                                 skb->len, PCI_DMA_TODEVICE);
1115                         desc->size = 0;
1116                         set_rraddr(&desc->addr, 0);
1117                         dev_kfree_skb(skb);
1118                         rrpriv->tx_skbuff[i] = NULL;
1119                 }
1120         }
1121 }
1122 
1123 
1124 static inline void rr_raz_rx(struct rr_private *rrpriv,
1125                              struct net_device *dev)
1126 {
1127         int i;
1128 
1129         for (i = 0; i < RX_RING_ENTRIES; i++) {
1130                 struct sk_buff *skb = rrpriv->rx_skbuff[i];
1131 
1132                 if (skb) {
1133                         struct rx_desc *desc = &(rrpriv->rx_ring[i]);
1134 
1135                         pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
1136                                 dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
1137                         desc->size = 0;
1138                         set_rraddr(&desc->addr, 0);
1139                         dev_kfree_skb(skb);
1140                         rrpriv->rx_skbuff[i] = NULL;
1141                 }
1142         }
1143 }
1144 
1145 static void rr_timer(struct timer_list *t)
1146 {
1147         struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
1148         struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
1149         struct rr_regs __iomem *regs = rrpriv->regs;
1150         unsigned long flags;
1151 
1152         if (readl(&regs->HostCtrl) & NIC_HALTED){
1153                 printk("%s: Restarting nic\n", dev->name);
1154                 memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
1155                 memset(rrpriv->info, 0, sizeof(struct rr_info));
1156                 wmb();
1157 
1158                 rr_raz_tx(rrpriv, dev);
1159                 rr_raz_rx(rrpriv, dev);
1160 
1161                 if (rr_init1(dev)) {
1162                         spin_lock_irqsave(&rrpriv->lock, flags);
1163                         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
1164                                &regs->HostCtrl);
1165                         spin_unlock_irqrestore(&rrpriv->lock, flags);
1166                 }
1167         }
1168         rrpriv->timer.expires = RUN_AT(5*HZ);
1169         add_timer(&rrpriv->timer);
1170 }
1171 
1172 
1173 static int rr_open(struct net_device *dev)
1174 {
1175         struct rr_private *rrpriv = netdev_priv(dev);
1176         struct pci_dev *pdev = rrpriv->pci_dev;
1177         struct rr_regs __iomem *regs;
1178         int ecode = 0;
1179         unsigned long flags;
1180         dma_addr_t dma_addr;
1181 
1182         regs = rrpriv->regs;
1183 
1184         if (rrpriv->fw_rev < 0x00020000) {
1185                 printk(KERN_WARNING "%s: trying to configure device with "
1186                        "obsolete firmware\n", dev->name);
1187                 ecode = -EBUSY;
1188                 goto error;
1189         }
1190 
1191         rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
1192                                                256 * sizeof(struct ring_ctrl),
1193                                                &dma_addr);
1194         if (!rrpriv->rx_ctrl) {
1195                 ecode = -ENOMEM;
1196                 goto error;
1197         }
1198         rrpriv->rx_ctrl_dma = dma_addr;
1199 
1200         rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
1201                                             &dma_addr);
1202         if (!rrpriv->info) {
1203                 ecode = -ENOMEM;
1204                 goto error;
1205         }
1206         rrpriv->info_dma = dma_addr;
1207         wmb();
1208 
1209         spin_lock_irqsave(&rrpriv->lock, flags);
1210         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
1211         readl(&regs->HostCtrl);
1212         spin_unlock_irqrestore(&rrpriv->lock, flags);
1213 
1214         if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
1215                 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1216                        dev->name, pdev->irq);
1217                 ecode = -EAGAIN;
1218                 goto error;
1219         }
1220 
1221         if ((ecode = rr_init1(dev)))
1222                 goto error;
1223 
1224         /* Set the timer to check for link beat and perhaps switch
1225            to an alternate media type. */
1226         timer_setup(&rrpriv->timer, rr_timer, 0);
1227         rrpriv->timer.expires = RUN_AT(5*HZ);           /* 5 sec. watchdog */
1228         add_timer(&rrpriv->timer);
1229 
1230         netif_start_queue(dev);
1231 
1232         return ecode;
1233 
1234  error:
1235         spin_lock_irqsave(&rrpriv->lock, flags);
1236         writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
1237         spin_unlock_irqrestore(&rrpriv->lock, flags);
1238 
1239         if (rrpriv->info) {
1240                 pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
1241                                     rrpriv->info_dma);
1242                 rrpriv->info = NULL;
1243         }
1244         if (rrpriv->rx_ctrl) {
1245                 pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
1246                                     rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1247                 rrpriv->rx_ctrl = NULL;
1248         }
1249 
1250         netif_stop_queue(dev);
1251 
1252         return ecode;
1253 }
1254 
1255 
1256 static void rr_dump(struct net_device *dev)
1257 {
1258         struct rr_private *rrpriv;
1259         struct rr_regs __iomem *regs;
1260         u32 index, cons;
1261         short i;
1262         int len;
1263 
1264         rrpriv = netdev_priv(dev);
1265         regs = rrpriv->regs;
1266 
1267         printk("%s: dumping NIC TX rings\n", dev->name);
1268 
1269         printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
1270                readl(&regs->RxPrd), readl(&regs->TxPrd),
1271                readl(&regs->EvtPrd), readl(&regs->TxPi),
1272                rrpriv->info->tx_ctrl.pi);
1273 
1274         printk("Error code 0x%x\n", readl(&regs->Fail1));
1275 
1276         index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
1277         cons = rrpriv->dirty_tx;
1278         printk("TX ring index %i, TX consumer %i\n",
1279                index, cons);
1280 
1281         if (rrpriv->tx_skbuff[index]){
1282                 len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
1283                 printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
1284                 for (i = 0; i < len; i++){
1285                         if (!(i & 7))
1286                                 printk("\n");
1287                         printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
1288                 }
1289                 printk("\n");
1290         }
1291 
1292         if (rrpriv->tx_skbuff[cons]){
1293                 len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
1294                 printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
1295                 printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
1296                        rrpriv->tx_ring[cons].mode,
1297                        rrpriv->tx_ring[cons].size,
1298                        (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
1299                        rrpriv->tx_skbuff[cons]->data,
1300                        (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
1301                 for (i = 0; i < len; i++){
1302                         if (!(i & 7))
1303                                 printk("\n");
1304                         printk("%02x ", (unsigned char) rrpriv->tx_skbuff[cons]->data[i]);
1305                 }
1306                 printk("\n");
1307         }
1308 
1309         printk("dumping TX ring info:\n");
1310         for (i = 0; i < TX_RING_ENTRIES; i++)
1311                 printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
1312                        rrpriv->tx_ring[i].mode,
1313                        rrpriv->tx_ring[i].size,
1314                        (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
1315 
1316 }
1317 
1318 
1319 static int rr_close(struct net_device *dev)
1320 {
1321         struct rr_private *rrpriv = netdev_priv(dev);
1322         struct rr_regs __iomem *regs = rrpriv->regs;
1323         struct pci_dev *pdev = rrpriv->pci_dev;
1324         unsigned long flags;
1325         u32 tmp;
1326         short i;
1327 
1328         netif_stop_queue(dev);
1329 
1330 
1331         /*
1332          * Lock to make sure we are not cleaning up while another CPU
1333          * is handling interrupts.
1334          */
1335         spin_lock_irqsave(&rrpriv->lock, flags);
1336 
1337         tmp = readl(&regs->HostCtrl);
1338         if (tmp & NIC_HALTED){
1339                 printk("%s: NIC already halted\n", dev->name);
1340                 rr_dump(dev);
1341         }else{
1342                 tmp |= HALT_NIC | RR_CLEAR_INT;
1343                 writel(tmp, &regs->HostCtrl);
1344                 readl(&regs->HostCtrl);
1345         }
1346 
1347         rrpriv->fw_running = 0;
1348 
1349         del_timer_sync(&rrpriv->timer);
1350 
1351         writel(0, &regs->TxPi);
1352         writel(0, &regs->IpRxPi);
1353 
1354         writel(0, &regs->EvtCon);
1355         writel(0, &regs->EvtPrd);
1356 
1357         for (i = 0; i < CMD_RING_ENTRIES; i++)
1358                 writel(0, &regs->CmdRing[i]);
1359 
1360         rrpriv->info->tx_ctrl.entries = 0;
1361         rrpriv->info->cmd_ctrl.pi = 0;
1362         rrpriv->info->evt_ctrl.pi = 0;
1363         rrpriv->rx_ctrl[4].entries = 0;
1364 
1365         rr_raz_tx(rrpriv, dev);
1366         rr_raz_rx(rrpriv, dev);
1367 
1368         pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
1369                             rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1370         rrpriv->rx_ctrl = NULL;
1371 
1372         pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
1373                             rrpriv->info_dma);
1374         rrpriv->info = NULL;
1375 
1376         spin_unlock_irqrestore(&rrpriv->lock, flags);
1377         free_irq(pdev->irq, dev);
1378 
1379         return 0;
1380 }
1381 
1382 
1383 static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
1384                                  struct net_device *dev)
1385 {
1386         struct rr_private *rrpriv = netdev_priv(dev);
1387         struct rr_regs __iomem *regs = rrpriv->regs;
1388         struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
1389         struct ring_ctrl *txctrl;
1390         unsigned long flags;
1391         u32 index, len = skb->len;
1392         u32 *ifield;
1393         struct sk_buff *new_skb;
1394 
1395         if (readl(&regs->Mode) & FATAL_ERR)
1396                 printk("error codes Fail1 %02x, Fail2 %02x\n",
1397                        readl(&regs->Fail1), readl(&regs->Fail2));
1398 
1399         /*
1400          * We probably need to deal with tbusy here to prevent overruns.
1401          */
1402 
1403         if (skb_headroom(skb) < 8){
1404                 printk("incoming skb too small - reallocating\n");
1405                 if (!(new_skb = dev_alloc_skb(len + 8))) {
1406                         dev_kfree_skb(skb);
1407                         netif_wake_queue(dev);
1408                         return NETDEV_TX_OK;
1409                 }
1410                 skb_reserve(new_skb, 8);
1411                 skb_put(new_skb, len);
1412                 skb_copy_from_linear_data(skb, new_skb->data, len);
1413                 dev_kfree_skb(skb);
1414                 skb = new_skb;
1415         }
1416 
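        /*
         * Prepend two 32-bit words for the I-field: word 0 is cleared and
         * word 1 carries the I-field taken from the skb's HIPPI control
         * block.  These extra 8 bytes are counted in the descriptor size
         * below ("include IFIELD").
         */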
1417         ifield = skb_push(skb, 8);
1418 
1419         ifield[0] = 0;
1420         ifield[1] = hcb->ifield;
1421 
1422         /*
1423          * We don't need the lock until we actually start fiddling
1424          * with the control blocks.
1425          */
1426         spin_lock_irqsave(&rrpriv->lock, flags);
1427 
1428         txctrl = &rrpriv->info->tx_ctrl;
1429 
1430         index = txctrl->pi;
1431 
1432         rrpriv->tx_skbuff[index] = skb;
1433         set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
1434                 rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
1435         rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
1436         rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
1437         txctrl->pi = (index + 1) % TX_RING_ENTRIES;
1438         wmb();
1439         writel(txctrl->pi, &regs->TxPi);
1440 
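        /*
         * The ring is full once the new producer index catches up with
         * dirty_tx (the oldest descriptor not yet reclaimed); stop the
         * queue and let the interrupt handler wake it again when
         * descriptors have been freed.
         */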
1441         if (txctrl->pi == rrpriv->dirty_tx){
1442                 rrpriv->tx_full = 1;
1443                 netif_stop_queue(dev);
1444         }
1445 
1446         spin_unlock_irqrestore(&rrpriv->lock, flags);
1447 
1448         return NETDEV_TX_OK;
1449 }
1450 
1451 
1452 /*
1453  * Read the firmware out of the EEPROM and put it into the SRAM
1454  * (or from user space - later)
1455  *
1456  * This operation requires the NIC to be halted and is performed with
1457  * interrupts disabled and with the spinlock held.
1458  */
1459 static int rr_load_firmware(struct net_device *dev)
1460 {
1461         struct rr_private *rrpriv;
1462         struct rr_regs __iomem *regs;
1463         size_t eptr, segptr;
1464         int i, j;
1465         u32 localctrl, sptr, len, tmp;
1466         u32 p2len, p2size, nr_seg, revision, io, sram_size;
1467 
1468         rrpriv = netdev_priv(dev);
1469         regs = rrpriv->regs;
1470 
1471         if (dev->flags & IFF_UP)
1472                 return -EBUSY;
1473 
1474         if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
1475                 printk("%s: Trying to load firmware to a running NIC.\n",
1476                        dev->name);
1477                 return -EBUSY;
1478         }
1479 
1480         localctrl = readl(&regs->LocalCtrl);
1481         writel(0, &regs->LocalCtrl);
1482 
1483         writel(0, &regs->EvtPrd);
1484         writel(0, &regs->RxPrd);
1485         writel(0, &regs->TxPrd);
1486 
1487         /*
1488          * First wipe the entire SRAM, otherwise we might run into all
1489          * kinds of trouble ... sigh, this took almost all afternoon
1490          * to track down ;-(
1491          */
1492         io = readl(&regs->ExtIo);
1493         writel(0, &regs->ExtIo);
1494         sram_size = rr_read_eeprom_word(rrpriv, 8);
1495 
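        /*
         * SRAM is written one 32-bit word at a time through the
         * WinBase/WinData window registers; the first 200 words are left
         * untouched.
         */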
1496         for (i = 200; i < sram_size / 4; i++){
1497                 writel(i * 4, &regs->WinBase);
1498                 mb();
1499                 writel(0, &regs->WinData);
1500                 mb();
1501         }
1502         writel(io, &regs->ExtIo);
1503         mb();
1504 
1505         eptr = rr_read_eeprom_word(rrpriv,
1506                        offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
1507         eptr = ((eptr & 0x1fffff) >> 3);
1508 
1509         p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
1510         p2len = (p2len << 2);
1511         p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
1512         p2size = ((p2size & 0x1fffff) >> 3);
1513 
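        /*
         * Sanity check: the run-code segment list pointer must fall within
         * the region described by EEPROM words 0x83 and 0x84 (despite the
         * names, p2size is the region's start and p2len its length in
         * bytes).
         */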
1514         if ((eptr < p2size) || (eptr > (p2size + p2len))){
1515                 printk("%s: eptr is invalid\n", dev->name);
1516                 goto out;
1517         }
1518 
1519         revision = rr_read_eeprom_word(rrpriv,
1520                         offsetof(struct eeprom, manf.HeaderFmt));
1521 
1522         if (revision != 1){
1523                 printk("%s: invalid firmware format (%i)\n",
1524                        dev->name, revision);
1525                 goto out;
1526         }
1527 
1528         nr_seg = rr_read_eeprom_word(rrpriv, eptr);
1529         eptr += 4;
1530 #if (DEBUG > 1)
1531         printk("%s: nr_seg %i\n", dev->name, nr_seg);
1532 #endif
1533 
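        /*
         * Each segment descriptor in the EEPROM is three words: the SRAM
         * load address, the segment length in 32-bit words, and the EEPROM
         * offset of the segment data.
         */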
1534         for (i = 0; i < nr_seg; i++){
1535                 sptr = rr_read_eeprom_word(rrpriv, eptr);
1536                 eptr += 4;
1537                 len = rr_read_eeprom_word(rrpriv, eptr);
1538                 eptr += 4;
1539                 segptr = rr_read_eeprom_word(rrpriv, eptr);
1540                 segptr = ((segptr & 0x1fffff) >> 3);
1541                 eptr += 4;
1542 #if (DEBUG > 1)
1543                 printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
1544                        dev->name, i, sptr, len, segptr);
1545 #endif
1546                 for (j = 0; j < len; j++){
1547                         tmp = rr_read_eeprom_word(rrpriv, segptr);
1548                         writel(sptr, &regs->WinBase);
1549                         mb();
1550                         writel(tmp, &regs->WinData);
1551                         mb();
1552                         segptr += 4;
1553                         sptr += 4;
1554                 }
1555         }
1556 
1557 out:
1558         writel(localctrl, &regs->LocalCtrl);
1559         mb();
1560         return 0;
1561 }
1562 
1563 
1564 static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1565 {
1566         struct rr_private *rrpriv;
1567         unsigned char *image, *oldimage;
1568         unsigned long flags;
1569         unsigned int i;
1570         int error = -EOPNOTSUPP;
1571 
1572         rrpriv = netdev_priv(dev);
1573 
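        /*
         * Private ioctls: SIOCRRGFW copies the EEPROM firmware image to
         * user space, SIOCRRPFW flashes a new image and verifies it by
         * reading it back (both require CAP_SYS_RAWIO), and SIOCRRID simply
         * reports an identification magic.
         */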
1574         switch(cmd){
1575         case SIOCRRGFW:
1576                 if (!capable(CAP_SYS_RAWIO)){
1577                         return -EPERM;
1578                 }
1579 
1580                 image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
1581                 if (!image)
1582                         return -ENOMEM;
1583 
1584                 if (rrpriv->fw_running){
1585                         printk("%s: Firmware already running\n", dev->name);
1586                         error = -EPERM;
1587                         goto gf_out;
1588                 }
1589 
1590                 spin_lock_irqsave(&rrpriv->lock, flags);
1591                 i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
1592                 spin_unlock_irqrestore(&rrpriv->lock, flags);
1593                 if (i != EEPROM_BYTES){
1594                         printk(KERN_ERR "%s: Error reading EEPROM\n",
1595                                dev->name);
1596                         error = -EFAULT;
1597                         goto gf_out;
1598                 }
1599                 error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
1600                 if (error)
1601                         error = -EFAULT;
1602         gf_out:
1603                 kfree(image);
1604                 return error;
1605 
1606         case SIOCRRPFW:
1607                 if (!capable(CAP_SYS_RAWIO)){
1608                         return -EPERM;
1609                 }
1610 
1611                 image = memdup_user(rq->ifr_data, EEPROM_BYTES);
1612                 if (IS_ERR(image))
1613                         return PTR_ERR(image);
1614 
1615                 oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
1616                 if (!oldimage) {
1617                         kfree(image);
1618                         return -ENOMEM;
1619                 }
1620 
1621                 if (rrpriv->fw_running){
1622                         printk("%s: Firmware already running\n", dev->name);
1623                         error = -EPERM;
1624                         goto wf_out;
1625                 }
1626 
1627                 printk("%s: Updating EEPROM firmware\n", dev->name);
1628 
1629                 spin_lock_irqsave(&rrpriv->lock, flags);
1630                 error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
1631                 if (error)
1632                         printk(KERN_ERR "%s: Error writing EEPROM\n",
1633                                dev->name);
1634 
1635                 i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
1636                 spin_unlock_irqrestore(&rrpriv->lock, flags);
1637 
1638                 if (i != EEPROM_BYTES)
1639                         printk(KERN_ERR "%s: Error reading back EEPROM "
1640                                "image\n", dev->name);
1641 
1642                 error = memcmp(image, oldimage, EEPROM_BYTES);
1643                 if (error){
1644                         printk(KERN_ERR "%s: Error verifying EEPROM image\n",
1645                                dev->name);
1646                         error = -EFAULT;
1647                 }
1648         wf_out:
1649                 kfree(oldimage);
1650                 kfree(image);
1651                 return error;
1652 
1653         case SIOCRRID:
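                /* The magic 0x52523032 is ASCII "RR02". */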
1654                 return put_user(0x52523032, (int __user *)rq->ifr_data);
1655         default:
1656                 return error;
1657         }
1658 }
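
/*
 * Purely illustrative user-space sketch of driving the SIOCRRGFW ioctl
 * above (hypothetical example, not part of the driver: it assumes the
 * SIOCRR* and EEPROM_BYTES definitions from rrunner.h are visible to the
 * application and that the interface is named e.g. "hip0"):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	void *image = malloc(EEPROM_BYTES);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "hip0", IFNAMSIZ - 1);
 *	ifr.ifr_data = image;
 *	if (ioctl(fd, SIOCRRGFW, &ifr) < 0)
 *		perror("SIOCRRGFW");
 */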
1659 
1660 static const struct pci_device_id rr_pci_tbl[] = {
1661         { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
1662                 PCI_ANY_ID, PCI_ANY_ID, },
1663         { 0,}
1664 };
1665 MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
1666 
1667 static struct pci_driver rr_driver = {
1668         .name           = "rrunner",
1669         .id_table       = rr_pci_tbl,
1670         .probe          = rr_init_one,
1671         .remove         = rr_remove_one,
1672 };
1673 
1674 module_pci_driver(rr_driver);
