Lines Matching refs:qdev
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_trylock() argument
137 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); in ql_sem_trylock()
141 ql_write32(qdev, SEM, sem_bits | sem_mask); in ql_sem_trylock()
142 return !(ql_read32(qdev, SEM) & sem_bits); in ql_sem_trylock()
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_spinlock() argument
149 if (!ql_sem_trylock(qdev, sem_mask)) in ql_sem_spinlock()
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
158 ql_write32(qdev, SEM, sem_mask); in ql_sem_unlock()
159 ql_read32(qdev, SEM); /* flush */ in ql_sem_unlock()
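The lines above show the driver's hardware semaphore helpers: ql_sem_trylock() posts the request bits to the SEM register and reads it back to see whether ownership was granted (returning 0 on success), ql_sem_spinlock() retries that with a short delay, and ql_sem_unlock() writes the mask back and re-reads SEM to flush the posted write. A minimal sketch of that acquire/retry/release pattern follows; the sem_bits encoding, retry budget and delay are assumptions, while ql_read32()/ql_write32() are the accessors shown in the listing.

	static int ql_sem_trylock_sketch(struct ql_adapter *qdev, u32 sem_mask, u32 sem_bits)
	{
		/* Request ownership; sem_mask selects the semaphore, sem_bits asks for it. */
		ql_write32(qdev, SEM, sem_bits | sem_mask);
		/* Bits still set on read-back => granted; 0 means success here. */
		return !(ql_read32(qdev, SEM) & sem_bits);
	}

	static int ql_sem_spinlock_sketch(struct ql_adapter *qdev, u32 sem_mask, u32 sem_bits)
	{
		unsigned int wait_count = 30;			/* retry budget (assumed) */

		do {
			if (!ql_sem_trylock_sketch(qdev, sem_mask, sem_bits))
				return 0;			/* acquired */
			udelay(100);				/* back off briefly, then retry */
		} while (--wait_count);
		return -ETIMEDOUT;
	}

	static void ql_sem_unlock_sketch(struct ql_adapter *qdev, u32 sem_mask)
	{
		ql_write32(qdev, SEM, sem_mask);		/* release */
		ql_read32(qdev, SEM);				/* read back to flush the posted write */
	}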
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) in ql_wait_reg_rdy() argument
173 temp = ql_read32(qdev, reg); in ql_wait_reg_rdy()
177 netif_alert(qdev, probe, qdev->ndev, in ql_wait_reg_rdy()
186 netif_alert(qdev, probe, qdev->ndev, in ql_wait_reg_rdy()
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) in ql_wait_cfg() argument
200 temp = ql_read32(qdev, CFG); in ql_wait_cfg()
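ql_wait_reg_rdy() and ql_wait_cfg() above implement the same bounded polling idiom: read the register, bail out if the error bit is set, return success once the ready bit appears, otherwise delay and retry until a fixed count is exhausted. A sketch of that loop; the retry count and poll interval are illustrative, not the driver's exact values.

	static int ql_wait_reg_rdy_sketch(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
	{
		int count = 50;					/* illustrative retry budget */

		while (count--) {
			u32 temp = ql_read32(qdev, reg);

			if (temp & err_bit)			/* hardware flagged an error */
				return -EIO;
			if (temp & bit)				/* ready: operation complete */
				return 0;
			udelay(10);				/* illustrative poll interval */
		}
		return -ETIMEDOUT;				/* never became ready */
	}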
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, in ql_write_cfg() argument
228 map = pci_map_single(qdev->pdev, ptr, size, direction); in ql_write_cfg()
229 if (pci_dma_mapping_error(qdev->pdev, map)) { in ql_write_cfg()
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); in ql_write_cfg()
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK); in ql_write_cfg()
238 status = ql_wait_cfg(qdev, bit); in ql_write_cfg()
240 netif_err(qdev, ifup, qdev->ndev, in ql_write_cfg()
245 ql_write32(qdev, ICB_L, (u32) map); in ql_write_cfg()
246 ql_write32(qdev, ICB_H, (u32) (map >> 32)); in ql_write_cfg()
250 ql_write32(qdev, CFG, (mask | value)); in ql_write_cfg()
255 status = ql_wait_cfg(qdev, bit); in ql_write_cfg()
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ in ql_write_cfg()
258 pci_unmap_single(qdev->pdev, map, size, direction); in ql_write_cfg()
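ql_write_cfg() above is the generic control-block download path: DMA-map the caller's block, take the ICB semaphore so only one CFG operation runs at a time, wait for any previous operation to finish, point ICB_L/ICB_H at the mapped block, kick the CFG register, wait for completion, then unlock and unmap. A sketch of that ordering; the exact CFG bit encoding (load type, queue id) is collapsed into a single cfg_value parameter and the mapping direction is assumed to-device.

	static int ql_write_cfg_sketch(struct ql_adapter *qdev, void *ptr, int size,
				       u32 bit, u32 cfg_value)
	{
		u64 map;
		int status;

		map = pci_map_single(qdev->pdev, ptr, size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map))
			return -ENOMEM;

		status = ql_sem_spinlock(qdev, SEM_ICB_MASK);	/* one CFG user at a time */
		if (status)
			goto unmap;

		status = ql_wait_cfg(qdev, bit);		/* any prior load must be done */
		if (status)
			goto unlock;

		ql_write32(qdev, ICB_L, (u32)map);		/* DMA address, low half */
		ql_write32(qdev, ICB_H, (u32)(map >> 32));	/* DMA address, high half */
		ql_write32(qdev, CFG, cfg_value);		/* start the load */

		status = ql_wait_cfg(qdev, bit);		/* poll for completion */
	unlock:
		ql_sem_unlock(qdev, SEM_ICB_MASK);		/* also flushes */
	unmap:
		pci_unmap_single(qdev->pdev, map, size, PCI_DMA_TODEVICE);
		return status;
	}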
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, in ql_get_mac_addr_reg() argument
274 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_get_mac_addr_reg()
282 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA); in ql_get_mac_addr_reg()
288 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_get_mac_addr_reg()
296 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA); in ql_get_mac_addr_reg()
303 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_get_mac_addr_reg()
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, in ql_get_mac_addr_reg()
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA); in ql_get_mac_addr_reg()
322 netif_crit(qdev, ifup, qdev->ndev, in ql_get_mac_addr_reg()
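ql_get_mac_addr_reg() above reads a multi-word CAM or multicast entry one 32-bit word at a time, and every word follows the same handshake: wait for the index register to accept a write, post the offset/type/index, wait for the data to be latched, then read MAC_ADDR_DATA. A sketch of that per-word handshake; the ready bits are passed in rather than named because the exact constants are not visible in the listing.

	static int ql_read_mac_words_sketch(struct ql_adapter *qdev, u32 idx_bits, u32 offset,
					    u32 write_rdy_bit, u32 read_rdy_bit,
					    u32 *value, int words)
	{
		int i, status;

		for (i = 0; i < words; i++) {
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, write_rdy_bit, 0);
			if (status)
				return status;
			ql_write32(qdev, MAC_ADDR_IDX, (offset + i) | idx_bits);	/* select the word */
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, read_rdy_bit, 0);
			if (status)
				return status;
			value[i] = ql_read32(qdev, MAC_ADDR_DATA);			/* word is latched */
		}
		return 0;
	}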
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, in ql_set_mac_addr_reg() argument
347 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | in ql_set_mac_addr_reg()
354 ql_write32(qdev, MAC_ADDR_DATA, lower); in ql_set_mac_addr_reg()
356 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | in ql_set_mac_addr_reg()
364 ql_write32(qdev, MAC_ADDR_DATA, upper); in ql_set_mac_addr_reg()
366 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
380 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_set_mac_addr_reg()
387 ql_write32(qdev, MAC_ADDR_DATA, lower); in ql_set_mac_addr_reg()
389 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_set_mac_addr_reg()
396 ql_write32(qdev, MAC_ADDR_DATA, upper); in ql_set_mac_addr_reg()
398 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ in ql_set_mac_addr_reg()
410 (qdev-> in ql_set_mac_addr_reg()
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in ql_set_mac_addr_reg()
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output); in ql_set_mac_addr_reg()
428 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ in ql_set_mac_addr_reg()
440 netif_crit(qdev, ifup, qdev->ndev, in ql_set_mac_addr_reg()
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set) in ql_set_mac_addr() argument
459 addr = &qdev->current_mac_addr[0]; in ql_set_mac_addr()
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_set_mac_addr()
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_set_mac_addr()
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in ql_set_mac_addr()
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr, in ql_set_mac_addr()
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); in ql_set_mac_addr()
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in ql_set_mac_addr()
475 netif_err(qdev, ifup, qdev->ndev, in ql_set_mac_addr()
480 void ql_link_on(struct ql_adapter *qdev) in ql_link_on() argument
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n"); in ql_link_on()
483 netif_carrier_on(qdev->ndev); in ql_link_on()
484 ql_set_mac_addr(qdev, 1); in ql_link_on()
487 void ql_link_off(struct ql_adapter *qdev) in ql_link_off() argument
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n"); in ql_link_off()
490 netif_carrier_off(qdev->ndev); in ql_link_off()
491 ql_set_mac_addr(qdev, 0); in ql_link_off()
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) in ql_get_routing_reg() argument
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); in ql_get_routing_reg()
505 ql_write32(qdev, RT_IDX, in ql_get_routing_reg()
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); in ql_get_routing_reg()
510 *value = ql_read32(qdev, RT_DATA); in ql_get_routing_reg()
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, in ql_set_routing_reg() argument
600 netif_err(qdev, ifup, qdev->ndev, in ql_set_routing_reg()
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); in ql_set_routing_reg()
611 ql_write32(qdev, RT_IDX, value); in ql_set_routing_reg()
612 ql_write32(qdev, RT_DATA, enable ? mask : 0); in ql_set_routing_reg()
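ql_get_routing_reg() and ql_set_routing_reg() above use the same wait-then-index pattern against RT_IDX/RT_DATA: wait for the index register to become writable, post the slot, then either read RT_DATA or write the match mask (zero disables the slot). A sketch of the write side, with the slot encoding collapsed into slot_value:

	static int ql_set_routing_sketch(struct ql_adapter *qdev, u32 slot_value,
					 u32 mask, int enable)
	{
		int status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);

		if (status)
			return status;
		ql_write32(qdev, RT_IDX, slot_value);		/* which routing slot */
		ql_write32(qdev, RT_DATA, enable ? mask : 0);	/* what the slot matches */
		return 0;
	}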
618 static void ql_enable_interrupts(struct ql_adapter *qdev) in ql_enable_interrupts() argument
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); in ql_enable_interrupts()
623 static void ql_disable_interrupts(struct ql_adapter *qdev) in ql_disable_interrupts() argument
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); in ql_disable_interrupts()
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) in ql_enable_completion_interrupt() argument
638 struct intr_context *ctx = qdev->intr_context + intr; in ql_enable_completion_interrupt()
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { in ql_enable_completion_interrupt()
644 ql_write32(qdev, INTR_EN, in ql_enable_completion_interrupt()
646 var = ql_read32(qdev, STS); in ql_enable_completion_interrupt()
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_enable_completion_interrupt()
652 ql_write32(qdev, INTR_EN, in ql_enable_completion_interrupt()
654 var = ql_read32(qdev, STS); in ql_enable_completion_interrupt()
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_enable_completion_interrupt()
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) in ql_disable_completion_interrupt() argument
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) in ql_disable_completion_interrupt()
671 ctx = qdev->intr_context + intr; in ql_disable_completion_interrupt()
672 spin_lock(&qdev->hw_lock); in ql_disable_completion_interrupt()
674 ql_write32(qdev, INTR_EN, in ql_disable_completion_interrupt()
676 var = ql_read32(qdev, STS); in ql_disable_completion_interrupt()
679 spin_unlock(&qdev->hw_lock); in ql_disable_completion_interrupt()
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) in ql_enable_all_completion_interrupts() argument
686 for (i = 0; i < qdev->intr_count; i++) { in ql_enable_all_completion_interrupts()
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || in ql_enable_all_completion_interrupts()
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1); in ql_enable_all_completion_interrupts()
694 ql_enable_completion_interrupt(qdev, i); in ql_enable_all_completion_interrupts()
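The completion-interrupt helpers above gate re-arming on a per-vector irq_cnt: with MSI-X, vectors other than 0 are simply written to INTR_EN and STS is read back to flush, while the shared/legacy path takes hw_lock and only touches the hardware when the disable count drops to zero (ql_enable_all_completion_interrupts() seeds irq_cnt to 1 so the next enable actually fires). A sketch of the enable side; intr_en_mask is an assumed per-vector field holding the value written to INTR_EN.

	static u32 ql_enable_completion_interrupt_sketch(struct ql_adapter *qdev, u32 intr)
	{
		struct intr_context *ctx = qdev->intr_context + intr;
		unsigned long hw_flags;
		u32 var = 0;

		if (test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr) {
			ql_write32(qdev, INTR_EN, ctx->intr_en_mask);	/* own vector: enable directly */
			return ql_read32(qdev, STS);			/* flush */
		}

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (atomic_dec_and_test(&ctx->irq_cnt)) {		/* last outstanding disable released */
			ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
			var = ql_read32(qdev, STS);
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return var;
	}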
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) in ql_validate_flash() argument
703 __le16 *flash = (__le16 *)&qdev->flash; in ql_validate_flash()
705 status = strncmp((char *)&qdev->flash, str, 4); in ql_validate_flash()
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); in ql_validate_flash()
715 netif_err(qdev, ifup, qdev->ndev, in ql_validate_flash()
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) in ql_read_flash_word() argument
725 status = ql_wait_reg_rdy(qdev, in ql_read_flash_word()
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); in ql_read_flash_word()
732 status = ql_wait_reg_rdy(qdev, in ql_read_flash_word()
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); in ql_read_flash_word()
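ql_read_flash_word() above performs a word read from serial flash through the FLASH_ADDR/FLASH_DATA pair: poll until the interface is idle, post the offset together with the read strobe, poll again, then fetch the data. The flash holds little-endian words, so the CPU-endian register value is converted back with cpu_to_le32(). A sketch; the ready and error bit names are parameters here because only FLASH_ADDR_R is visible in the listing.

	static int ql_read_flash_word_sketch(struct ql_adapter *qdev, int offset,
					     u32 rdy_bit, u32 err_bit, __le32 *data)
	{
		int status;

		status = ql_wait_reg_rdy(qdev, FLASH_ADDR, rdy_bit, err_bit);
		if (status)
			return status;
		ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);	/* start the read at this offset */
		status = ql_wait_reg_rdy(qdev, FLASH_ADDR, rdy_bit, err_bit);
		if (status)
			return status;
		*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));	/* keep the on-flash byte order */
		return 0;
	}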
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev) in ql_get_8000_flash_params() argument
749 __le32 *p = (__le32 *)&qdev->flash; in ql_get_8000_flash_params()
756 if (!qdev->port) in ql_get_8000_flash_params()
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) in ql_get_8000_flash_params()
766 status = ql_read_flash_word(qdev, i+offset, p); in ql_get_8000_flash_params()
768 netif_err(qdev, ifup, qdev->ndev, in ql_get_8000_flash_params()
774 status = ql_validate_flash(qdev, in ql_get_8000_flash_params()
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); in ql_get_8000_flash_params()
786 if (qdev->flash.flash_params_8000.data_type1 == 2) in ql_get_8000_flash_params()
788 qdev->flash.flash_params_8000.mac_addr1, in ql_get_8000_flash_params()
789 qdev->ndev->addr_len); in ql_get_8000_flash_params()
792 qdev->flash.flash_params_8000.mac_addr, in ql_get_8000_flash_params()
793 qdev->ndev->addr_len); in ql_get_8000_flash_params()
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); in ql_get_8000_flash_params()
801 memcpy(qdev->ndev->dev_addr, in ql_get_8000_flash_params()
803 qdev->ndev->addr_len); in ql_get_8000_flash_params()
806 ql_sem_unlock(qdev, SEM_FLASH_MASK); in ql_get_8000_flash_params()
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev) in ql_get_8012_flash_params() argument
814 __le32 *p = (__le32 *)&qdev->flash; in ql_get_8012_flash_params()
821 if (qdev->port) in ql_get_8012_flash_params()
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) in ql_get_8012_flash_params()
828 status = ql_read_flash_word(qdev, i+offset, p); in ql_get_8012_flash_params()
830 netif_err(qdev, ifup, qdev->ndev, in ql_get_8012_flash_params()
837 status = ql_validate_flash(qdev, in ql_get_8012_flash_params()
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); in ql_get_8012_flash_params()
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { in ql_get_8012_flash_params()
851 memcpy(qdev->ndev->dev_addr, in ql_get_8012_flash_params()
852 qdev->flash.flash_params_8012.mac_addr, in ql_get_8012_flash_params()
853 qdev->ndev->addr_len); in ql_get_8012_flash_params()
856 ql_sem_unlock(qdev, SEM_FLASH_MASK); in ql_get_8012_flash_params()
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) in ql_write_xgmac_reg() argument
868 status = ql_wait_reg_rdy(qdev, in ql_write_xgmac_reg()
873 ql_write32(qdev, XGMAC_DATA, data); in ql_write_xgmac_reg()
875 ql_write32(qdev, XGMAC_ADDR, reg); in ql_write_xgmac_reg()
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) in ql_read_xgmac_reg() argument
887 status = ql_wait_reg_rdy(qdev, in ql_read_xgmac_reg()
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); in ql_read_xgmac_reg()
894 status = ql_wait_reg_rdy(qdev, in ql_read_xgmac_reg()
899 *data = ql_read32(qdev, XGMAC_DATA); in ql_read_xgmac_reg()
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) in ql_read_xgmac_reg64() argument
911 status = ql_read_xgmac_reg(qdev, reg, &lo); in ql_read_xgmac_reg64()
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi); in ql_read_xgmac_reg64()
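ql_read_xgmac_reg64() above assembles a 64-bit XGMAC statistic from two 32-bit reads, the low word at reg and the high word at reg + 4. A sketch mirroring those lines:

	static int ql_read_xgmac_reg64_sketch(struct ql_adapter *qdev, u32 reg, u64 *data)
	{
		u32 lo = 0, hi = 0;
		int status;

		status = ql_read_xgmac_reg(qdev, reg, &lo);		/* low 32 bits */
		if (status)
			return status;
		status = ql_read_xgmac_reg(qdev, reg + 4, &hi);		/* high 32 bits */
		if (status)
			return status;
		*data = (u64)lo | ((u64)hi << 32);
		return 0;
	}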
925 static int ql_8000_port_initialize(struct ql_adapter *qdev) in ql_8000_port_initialize() argument
932 status = ql_mb_about_fw(qdev); in ql_8000_port_initialize()
935 status = ql_mb_get_fw_state(qdev); in ql_8000_port_initialize()
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); in ql_8000_port_initialize()
950 static int ql_8012_port_initialize(struct ql_adapter *qdev) in ql_8012_port_initialize() argument
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { in ql_8012_port_initialize()
959 netif_info(qdev, link, qdev->ndev, in ql_8012_port_initialize()
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); in ql_8012_port_initialize()
963 netif_crit(qdev, link, qdev->ndev, in ql_8012_port_initialize()
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); in ql_8012_port_initialize()
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); in ql_8012_port_initialize()
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); in ql_8012_port_initialize()
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); in ql_8012_port_initialize()
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data); in ql_8012_port_initialize()
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data); in ql_8012_port_initialize()
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data); in ql_8012_port_initialize()
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data); in ql_8012_port_initialize()
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); in ql_8012_port_initialize()
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); in ql_8012_port_initialize()
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); in ql_8012_port_initialize()
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask); in ql_8012_port_initialize()
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) in ql_lbq_block_size() argument
1027 return PAGE_SIZE << qdev->lbq_buf_order; in ql_lbq_block_size()
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, in ql_get_curr_lchunk() argument
1046 pci_dma_sync_single_for_cpu(qdev->pdev, in ql_get_curr_lchunk()
1055 == ql_lbq_block_size(qdev)) in ql_get_curr_lchunk()
1056 pci_unmap_page(qdev->pdev, in ql_get_curr_lchunk()
1058 ql_lbq_block_size(qdev), in ql_get_curr_lchunk()
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, in ql_get_next_chunk() argument
1097 qdev->lbq_buf_order); in ql_get_next_chunk()
1099 netif_err(qdev, drv, qdev->ndev, in ql_get_next_chunk()
1104 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, in ql_get_next_chunk()
1105 0, ql_lbq_block_size(qdev), in ql_get_next_chunk()
1107 if (pci_dma_mapping_error(qdev->pdev, map)) { in ql_get_next_chunk()
1109 qdev->lbq_buf_order); in ql_get_next_chunk()
1111 netif_err(qdev, drv, qdev->ndev, in ql_get_next_chunk()
1128 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { in ql_get_next_chunk()
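ql_lbq_block_size() and ql_get_next_chunk() above carve the large receive buffers out of one higher-order page: the block is PAGE_SIZE << lbq_buf_order bytes, each descriptor takes the current chunk, and the master chunk's offset advances by one buffer until it wraps, at which point a fresh page is allocated on the next call. A sketch of the carving step only (allocation, mapping and error handling are elided); the pg_chunk fields are taken from the listing, while lbq_buf_size and the reference-count handling are assumptions.

	static void ql_carve_lbq_chunk_sketch(struct ql_adapter *qdev,
					      struct rx_ring *rx_ring,
					      struct bq_desc *lbq_desc)
	{
		/* Hand the current chunk (page, DMA map, offset) to this descriptor. */
		lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

		/* Advance the master chunk past the buffer just handed out. */
		rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
		if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
			rx_ring->pg_chunk.page = NULL;		/* page used up; allocate a new one next time */
		} else {
			rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
			get_page(rx_ring->pg_chunk.page);	/* hold a reference for the next chunk */
		}
	}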
1139 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_update_lbq() argument
1149 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_lbq()
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { in ql_update_lbq()
1155 netif_err(qdev, ifup, qdev->ndev, in ql_update_lbq()
1168 pci_dma_sync_single_for_device(qdev->pdev, map, in ql_update_lbq()
1184 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_lbq()
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_update_sbq() argument
1204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_sbq()
1208 netif_printk(qdev, rx_status, KERN_DEBUG, in ql_update_sbq()
1209 qdev->ndev, in ql_update_sbq()
1213 netdev_alloc_skb(qdev->ndev, in ql_update_sbq()
1220 map = pci_map_single(qdev->pdev, in ql_update_sbq()
1224 if (pci_dma_mapping_error(qdev->pdev, map)) { in ql_update_sbq()
1225 netif_err(qdev, ifup, qdev->ndev, in ql_update_sbq()
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_sbq()
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev, in ql_update_buffer_queues() argument
1261 ql_update_sbq(qdev, rx_ring); in ql_update_buffer_queues()
1262 ql_update_lbq(qdev, rx_ring); in ql_update_buffer_queues()
1268 static void ql_unmap_send(struct ql_adapter *qdev, in ql_unmap_send() argument
1284 netif_printk(qdev, tx_done, KERN_DEBUG, in ql_unmap_send()
1285 qdev->ndev, in ql_unmap_send()
1288 pci_unmap_single(qdev->pdev, in ql_unmap_send()
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, in ql_unmap_send()
1297 pci_unmap_page(qdev->pdev, in ql_unmap_send()
1310 static int ql_map_send(struct ql_adapter *qdev, in ql_map_send() argument
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, in ql_map_send()
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); in ql_map_send()
1329 err = pci_dma_mapping_error(qdev->pdev, map); in ql_map_send()
1331 netif_err(qdev, tx_queued, qdev->ndev, in ql_map_send()
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, in ql_map_send()
1376 err = pci_dma_mapping_error(qdev->pdev, map); in ql_map_send()
1378 netif_err(qdev, tx_queued, qdev->ndev, in ql_map_send()
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), in ql_map_send()
1404 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_map_send()
1406 netif_err(qdev, tx_queued, qdev->ndev, in ql_map_send()
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx); in ql_map_send()
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, in ql_categorize_rx_err() argument
1440 struct nic_stats *stats = &qdev->nic_stats; in ql_categorize_rx_err()
1472 static void ql_update_mac_hdr_len(struct ql_adapter *qdev, in ql_update_mac_hdr_len() argument
1478 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in ql_update_mac_hdr_len()
1492 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, in ql_process_mac_rx_gro_page() argument
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_gro_page()
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_gro_page()
1508 napi->dev = qdev->ndev; in ql_process_mac_rx_gro_page()
1512 netif_err(qdev, drv, qdev->ndev, in ql_process_mac_rx_gro_page()
1539 static void ql_process_mac_rx_page(struct ql_adapter *qdev, in ql_process_mac_rx_page() argument
1545 struct net_device *ndev = qdev->ndev; in ql_process_mac_rx_page()
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_page()
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_page()
1569 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); in ql_process_mac_rx_page()
1575 netif_err(qdev, drv, qdev->ndev, in ql_process_mac_rx_page()
1581 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_page()
1600 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_page()
1611 netif_printk(qdev, rx_status, KERN_DEBUG, in ql_process_mac_rx_page()
1612 qdev->ndev, in ql_process_mac_rx_page()
1632 static void ql_process_mac_rx_skb(struct ql_adapter *qdev, in ql_process_mac_rx_skb() argument
1638 struct net_device *ndev = qdev->ndev; in ql_process_mac_rx_skb()
1645 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); in ql_process_mac_rx_skb()
1656 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_skb()
1662 if (test_bit(QL_SELFTEST, &qdev->flags)) { in ql_process_mac_rx_skb()
1663 ql_check_lb_frame(qdev, skb); in ql_process_mac_rx_skb()
1679 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_skb()
1689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_skb()
1704 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_skb()
1714 netif_printk(qdev, rx_status, KERN_DEBUG, in ql_process_mac_rx_skb()
1715 qdev->ndev, in ql_process_mac_rx_skb()
1749 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, in ql_build_rx_skb() argument
1765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1771 pci_unmap_single(qdev->pdev, in ql_build_rx_skb()
1785 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1803 pci_dma_sync_single_for_cpu(qdev->pdev, in ql_build_rx_skb()
1811 pci_dma_sync_single_for_device(qdev->pdev, in ql_build_rx_skb()
1820 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1827 pci_unmap_single(qdev->pdev, in ql_build_rx_skb()
1837 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1845 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1846 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1861 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1862 skb = netdev_alloc_skb(qdev->ndev, length); in ql_build_rx_skb()
1864 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1868 pci_unmap_page(qdev->pdev, in ql_build_rx_skb()
1874 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1885 ql_update_mac_hdr_len(qdev, ib_mac_rsp, in ql_build_rx_skb()
1904 pci_unmap_single(qdev->pdev, in ql_build_rx_skb()
1918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1926 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1930 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1943 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
1951 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, in ql_process_mac_split_rx_intr() argument
1956 struct net_device *ndev = qdev->ndev; in ql_process_mac_split_rx_intr()
1961 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); in ql_process_mac_split_rx_intr()
1963 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
1971 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_split_rx_intr()
1986 if (test_bit(QL_SELFTEST, &qdev->flags)) { in ql_process_mac_split_rx_intr()
1987 ql_check_lb_frame(qdev, skb); in ql_process_mac_split_rx_intr()
1994 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", in ql_process_mac_split_rx_intr()
2004 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
2018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
2028 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
2046 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, in ql_process_mac_rx_intr() argument
2052 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ? in ql_process_mac_rx_intr()
2062 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2069 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2077 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2083 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2089 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2097 static void ql_process_mac_tx_intr(struct ql_adapter *qdev, in ql_process_mac_tx_intr() argument
2104 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in ql_process_mac_tx_intr()
2106 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); in ql_process_mac_tx_intr()
2117 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2121 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2125 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2129 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2137 void ql_queue_fw_error(struct ql_adapter *qdev) in ql_queue_fw_error() argument
2139 ql_link_off(qdev); in ql_queue_fw_error()
2140 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); in ql_queue_fw_error()
2143 void ql_queue_asic_error(struct ql_adapter *qdev) in ql_queue_asic_error() argument
2145 ql_link_off(qdev); in ql_queue_asic_error()
2146 ql_disable_interrupts(qdev); in ql_queue_asic_error()
2151 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_queue_asic_error()
2155 set_bit(QL_ASIC_RECOVERY, &qdev->flags); in ql_queue_asic_error()
2156 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); in ql_queue_asic_error()
2159 static void ql_process_chip_ae_intr(struct ql_adapter *qdev, in ql_process_chip_ae_intr() argument
2164 netif_err(qdev, rx_err, qdev->ndev, in ql_process_chip_ae_intr()
2166 ql_queue_fw_error(qdev); in ql_process_chip_ae_intr()
2170 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); in ql_process_chip_ae_intr()
2171 netdev_err(qdev->ndev, "This event shouldn't occur.\n"); in ql_process_chip_ae_intr()
2172 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2176 netdev_err(qdev->ndev, "Soft ECC error detected.\n"); in ql_process_chip_ae_intr()
2177 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2181 netdev_err(qdev->ndev, "PCI error occurred when reading " in ql_process_chip_ae_intr()
2184 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2188 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", in ql_process_chip_ae_intr()
2190 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2197 struct ql_adapter *qdev = rx_ring->qdev; in ql_clean_outbound_rx_ring() local
2206 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_outbound_rx_ring()
2216 ql_process_mac_tx_intr(qdev, net_rsp); in ql_clean_outbound_rx_ring()
2219 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_outbound_rx_ring()
2230 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in ql_clean_outbound_rx_ring()
2231 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { in ql_clean_outbound_rx_ring()
2237 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in ql_clean_outbound_rx_ring()
2245 struct ql_adapter *qdev = rx_ring->qdev; in ql_clean_inbound_rx_ring() local
2253 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_inbound_rx_ring()
2261 ql_process_mac_rx_intr(qdev, rx_ring, in ql_clean_inbound_rx_ring()
2267 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) in ql_clean_inbound_rx_ring()
2271 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_inbound_rx_ring()
2282 ql_update_buffer_queues(qdev, rx_ring); in ql_clean_inbound_rx_ring()
2290 struct ql_adapter *qdev = rx_ring->qdev; in ql_napi_poll_msix() local
2293 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; in ql_napi_poll_msix()
2295 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_napi_poll_msix()
2300 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { in ql_napi_poll_msix()
2301 trx_ring = &qdev->rx_ring[i]; in ql_napi_poll_msix()
2308 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, in ql_napi_poll_msix()
2320 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, in ql_napi_poll_msix()
2328 ql_enable_completion_interrupt(qdev, rx_ring->irq); in ql_napi_poll_msix()
2335 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_vlan_mode() local
2338 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | in qlge_vlan_mode()
2341 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); in qlge_vlan_mode()
2352 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_update_hw_vlan_features() local
2357 status = ql_adapter_down(qdev); in qlge_update_hw_vlan_features()
2359 netif_err(qdev, link, qdev->ndev, in qlge_update_hw_vlan_features()
2369 status = ql_adapter_up(qdev); in qlge_update_hw_vlan_features()
2371 netif_err(qdev, link, qdev->ndev, in qlge_update_hw_vlan_features()
2404 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) in __qlge_vlan_rx_add_vid() argument
2409 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, in __qlge_vlan_rx_add_vid()
2412 netif_err(qdev, ifup, qdev->ndev, in __qlge_vlan_rx_add_vid()
2419 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_vlan_rx_add_vid() local
2423 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_add_vid()
2427 err = __qlge_vlan_rx_add_vid(qdev, vid); in qlge_vlan_rx_add_vid()
2428 set_bit(vid, qdev->active_vlans); in qlge_vlan_rx_add_vid()
2430 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_add_vid()
2435 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) in __qlge_vlan_rx_kill_vid() argument
2440 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, in __qlge_vlan_rx_kill_vid()
2443 netif_err(qdev, ifup, qdev->ndev, in __qlge_vlan_rx_kill_vid()
2450 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_vlan_rx_kill_vid() local
2454 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_kill_vid()
2458 err = __qlge_vlan_rx_kill_vid(qdev, vid); in qlge_vlan_rx_kill_vid()
2459 clear_bit(vid, qdev->active_vlans); in qlge_vlan_rx_kill_vid()
2461 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_kill_vid()
2466 static void qlge_restore_vlan(struct ql_adapter *qdev) in qlge_restore_vlan() argument
2471 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_restore_vlan()
2475 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) in qlge_restore_vlan()
2476 __qlge_vlan_rx_add_vid(qdev, vid); in qlge_restore_vlan()
2478 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_restore_vlan()
2497 struct ql_adapter *qdev = rx_ring->qdev; in qlge_isr() local
2498 struct intr_context *intr_context = &qdev->intr_context[0]; in qlge_isr()
2502 spin_lock(&qdev->hw_lock); in qlge_isr()
2503 if (atomic_read(&qdev->intr_context[0].irq_cnt)) { in qlge_isr()
2504 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, in qlge_isr()
2506 spin_unlock(&qdev->hw_lock); in qlge_isr()
2509 spin_unlock(&qdev->hw_lock); in qlge_isr()
2511 var = ql_disable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2517 ql_queue_asic_error(qdev); in qlge_isr()
2518 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); in qlge_isr()
2519 var = ql_read32(qdev, ERR_STS); in qlge_isr()
2520 netdev_err(qdev->ndev, "Resetting chip. " in qlge_isr()
2529 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { in qlge_isr()
2534 netif_err(qdev, intr, qdev->ndev, in qlge_isr()
2536 ql_disable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2537 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); in qlge_isr()
2539 qdev->workqueue, &qdev->mpi_work, 0); in qlge_isr()
2548 var = ql_read32(qdev, ISR1); in qlge_isr()
2550 netif_info(qdev, intr, qdev->ndev, in qlge_isr()
2552 ql_disable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2556 ql_enable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2637 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_send() local
2642 tx_ring = &qdev->tx_ring[tx_ring_idx]; in qlge_send()
2648 netif_info(qdev, tx_queued, qdev->ndev, in qlge_send()
2670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, in qlge_send()
2683 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != in qlge_send()
2685 netif_err(qdev, tx_queued, qdev->ndev, in qlge_send()
2697 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, in qlge_send()
2710 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in qlge_send()
2716 static void ql_free_shadow_space(struct ql_adapter *qdev) in ql_free_shadow_space() argument
2718 if (qdev->rx_ring_shadow_reg_area) { in ql_free_shadow_space()
2719 pci_free_consistent(qdev->pdev, in ql_free_shadow_space()
2721 qdev->rx_ring_shadow_reg_area, in ql_free_shadow_space()
2722 qdev->rx_ring_shadow_reg_dma); in ql_free_shadow_space()
2723 qdev->rx_ring_shadow_reg_area = NULL; in ql_free_shadow_space()
2725 if (qdev->tx_ring_shadow_reg_area) { in ql_free_shadow_space()
2726 pci_free_consistent(qdev->pdev, in ql_free_shadow_space()
2728 qdev->tx_ring_shadow_reg_area, in ql_free_shadow_space()
2729 qdev->tx_ring_shadow_reg_dma); in ql_free_shadow_space()
2730 qdev->tx_ring_shadow_reg_area = NULL; in ql_free_shadow_space()
2734 static int ql_alloc_shadow_space(struct ql_adapter *qdev) in ql_alloc_shadow_space() argument
2736 qdev->rx_ring_shadow_reg_area = in ql_alloc_shadow_space()
2737 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, in ql_alloc_shadow_space()
2738 &qdev->rx_ring_shadow_reg_dma); in ql_alloc_shadow_space()
2739 if (qdev->rx_ring_shadow_reg_area == NULL) { in ql_alloc_shadow_space()
2740 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_shadow_space()
2745 qdev->tx_ring_shadow_reg_area = in ql_alloc_shadow_space()
2746 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, in ql_alloc_shadow_space()
2747 &qdev->tx_ring_shadow_reg_dma); in ql_alloc_shadow_space()
2748 if (qdev->tx_ring_shadow_reg_area == NULL) { in ql_alloc_shadow_space()
2749 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_shadow_space()
2756 pci_free_consistent(qdev->pdev, in ql_alloc_shadow_space()
2758 qdev->rx_ring_shadow_reg_area, in ql_alloc_shadow_space()
2759 qdev->rx_ring_shadow_reg_dma); in ql_alloc_shadow_space()
2763 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_init_tx_ring() argument
2781 static void ql_free_tx_resources(struct ql_adapter *qdev, in ql_free_tx_resources() argument
2785 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_free_tx_resources()
2793 static int ql_alloc_tx_resources(struct ql_adapter *qdev, in ql_alloc_tx_resources() argument
2797 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2811 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2815 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); in ql_alloc_tx_resources()
2819 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_free_lbq_buffers() argument
2831 pci_unmap_page(qdev->pdev, in ql_free_lbq_buffers()
2833 ql_lbq_block_size(qdev), in ql_free_lbq_buffers()
2846 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, in ql_free_lbq_buffers()
2847 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); in ql_free_lbq_buffers()
2853 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_free_sbq_buffers() argument
2861 netif_err(qdev, ifup, qdev->ndev, in ql_free_sbq_buffers()
2866 pci_unmap_single(qdev->pdev, in ql_free_sbq_buffers()
2879 static void ql_free_rx_buffers(struct ql_adapter *qdev) in ql_free_rx_buffers() argument
2884 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_free_rx_buffers()
2885 rx_ring = &qdev->rx_ring[i]; in ql_free_rx_buffers()
2887 ql_free_lbq_buffers(qdev, rx_ring); in ql_free_rx_buffers()
2889 ql_free_sbq_buffers(qdev, rx_ring); in ql_free_rx_buffers()
2893 static void ql_alloc_rx_buffers(struct ql_adapter *qdev) in ql_alloc_rx_buffers() argument
2898 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_alloc_rx_buffers()
2899 rx_ring = &qdev->rx_ring[i]; in ql_alloc_rx_buffers()
2901 ql_update_buffer_queues(qdev, rx_ring); in ql_alloc_rx_buffers()
2905 static void ql_init_lbq_ring(struct ql_adapter *qdev, in ql_init_lbq_ring() argument
2922 static void ql_init_sbq_ring(struct ql_adapter *qdev, in ql_init_sbq_ring() argument
2939 static void ql_free_rx_resources(struct ql_adapter *qdev, in ql_free_rx_resources() argument
2944 pci_free_consistent(qdev->pdev, in ql_free_rx_resources()
2956 pci_free_consistent(qdev->pdev, in ql_free_rx_resources()
2968 pci_free_consistent(qdev->pdev, in ql_free_rx_resources()
2977 static int ql_alloc_rx_resources(struct ql_adapter *qdev, in ql_alloc_rx_resources() argument
2985 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, in ql_alloc_rx_resources()
2989 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); in ql_alloc_rx_resources()
2998 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, in ql_alloc_rx_resources()
3002 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_rx_resources()
3016 ql_init_sbq_ring(qdev, rx_ring); in ql_alloc_rx_resources()
3024 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, in ql_alloc_rx_resources()
3028 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_rx_resources()
3041 ql_init_lbq_ring(qdev, rx_ring); in ql_alloc_rx_resources()
3047 ql_free_rx_resources(qdev, rx_ring); in ql_alloc_rx_resources()
3051 static void ql_tx_ring_clean(struct ql_adapter *qdev) in ql_tx_ring_clean() argument
3061 for (j = 0; j < qdev->tx_ring_count; j++) { in ql_tx_ring_clean()
3062 tx_ring = &qdev->tx_ring[j]; in ql_tx_ring_clean()
3066 netif_err(qdev, ifdown, qdev->ndev, in ql_tx_ring_clean()
3070 ql_unmap_send(qdev, tx_ring_desc, in ql_tx_ring_clean()
3079 static void ql_free_mem_resources(struct ql_adapter *qdev) in ql_free_mem_resources() argument
3083 for (i = 0; i < qdev->tx_ring_count; i++) in ql_free_mem_resources()
3084 ql_free_tx_resources(qdev, &qdev->tx_ring[i]); in ql_free_mem_resources()
3085 for (i = 0; i < qdev->rx_ring_count; i++) in ql_free_mem_resources()
3086 ql_free_rx_resources(qdev, &qdev->rx_ring[i]); in ql_free_mem_resources()
3087 ql_free_shadow_space(qdev); in ql_free_mem_resources()
3090 static int ql_alloc_mem_resources(struct ql_adapter *qdev) in ql_alloc_mem_resources() argument
3095 if (ql_alloc_shadow_space(qdev)) in ql_alloc_mem_resources()
3098 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_alloc_mem_resources()
3099 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { in ql_alloc_mem_resources()
3100 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_mem_resources()
3106 for (i = 0; i < qdev->tx_ring_count; i++) { in ql_alloc_mem_resources()
3107 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { in ql_alloc_mem_resources()
3108 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_mem_resources()
3116 ql_free_mem_resources(qdev); in ql_alloc_mem_resources()
3124 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_start_rx_ring() argument
3127 void *shadow_reg = qdev->rx_ring_shadow_reg_area + in ql_start_rx_ring()
3129 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + in ql_start_rx_ring()
3132 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); in ql_start_rx_ring()
3231 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); in ql_start_rx_ring()
3232 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); in ql_start_rx_ring()
3238 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, in ql_start_rx_ring()
3240 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); in ql_start_rx_ring()
3241 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); in ql_start_rx_ring()
3244 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_start_rx_ring()
3247 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), in ql_start_rx_ring()
3250 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); in ql_start_rx_ring()
3256 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_start_tx_ring() argument
3260 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); in ql_start_tx_ring()
3261 void *shadow_reg = qdev->tx_ring_shadow_reg_area + in ql_start_tx_ring()
3263 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + in ql_start_tx_ring()
3291 ql_init_tx_ring(qdev, tx_ring); in ql_start_tx_ring()
3293 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, in ql_start_tx_ring()
3296 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); in ql_start_tx_ring()
3302 static void ql_disable_msix(struct ql_adapter *qdev) in ql_disable_msix() argument
3304 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { in ql_disable_msix()
3305 pci_disable_msix(qdev->pdev); in ql_disable_msix()
3306 clear_bit(QL_MSIX_ENABLED, &qdev->flags); in ql_disable_msix()
3307 kfree(qdev->msi_x_entry); in ql_disable_msix()
3308 qdev->msi_x_entry = NULL; in ql_disable_msix()
3309 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { in ql_disable_msix()
3310 pci_disable_msi(qdev->pdev); in ql_disable_msix()
3311 clear_bit(QL_MSI_ENABLED, &qdev->flags); in ql_disable_msix()
3319 static void ql_enable_msix(struct ql_adapter *qdev) in ql_enable_msix() argument
3328 qdev->msi_x_entry = kcalloc(qdev->intr_count, in ql_enable_msix()
3331 if (!qdev->msi_x_entry) { in ql_enable_msix()
3336 for (i = 0; i < qdev->intr_count; i++) in ql_enable_msix()
3337 qdev->msi_x_entry[i].entry = i; in ql_enable_msix()
3339 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry, in ql_enable_msix()
3340 1, qdev->intr_count); in ql_enable_msix()
3342 kfree(qdev->msi_x_entry); in ql_enable_msix()
3343 qdev->msi_x_entry = NULL; in ql_enable_msix()
3344 netif_warn(qdev, ifup, qdev->ndev, in ql_enable_msix()
3348 qdev->intr_count = err; in ql_enable_msix()
3349 set_bit(QL_MSIX_ENABLED, &qdev->flags); in ql_enable_msix()
3350 netif_info(qdev, ifup, qdev->ndev, in ql_enable_msix()
3352 qdev->intr_count); in ql_enable_msix()
3357 qdev->intr_count = 1; in ql_enable_msix()
3359 if (!pci_enable_msi(qdev->pdev)) { in ql_enable_msix()
3360 set_bit(QL_MSI_ENABLED, &qdev->flags); in ql_enable_msix()
3361 netif_info(qdev, ifup, qdev->ndev, in ql_enable_msix()
3367 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_enable_msix()
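ql_enable_msix() above tries the interrupt modes in order of preference: allocate an msix_entry table and ask for anywhere between 1 and intr_count MSI-X vectors, fall back to a single MSI vector, and finally stay on the legacy INTx line. A sketch of that fallback chain; module-parameter checks and the log messages from the real function are omitted.

	static void ql_enable_msix_sketch(struct ql_adapter *qdev)
	{
		int i, err;

		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry), GFP_KERNEL);
		if (qdev->msi_x_entry) {
			for (i = 0; i < qdev->intr_count; i++)
				qdev->msi_x_entry[i].entry = i;

			err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
						    1, qdev->intr_count);
			if (err > 0) {
				qdev->intr_count = err;		/* may be fewer than requested */
				set_bit(QL_MSIX_ENABLED, &qdev->flags);
				return;
			}
			kfree(qdev->msi_x_entry);		/* MSI-X not available */
			qdev->msi_x_entry = NULL;
		}

		qdev->intr_count = 1;				/* single vector from here on */
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);	/* MSI granted */
			return;
		}
		/* Otherwise fall through to the legacy INTx interrupt. */
	}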
3380 static void ql_set_tx_vect(struct ql_adapter *qdev) in ql_set_tx_vect() argument
3383 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; in ql_set_tx_vect()
3385 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { in ql_set_tx_vect()
3387 for (vect = 0, j = 0, i = qdev->rss_ring_count; in ql_set_tx_vect()
3388 i < qdev->rx_ring_count; i++) { in ql_set_tx_vect()
3393 qdev->rx_ring[i].irq = vect; in ql_set_tx_vect()
3400 for (i = 0; i < qdev->rx_ring_count; i++) in ql_set_tx_vect()
3401 qdev->rx_ring[i].irq = 0; in ql_set_tx_vect()
3410 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) in ql_set_irq_mask() argument
3413 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; in ql_set_irq_mask()
3415 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { in ql_set_irq_mask()
3419 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); in ql_set_irq_mask()
3424 (1 << qdev->rx_ring[qdev->rss_ring_count + in ql_set_irq_mask()
3431 for (j = 0; j < qdev->rx_ring_count; j++) in ql_set_irq_mask()
3432 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); in ql_set_irq_mask()
3442 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) in ql_resolve_queues_to_irqs() argument
3445 struct intr_context *intr_context = &qdev->intr_context[0]; in ql_resolve_queues_to_irqs()
3447 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { in ql_resolve_queues_to_irqs()
3452 for (i = 0; i < qdev->intr_count; i++, intr_context++) { in ql_resolve_queues_to_irqs()
3453 qdev->rx_ring[i].irq = i; in ql_resolve_queues_to_irqs()
3455 intr_context->qdev = qdev; in ql_resolve_queues_to_irqs()
3459 ql_set_irq_mask(qdev, intr_context); in ql_resolve_queues_to_irqs()
3484 qdev->ndev->name, i); in ql_resolve_queues_to_irqs()
3491 qdev->ndev->name, i); in ql_resolve_queues_to_irqs()
3500 intr_context->qdev = qdev; in ql_resolve_queues_to_irqs()
3516 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); in ql_resolve_queues_to_irqs()
3522 ql_set_irq_mask(qdev, intr_context); in ql_resolve_queues_to_irqs()
3527 ql_set_tx_vect(qdev); in ql_resolve_queues_to_irqs()
3530 static void ql_free_irq(struct ql_adapter *qdev) in ql_free_irq() argument
3533 struct intr_context *intr_context = &qdev->intr_context[0]; in ql_free_irq()
3535 for (i = 0; i < qdev->intr_count; i++, intr_context++) { in ql_free_irq()
3537 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { in ql_free_irq()
3538 free_irq(qdev->msi_x_entry[i].vector, in ql_free_irq()
3539 &qdev->rx_ring[i]); in ql_free_irq()
3541 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); in ql_free_irq()
3545 ql_disable_msix(qdev); in ql_free_irq()
3548 static int ql_request_irq(struct ql_adapter *qdev) in ql_request_irq() argument
3552 struct pci_dev *pdev = qdev->pdev; in ql_request_irq()
3553 struct intr_context *intr_context = &qdev->intr_context[0]; in ql_request_irq()
3555 ql_resolve_queues_to_irqs(qdev); in ql_request_irq()
3557 for (i = 0; i < qdev->intr_count; i++, intr_context++) { in ql_request_irq()
3559 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { in ql_request_irq()
3560 status = request_irq(qdev->msi_x_entry[i].vector, in ql_request_irq()
3564 &qdev->rx_ring[i]); in ql_request_irq()
3566 netif_err(qdev, ifup, qdev->ndev, in ql_request_irq()
3572 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3574 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3576 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3579 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3581 &qdev->rx_ring[0]); in ql_request_irq()
3585 &qdev-> in ql_request_irq()
3587 intr_context->name, &qdev->rx_ring[0]); in ql_request_irq()
3591 netif_err(qdev, ifup, qdev->ndev, in ql_request_irq()
3594 qdev->rx_ring[0].type == DEFAULT_Q ? in ql_request_irq()
3596 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : in ql_request_irq()
3597 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", in ql_request_irq()
3604 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n"); in ql_request_irq()
3605 ql_free_irq(qdev); in ql_request_irq()
3609 static int ql_start_rss(struct ql_adapter *qdev) in ql_start_rss() argument
3618 struct ricb *ricb = &qdev->ricb; in ql_start_rss()
3634 hash_id[i] = (i & (qdev->rss_ring_count - 1)); in ql_start_rss()
3639 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); in ql_start_rss()
3641 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); in ql_start_rss()
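ql_start_rss() above fills the RICB's hash-to-completion-queue table round-robin across the RSS rings (hash_id[i] = i & (rss_ring_count - 1), an AND that acts as a modulo when the ring count is a power of two) and then downloads the RICB with ql_write_cfg(). A sketch of the table fill only; the table length here is illustrative.

	static void ql_fill_rss_map_sketch(u8 *hash_id, int entries, int rss_ring_count)
	{
		int i;

		for (i = 0; i < entries; i++)
			hash_id[i] = i & (rss_ring_count - 1);	/* spread hash buckets across RSS rings */
	}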
3647 static int ql_clear_routing_entries(struct ql_adapter *qdev) in ql_clear_routing_entries() argument
3651 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); in ql_clear_routing_entries()
3656 status = ql_set_routing_reg(qdev, i, 0, 0); in ql_clear_routing_entries()
3658 netif_err(qdev, ifup, qdev->ndev, in ql_clear_routing_entries()
3663 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); in ql_clear_routing_entries()
3668 static int ql_route_initialize(struct ql_adapter *qdev) in ql_route_initialize() argument
3673 status = ql_clear_routing_entries(qdev); in ql_route_initialize()
3677 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); in ql_route_initialize()
3681 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, in ql_route_initialize()
3684 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3689 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, in ql_route_initialize()
3692 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3697 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); in ql_route_initialize()
3699 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3706 if (qdev->rss_ring_count > 1) { in ql_route_initialize()
3707 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, in ql_route_initialize()
3710 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3716 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, in ql_route_initialize()
3719 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3722 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); in ql_route_initialize()
3726 int ql_cam_route_initialize(struct ql_adapter *qdev) in ql_cam_route_initialize() argument
3734 set = ql_read32(qdev, STS); in ql_cam_route_initialize()
3735 set &= qdev->port_link_up; in ql_cam_route_initialize()
3736 status = ql_set_mac_addr(qdev, set); in ql_cam_route_initialize()
3738 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); in ql_cam_route_initialize()
3742 status = ql_route_initialize(qdev); in ql_cam_route_initialize()
3744 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); in ql_cam_route_initialize()
3749 static int ql_adapter_initialize(struct ql_adapter *qdev) in ql_adapter_initialize() argument
3760 ql_write32(qdev, SYS, mask | value); in ql_adapter_initialize()
3765 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { in ql_adapter_initialize()
3769 ql_write32(qdev, NIC_RCV_CFG, (mask | value)); in ql_adapter_initialize()
3772 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); in ql_adapter_initialize()
3782 ql_write32(qdev, FSC, mask | value); in ql_adapter_initialize()
3784 ql_write32(qdev, SPLT_HDR, SPLT_LEN); in ql_adapter_initialize()
3791 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); in ql_adapter_initialize()
3796 value = ql_read32(qdev, MGMT_RCV_CFG); in ql_adapter_initialize()
3801 ql_write32(qdev, MGMT_RCV_CFG, mask); in ql_adapter_initialize()
3802 ql_write32(qdev, MGMT_RCV_CFG, mask | value); in ql_adapter_initialize()
3805 if (qdev->pdev->subsystem_device == 0x0068 || in ql_adapter_initialize()
3806 qdev->pdev->subsystem_device == 0x0180) in ql_adapter_initialize()
3807 qdev->wol = WAKE_MAGIC; in ql_adapter_initialize()
3810 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_adapter_initialize()
3811 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); in ql_adapter_initialize()
3813 netif_err(qdev, ifup, qdev->ndev, in ql_adapter_initialize()
3822 if (qdev->rss_ring_count > 1) { in ql_adapter_initialize()
3823 status = ql_start_rss(qdev); in ql_adapter_initialize()
3825 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); in ql_adapter_initialize()
3831 for (i = 0; i < qdev->tx_ring_count; i++) { in ql_adapter_initialize()
3832 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); in ql_adapter_initialize()
3834 netif_err(qdev, ifup, qdev->ndev, in ql_adapter_initialize()
3841 status = qdev->nic_ops->port_initialize(qdev); in ql_adapter_initialize()
3843 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); in ql_adapter_initialize()
3846 status = ql_cam_route_initialize(qdev); in ql_adapter_initialize()
3848 netif_err(qdev, ifup, qdev->ndev, in ql_adapter_initialize()
3854 for (i = 0; i < qdev->rss_ring_count; i++) in ql_adapter_initialize()
3855 napi_enable(&qdev->rx_ring[i].napi); in ql_adapter_initialize()
3861 static int ql_adapter_reset(struct ql_adapter *qdev) in ql_adapter_reset() argument
3868 status = ql_clear_routing_entries(qdev); in ql_adapter_reset()
3870 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); in ql_adapter_reset()
3880 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { in ql_adapter_reset()
3882 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); in ql_adapter_reset()
3885 ql_wait_fifo_empty(qdev); in ql_adapter_reset()
3887 clear_bit(QL_ASIC_RECOVERY, &qdev->flags); in ql_adapter_reset()
3889 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); in ql_adapter_reset()
3892 value = ql_read32(qdev, RST_FO); in ql_adapter_reset()
3899 netif_err(qdev, ifdown, qdev->ndev, in ql_adapter_reset()
3905 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); in ql_adapter_reset()
3911 struct ql_adapter *qdev = netdev_priv(ndev); in ql_display_dev_info() local
3913 netif_info(qdev, probe, qdev->ndev, in ql_display_dev_info()
3916 qdev->func, in ql_display_dev_info()
3917 qdev->port, in ql_display_dev_info()
3918 qdev->chip_rev_id & 0x0000000f, in ql_display_dev_info()
3919 qdev->chip_rev_id >> 4 & 0x0000000f, in ql_display_dev_info()
3920 qdev->chip_rev_id >> 8 & 0x0000000f, in ql_display_dev_info()
3921 qdev->chip_rev_id >> 12 & 0x0000000f); in ql_display_dev_info()
3922 netif_info(qdev, probe, qdev->ndev, in ql_display_dev_info()
3926 static int ql_wol(struct ql_adapter *qdev) in ql_wol() argument
3938 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | in ql_wol()
3940 netif_err(qdev, ifdown, qdev->ndev, in ql_wol()
3942 qdev->wol); in ql_wol()
3946 if (qdev->wol & WAKE_MAGIC) { in ql_wol()
3947 status = ql_mb_wol_set_magic(qdev, 1); in ql_wol()
3949 netif_err(qdev, ifdown, qdev->ndev, in ql_wol()
3951 qdev->ndev->name); in ql_wol()
3954 netif_info(qdev, drv, qdev->ndev, in ql_wol()
3956 qdev->ndev->name); in ql_wol()
3961 if (qdev->wol) { in ql_wol()
3963 status = ql_mb_wol_mode(qdev, wol); in ql_wol()
3964 netif_err(qdev, drv, qdev->ndev, in ql_wol()
3967 wol, qdev->ndev->name); in ql_wol()
3973 static void ql_cancel_all_work_sync(struct ql_adapter *qdev) in ql_cancel_all_work_sync() argument
3979 if (test_bit(QL_ADAPTER_UP, &qdev->flags)) in ql_cancel_all_work_sync()
3980 cancel_delayed_work_sync(&qdev->asic_reset_work); in ql_cancel_all_work_sync()
3981 cancel_delayed_work_sync(&qdev->mpi_reset_work); in ql_cancel_all_work_sync()
3982 cancel_delayed_work_sync(&qdev->mpi_work); in ql_cancel_all_work_sync()
3983 cancel_delayed_work_sync(&qdev->mpi_idc_work); in ql_cancel_all_work_sync()
3984 cancel_delayed_work_sync(&qdev->mpi_core_to_log); in ql_cancel_all_work_sync()
3985 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); in ql_cancel_all_work_sync()
3988 static int ql_adapter_down(struct ql_adapter *qdev) in ql_adapter_down() argument
3992 ql_link_off(qdev); in ql_adapter_down()
3994 ql_cancel_all_work_sync(qdev); in ql_adapter_down()
3996 for (i = 0; i < qdev->rss_ring_count; i++) in ql_adapter_down()
3997 napi_disable(&qdev->rx_ring[i].napi); in ql_adapter_down()
3999 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_adapter_down()
4001 ql_disable_interrupts(qdev); in ql_adapter_down()
4003 ql_tx_ring_clean(qdev); in ql_adapter_down()
4007 for (i = 0; i < qdev->rss_ring_count; i++) in ql_adapter_down()
4008 netif_napi_del(&qdev->rx_ring[i].napi); in ql_adapter_down()
4010 status = ql_adapter_reset(qdev); in ql_adapter_down()
4012 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", in ql_adapter_down()
4013 qdev->func); in ql_adapter_down()
4014 ql_free_rx_buffers(qdev); in ql_adapter_down()
4019 static int ql_adapter_up(struct ql_adapter *qdev) in ql_adapter_up() argument
4023 err = ql_adapter_initialize(qdev); in ql_adapter_up()
4025 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); in ql_adapter_up()
4028 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_adapter_up()
4029 ql_alloc_rx_buffers(qdev); in ql_adapter_up()
4033 if ((ql_read32(qdev, STS) & qdev->port_init) && in ql_adapter_up()
4034 (ql_read32(qdev, STS) & qdev->port_link_up)) in ql_adapter_up()
4035 ql_link_on(qdev); in ql_adapter_up()
4037 clear_bit(QL_ALLMULTI, &qdev->flags); in ql_adapter_up()
4038 clear_bit(QL_PROMISCUOUS, &qdev->flags); in ql_adapter_up()
4039 qlge_set_multicast_list(qdev->ndev); in ql_adapter_up()
4042 qlge_restore_vlan(qdev); in ql_adapter_up()
4044 ql_enable_interrupts(qdev); in ql_adapter_up()
4045 ql_enable_all_completion_interrupts(qdev); in ql_adapter_up()
4046 netif_tx_start_all_queues(qdev->ndev); in ql_adapter_up()
4050 ql_adapter_reset(qdev); in ql_adapter_up()
4054 static void ql_release_adapter_resources(struct ql_adapter *qdev) in ql_release_adapter_resources() argument
4056 ql_free_mem_resources(qdev); in ql_release_adapter_resources()
4057 ql_free_irq(qdev); in ql_release_adapter_resources()
4060 static int ql_get_adapter_resources(struct ql_adapter *qdev) in ql_get_adapter_resources() argument
4064 if (ql_alloc_mem_resources(qdev)) { in ql_get_adapter_resources()
4065 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); in ql_get_adapter_resources()
4068 status = ql_request_irq(qdev); in ql_get_adapter_resources()
4074 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_close() local
4080 if (test_bit(QL_EEH_FATAL, &qdev->flags)) { in qlge_close()
4081 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); in qlge_close()
4082 clear_bit(QL_EEH_FATAL, &qdev->flags); in qlge_close()
4090 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) in qlge_close()
4092 ql_adapter_down(qdev); in qlge_close()
4093 ql_release_adapter_resources(qdev); in qlge_close()
4097 static int ql_configure_rings(struct ql_adapter *qdev) in ql_configure_rings() argument
4103 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? in ql_configure_rings()
4106 qdev->lbq_buf_order = get_order(lbq_buf_len); in ql_configure_rings()
4115 qdev->intr_count = cpu_cnt; in ql_configure_rings()
4116 ql_enable_msix(qdev); in ql_configure_rings()
4118 qdev->rss_ring_count = qdev->intr_count; in ql_configure_rings()
4119 qdev->tx_ring_count = cpu_cnt; in ql_configure_rings()
4120 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; in ql_configure_rings()
4122 for (i = 0; i < qdev->tx_ring_count; i++) { in ql_configure_rings()
4123 tx_ring = &qdev->tx_ring[i]; in ql_configure_rings()
4125 tx_ring->qdev = qdev; in ql_configure_rings()
4127 tx_ring->wq_len = qdev->tx_ring_size; in ql_configure_rings()
4135 tx_ring->cq_id = qdev->rss_ring_count + i; in ql_configure_rings()
4138 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_configure_rings()
4139 rx_ring = &qdev->rx_ring[i]; in ql_configure_rings()
4141 rx_ring->qdev = qdev; in ql_configure_rings()
4144 if (i < qdev->rss_ring_count) { in ql_configure_rings()
4148 rx_ring->cq_len = qdev->rx_ring_size; in ql_configure_rings()
4165 rx_ring->cq_len = qdev->tx_ring_size; in ql_configure_rings()
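ql_configure_rings() sizes everything from the online CPU count: one MSI-X vector and one RSS completion ring per CPU, one TX ring per CPU, and rx_ring_count is their sum because the TX-completion rings live in rx_ring[] after the RSS rings (hence cq_id = rss_ring_count + i above); the large-buffer page order is picked from the MTU. A condensed sketch of the sizing step, where MAX_CPUS and the two buffer-size constants are assumed driver defines:

	int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* Ask for one vector per CPU; ql_enable_msix() trims intr_count
	 * down to however many vectors were actually granted. */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);

	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;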
4183 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_open() local
4185 err = ql_adapter_reset(qdev); in qlge_open()
4189 err = ql_configure_rings(qdev); in qlge_open()
4193 err = ql_get_adapter_resources(qdev); in qlge_open()
4197 err = ql_adapter_up(qdev); in qlge_open()
4204 ql_release_adapter_resources(qdev); in qlge_open()
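qlge_open() composes those pieces: reset the chip to a known state, compute the ring layout, allocate memory and IRQs, then bring the adapter up; the later steps unwind through ql_release_adapter_resources() on failure. A sketch with an assumed label name:

static int qlge_open(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	err = ql_adapter_reset(qdev);		/* start from a known chip state */
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);	/* DMA memory plus IRQs */
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}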
4208 static int ql_change_rx_buffers(struct ql_adapter *qdev) in ql_change_rx_buffers() argument
4215 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { in ql_change_rx_buffers()
4217 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { in ql_change_rx_buffers()
4218 netif_err(qdev, ifup, qdev->ndev, in ql_change_rx_buffers()
4224 netif_err(qdev, ifup, qdev->ndev, in ql_change_rx_buffers()
4230 status = ql_adapter_down(qdev); in ql_change_rx_buffers()
4235 lbq_buf_len = (qdev->ndev->mtu > 1500) ? in ql_change_rx_buffers()
4237 qdev->lbq_buf_order = get_order(lbq_buf_len); in ql_change_rx_buffers()
4239 for (i = 0; i < qdev->rss_ring_count; i++) { in ql_change_rx_buffers()
4240 rx_ring = &qdev->rx_ring[i]; in ql_change_rx_buffers()
4245 status = ql_adapter_up(qdev); in ql_change_rx_buffers()
4251 netif_alert(qdev, ifup, qdev->ndev, in ql_change_rx_buffers()
4253 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_change_rx_buffers()
4254 dev_close(qdev->ndev); in ql_change_rx_buffers()
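Changing the large-buffer size requires the adapter to be down, so ql_change_rx_buffers() first waits out any in-flight reset, takes the adapter down, recomputes the buffer order, pushes the new size into every RSS ring, and brings the adapter back up; if recovery fails it forces QL_ADAPTER_UP and closes the device so the stack sees a consistent state. A condensed sketch of the body, where the retry count and the per-ring lbq_buf_size field name are assumptions:

	/* Wait (bounded) for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int retries = 4;

		while (retries-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}
		if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);
	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		rx_ring->lbq_buf_size = lbq_buf_len;	/* field name assumed */
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;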
4260 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_change_mtu() local
4264 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); in qlge_change_mtu()
4266 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); in qlge_change_mtu()
4270 queue_delayed_work(qdev->workqueue, in qlge_change_mtu()
4271 &qdev->mpi_port_cfg_work, 3*HZ); in qlge_change_mtu()
4275 if (!netif_running(qdev->ndev)) { in qlge_change_mtu()
4279 status = ql_change_rx_buffers(qdev); in qlge_change_mtu()
4281 netif_err(qdev, ifup, qdev->ndev, in qlge_change_mtu()
4291 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_get_stats() local
4292 struct rx_ring *rx_ring = &qdev->rx_ring[0]; in qlge_get_stats()
4293 struct tx_ring *tx_ring = &qdev->tx_ring[0]; in qlge_get_stats()
4299 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { in qlge_get_stats()
4314 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { in qlge_get_stats()
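qlge_get_stats() aggregates by walking the first rss_ring_count entries of rx_ring[] and every tx_ring[] entry, summing the per-ring counters into ndev->stats. A sketch of the pattern; the per-ring counter field names are assumptions:

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	int i;

	/* Fold the per-ring counters into the netdev statistics. */
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		ndev->stats.rx_packets += rx_ring->rx_packets;
		ndev->stats.rx_bytes += rx_ring->rx_bytes;
		ndev->stats.rx_errors += rx_ring->rx_errors;
		ndev->stats.rx_dropped += rx_ring->rx_dropped;
	}
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		ndev->stats.tx_packets += tx_ring->tx_packets;
		ndev->stats.tx_bytes += tx_ring->tx_bytes;
		ndev->stats.tx_errors += tx_ring->tx_errors;
	}
	return &ndev->stats;
}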
4327 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_set_multicast_list() local
4331 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); in qlge_set_multicast_list()
4339 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { in qlge_set_multicast_list()
4341 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { in qlge_set_multicast_list()
4342 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4345 set_bit(QL_PROMISCUOUS, &qdev->flags); in qlge_set_multicast_list()
4349 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { in qlge_set_multicast_list()
4351 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { in qlge_set_multicast_list()
4352 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4355 clear_bit(QL_PROMISCUOUS, &qdev->flags); in qlge_set_multicast_list()
4366 if (!test_bit(QL_ALLMULTI, &qdev->flags)) { in qlge_set_multicast_list()
4368 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { in qlge_set_multicast_list()
4369 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4372 set_bit(QL_ALLMULTI, &qdev->flags); in qlge_set_multicast_list()
4376 if (test_bit(QL_ALLMULTI, &qdev->flags)) { in qlge_set_multicast_list()
4378 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { in qlge_set_multicast_list()
4379 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4382 clear_bit(QL_ALLMULTI, &qdev->flags); in qlge_set_multicast_list()
4388 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_multicast_list()
4393 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, in qlge_set_multicast_list()
4395 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4397 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_multicast_list()
4402 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_multicast_list()
4404 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { in qlge_set_multicast_list()
4405 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4408 set_bit(QL_ALLMULTI, &qdev->flags); in qlge_set_multicast_list()
4412 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); in qlge_set_multicast_list()
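Both halves of qlge_set_multicast_list() follow the same pattern: the routing-index slots (promiscuous, all-multicast) are updated under the SEM_RT_IDX_MASK hardware semaphore, the per-address CAM entries under SEM_MAC_ADDR_MASK, and the QL_PROMISCUOUS / QL_ALLMULTI bits cache what the hardware already has so a slot is only rewritten on a state change. A sketch of the promiscuous half, assuming the callee elided from the listing is the driver's ql_set_routing_reg():

	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT,
					       RT_IDX_VALID, 1))
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			else
				set_bit(QL_PROMISCUOUS, &qdev->flags);
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT,
					       RT_IDX_VALID, 0))
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			else
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
		}
	}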
4417 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_set_mac_address() local
4425 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); in qlge_set_mac_address()
4427 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_mac_address()
4430 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, in qlge_set_mac_address()
4431 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); in qlge_set_mac_address()
4433 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); in qlge_set_mac_address()
4434 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_mac_address()
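qlge_set_mac_address() caches the new address in qdev->current_mac_addr (so it can be reprogrammed after a reset or resume) and then writes it into the CAM under SEM_MAC_ADDR_MASK, at the CAM index derived from the PCI function (qdev->func * MAX_CQ). A sketch; the address-validity check is an assumption:

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Keep a copy for reprogramming after reset or resume. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}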
4440 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_tx_timeout() local
4441 ql_queue_asic_error(qdev); in qlge_tx_timeout()
4446 struct ql_adapter *qdev = in ql_asic_reset_work() local
4450 status = ql_adapter_down(qdev); in ql_asic_reset_work()
4454 status = ql_adapter_up(qdev); in ql_asic_reset_work()
4459 clear_bit(QL_ALLMULTI, &qdev->flags); in ql_asic_reset_work()
4460 clear_bit(QL_PROMISCUOUS, &qdev->flags); in ql_asic_reset_work()
4461 qlge_set_multicast_list(qdev->ndev); in ql_asic_reset_work()
4466 netif_alert(qdev, ifup, qdev->ndev, in ql_asic_reset_work()
4469 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_asic_reset_work()
4470 dev_close(qdev->ndev); in ql_asic_reset_work()
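The ASIC reset worker restarts the adapter outside interrupt context: take it down, bring it back up, then re-apply the multicast state that ql_adapter_up() cleared; if either step fails it forces QL_ADAPTER_UP and closes the interface so the stack sees a clean shutdown. A sketch, assuming the work runs under rtnl_lock and that the alert text is illustrative:

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore RX mode; ql_adapter_up() cleared the cached filter state. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;

error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}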
4491 static int ql_get_alt_pcie_func(struct ql_adapter *qdev) in ql_get_alt_pcie_func() argument
4497 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, in ql_get_alt_pcie_func()
4507 if (qdev->func == nic_func1) in ql_get_alt_pcie_func()
4508 qdev->alt_func = nic_func2; in ql_get_alt_pcie_func()
4509 else if (qdev->func == nic_func2) in ql_get_alt_pcie_func()
4510 qdev->alt_func = nic_func1; in ql_get_alt_pcie_func()
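ql_get_alt_pcie_func() reads the MPI test/port configuration word and extracts the two NIC function numbers; whichever one is not this device's own function becomes alt_func, which ql_get_board_info() below uses to decide which physical port this function owns. A sketch, with the shift and mask macro names being assumptions:

static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	u32 temp, nic_func1, nic_func2;
	int status;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &temp);
	if (status)
		return status;

	/* Pull both NIC function numbers out of the config word. */
	nic_func1 = (temp >> MPI_TEST_NIC1_FUNC_SHIFT) & MPI_TEST_NIC_FUNC_MASK;
	nic_func2 = (temp >> MPI_TEST_NIC2_FUNC_SHIFT) & MPI_TEST_NIC_FUNC_MASK;

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;	/* neither matches: config is inconsistent */

	return status;
}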
4517 static int ql_get_board_info(struct ql_adapter *qdev) in ql_get_board_info() argument
4520 qdev->func = in ql_get_board_info()
4521 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; in ql_get_board_info()
4522 if (qdev->func > 3) in ql_get_board_info()
4525 status = ql_get_alt_pcie_func(qdev); in ql_get_board_info()
4529 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1; in ql_get_board_info()
4530 if (qdev->port) { in ql_get_board_info()
4531 qdev->xg_sem_mask = SEM_XGMAC1_MASK; in ql_get_board_info()
4532 qdev->port_link_up = STS_PL1; in ql_get_board_info()
4533 qdev->port_init = STS_PI1; in ql_get_board_info()
4534 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; in ql_get_board_info()
4535 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; in ql_get_board_info()
4537 qdev->xg_sem_mask = SEM_XGMAC0_MASK; in ql_get_board_info()
4538 qdev->port_link_up = STS_PL0; in ql_get_board_info()
4539 qdev->port_init = STS_PI0; in ql_get_board_info()
4540 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; in ql_get_board_info()
4541 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; in ql_get_board_info()
4543 qdev->chip_rev_id = ql_read32(qdev, REV_ID); in ql_get_board_info()
4544 qdev->device_id = qdev->pdev->device; in ql_get_board_info()
4545 if (qdev->device_id == QLGE_DEVICE_ID_8012) in ql_get_board_info()
4546 qdev->nic_ops = &qla8012_nic_ops; in ql_get_board_info()
4547 else if (qdev->device_id == QLGE_DEVICE_ID_8000) in ql_get_board_info()
4548 qdev->nic_ops = &qla8000_nic_ops; in ql_get_board_info()
4555 struct ql_adapter *qdev = netdev_priv(ndev); in ql_release_all() local
4557 if (qdev->workqueue) { in ql_release_all()
4558 destroy_workqueue(qdev->workqueue); in ql_release_all()
4559 qdev->workqueue = NULL; in ql_release_all()
4562 if (qdev->reg_base) in ql_release_all()
4563 iounmap(qdev->reg_base); in ql_release_all()
4564 if (qdev->doorbell_area) in ql_release_all()
4565 iounmap(qdev->doorbell_area); in ql_release_all()
4566 vfree(qdev->mpi_coredump); in ql_release_all()
4573 struct ql_adapter *qdev = netdev_priv(ndev); in ql_init_device() local
4576 memset((void *)qdev, 0, sizeof(*qdev)); in ql_init_device()
4583 qdev->ndev = ndev; in ql_init_device()
4584 qdev->pdev = pdev; in ql_init_device()
4602 set_bit(QL_DMA64, &qdev->flags); in ql_init_device()
4618 qdev->reg_base = in ql_init_device()
4621 if (!qdev->reg_base) { in ql_init_device()
4627 qdev->doorbell_area_size = pci_resource_len(pdev, 3); in ql_init_device()
4628 qdev->doorbell_area = in ql_init_device()
4631 if (!qdev->doorbell_area) { in ql_init_device()
4637 err = ql_get_board_info(qdev); in ql_init_device()
4643 qdev->msg_enable = netif_msg_init(debug, default_msg); in ql_init_device()
4644 spin_lock_init(&qdev->hw_lock); in ql_init_device()
4645 spin_lock_init(&qdev->stats_lock); in ql_init_device()
4648 qdev->mpi_coredump = in ql_init_device()
4650 if (qdev->mpi_coredump == NULL) { in ql_init_device()
4655 set_bit(QL_FRC_COREDUMP, &qdev->flags); in ql_init_device()
4658 err = qdev->nic_ops->get_flash(qdev); in ql_init_device()
4665 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); in ql_init_device()
4668 qdev->tx_ring_size = NUM_TX_RING_ENTRIES; in ql_init_device()
4669 qdev->rx_ring_size = NUM_RX_RING_ENTRIES; in ql_init_device()
4672 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; in ql_init_device()
4673 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; in ql_init_device()
4674 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; in ql_init_device()
4675 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; in ql_init_device()
4680 qdev->workqueue = create_singlethread_workqueue(ndev->name); in ql_init_device()
4681 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); in ql_init_device()
4682 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); in ql_init_device()
4683 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); in ql_init_device()
4684 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); in ql_init_device()
4685 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); in ql_init_device()
4686 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); in ql_init_device()
4687 init_completion(&qdev->ide_completion); in ql_init_device()
4688 mutex_init(&qdev->mpi_mutex); in ql_init_device()
4721 struct ql_adapter *qdev = (struct ql_adapter *)data; in ql_timer() local
4724 var = ql_read32(qdev, STS); in ql_timer()
4725 if (pci_channel_offline(qdev->pdev)) { in ql_timer()
4726 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); in ql_timer()
4730 mod_timer(&qdev->timer, jiffies + (5*HZ)); in ql_timer()
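ql_timer() is a deferrable watchdog, armed in qlge_probe() below and re-armed from the EEH resume and PM resume paths: it reads STS so a dead PCI channel is noticed, logs and stops rearming if the channel is offline, and otherwise reschedules itself every five seconds. A sketch using the old timer API implied by the .data/.function setup below:

static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var;

	/* Touch a register; if the PCI channel is gone, stop rearming. */
	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}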
4737 struct ql_adapter *qdev = NULL; in qlge_probe() local
4752 qdev = netdev_priv(ndev); in qlge_probe()
4769 if (test_bit(QL_DMA64, &qdev->flags)) in qlge_probe()
4775 ndev->tx_queue_len = qdev->tx_ring_size; in qlge_probe()
4793 init_timer_deferrable(&qdev->timer); in qlge_probe()
4794 qdev->timer.data = (unsigned long)qdev; in qlge_probe()
4795 qdev->timer.function = ql_timer; in qlge_probe()
4796 qdev->timer.expires = jiffies + (5*HZ); in qlge_probe()
4797 add_timer(&qdev->timer); in qlge_probe()
4798 ql_link_off(qdev); in qlge_probe()
4800 atomic_set(&qdev->lb_count, 0); in qlge_probe()
4818 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_remove() local
4819 del_timer_sync(&qdev->timer); in qlge_remove()
4820 ql_cancel_all_work_sync(qdev); in qlge_remove()
4831 struct ql_adapter *qdev = netdev_priv(ndev); in ql_eeh_close() local
4839 del_timer_sync(&qdev->timer); in ql_eeh_close()
4840 ql_cancel_all_work_sync(qdev); in ql_eeh_close()
4842 for (i = 0; i < qdev->rss_ring_count; i++) in ql_eeh_close()
4843 netif_napi_del(&qdev->rx_ring[i].napi); in ql_eeh_close()
4845 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_eeh_close()
4846 ql_tx_ring_clean(qdev); in ql_eeh_close()
4847 ql_free_rx_buffers(qdev); in ql_eeh_close()
4848 ql_release_adapter_resources(qdev); in ql_eeh_close()
4859 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_io_error_detected() local
4874 set_bit(QL_EEH_FATAL, &qdev->flags); in qlge_io_error_detected()
4891 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_io_slot_reset() local
4897 netif_err(qdev, ifup, qdev->ndev, in qlge_io_slot_reset()
4903 if (ql_adapter_reset(qdev)) { in qlge_io_slot_reset()
4904 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); in qlge_io_slot_reset()
4905 set_bit(QL_EEH_FATAL, &qdev->flags); in qlge_io_slot_reset()
4915 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_io_resume() local
4921 netif_err(qdev, ifup, qdev->ndev, in qlge_io_resume()
4926 netif_err(qdev, ifup, qdev->ndev, in qlge_io_resume()
4929 mod_timer(&qdev->timer, jiffies + (5*HZ)); in qlge_io_resume()
4942 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_suspend() local
4946 del_timer_sync(&qdev->timer); in qlge_suspend()
4949 err = ql_adapter_down(qdev); in qlge_suspend()
4954 ql_wol(qdev); in qlge_suspend()
4970 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_resume() local
4977 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); in qlge_resume()
4986 err = ql_adapter_up(qdev); in qlge_resume()
4991 mod_timer(&qdev->timer, jiffies + (5*HZ)); in qlge_resume()