Lines Matching refs:efx
87 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, in efx_write_buf_tbl() argument
90 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, in efx_write_buf_tbl()
101 int efx_farch_test_registers(struct efx_nic *efx, in efx_farch_test_registers() argument
113 efx_reado(efx, &original, address); in efx_farch_test_registers()
124 efx_writeo(efx, &reg, address); in efx_farch_test_registers()
125 efx_reado(efx, &buf, address); in efx_farch_test_registers()
134 efx_writeo(efx, &reg, address); in efx_farch_test_registers()
135 efx_reado(efx, &buf, address); in efx_farch_test_registers()
141 efx_writeo(efx, &original, address); in efx_farch_test_registers()
147 netif_err(efx, hw, efx->net_dev, in efx_farch_test_registers()
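The efx_farch_test_registers() matches above implement a save / masked-pattern-write / read-back / restore loop over a table of registers. A minimal user-space sketch of that pattern, assuming hypothetical reg_read()/reg_write() stand-ins for efx_reado()/efx_writeo() and narrowing the driver's 128-bit efx_oword_t to 64 bits:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    static const uint64_t patterns[] = {
        0x0000000000000000ull, 0xffffffffffffffffull,
        0x5555555555555555ull, 0xaaaaaaaaaaaaaaaaull,
    };

    static bool test_one_register(uint64_t (*reg_read)(unsigned addr),
                                  void (*reg_write)(unsigned addr, uint64_t v),
                                  unsigned addr, uint64_t writable_mask)
    {
        uint64_t original = reg_read(addr);
        bool ok = true;
        size_t i;

        for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
            /* Drive only the writable bits; the rest keep their value */
            uint64_t wanted = (original & ~writable_mask) |
                              (patterns[i] & writable_mask);
            reg_write(addr, wanted);
            if (reg_read(addr) != wanted) {
                ok = false;
                break;
            }
        }
        reg_write(addr, original); /* always restore the saved value */
        return ok;
    }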
170 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) in efx_init_special_buffer() argument
183 netif_dbg(efx, probe, efx->net_dev, in efx_init_special_buffer()
190 efx_write_buf_tbl(efx, &buf_desc, index); in efx_init_special_buffer()
196 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) in efx_fini_special_buffer() argument
205 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", in efx_fini_special_buffer()
213 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); in efx_fini_special_buffer()
225 static int efx_alloc_special_buffer(struct efx_nic *efx, in efx_alloc_special_buffer() argument
230 struct siena_nic_data *nic_data = efx->nic_data; in efx_alloc_special_buffer()
234 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) in efx_alloc_special_buffer()
240 buffer->index = efx->next_buffer_table; in efx_alloc_special_buffer()
241 efx->next_buffer_table += buffer->entries; in efx_alloc_special_buffer()
243 BUG_ON(efx_siena_sriov_enabled(efx) && in efx_alloc_special_buffer()
244 nic_data->vf_buftbl_base < efx->next_buffer_table); in efx_alloc_special_buffer()
247 netif_dbg(efx, probe, efx->net_dev, in efx_alloc_special_buffer()
258 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) in efx_free_special_buffer() argument
263 netif_dbg(efx, hw, efx->net_dev, in efx_free_special_buffer()
270 efx_nic_free_buffer(efx, &buffer->buf); in efx_free_special_buffer()
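efx_alloc_special_buffer() above hands out buffer-table indices from a monotonic efx->next_buffer_table cursor, so each descriptor ring or event queue owns a contiguous range that efx_init_special_buffer() later programs entry by entry (the BUG_ON guards against colliding with the SR-IOV buffer-table region). A sketch of that bookkeeping with simplified stand-in types:

    struct special_buffer {
        unsigned index;   /* first buffer-table entry owned */
        unsigned entries; /* number of 4K entries covered */
    };

    static unsigned next_buffer_table; /* per-NIC allocation cursor */

    static void assign_buftbl_range(struct special_buffer *b, unsigned long len)
    {
        b->entries = (unsigned)((len + 4095) / 4096); /* one entry per 4K page */
        b->index = next_buffer_table;
        next_buffer_table += b->entries;
    }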
288 efx_writed_page(tx_queue->efx, &reg, in efx_farch_notify_tx_desc()
306 efx_writeo_page(tx_queue->efx, &reg, in efx_farch_push_tx_desc()
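efx_farch_notify_tx_desc() publishes the new write pointer through a per-queue page write so the NIC starts fetching descriptors; efx_farch_push_tx_desc() additionally pushes the first descriptor inline in the same oword write. A hypothetical sketch of the plain doorbell, with an assumed pre-mapped device page rather than the driver's efx_writed_page() helper:

    #include <stdint.h>

    struct txq {
        volatile uint32_t *doorbell; /* mapped per-queue device page */
        unsigned write_count;        /* descriptors produced so far */
        unsigned ptr_mask;           /* ring size - 1 */
    };

    static void notify_tx_desc(struct txq *q)
    {
        /* Device may fetch every descriptor up to (but excluding) wptr */
        *q->doorbell = q->write_count & q->ptr_mask;
    }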
359 struct efx_nic *efx = tx_queue->efx; in efx_farch_tx_probe() local
363 return efx_alloc_special_buffer(efx, &tx_queue->txd, in efx_farch_tx_probe()
369 struct efx_nic *efx = tx_queue->efx; in efx_farch_tx_init() local
373 efx_init_special_buffer(efx, &tx_queue->txd); in efx_farch_tx_init()
390 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { in efx_farch_tx_init()
397 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, in efx_farch_tx_init()
400 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { in efx_farch_tx_init()
404 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); in efx_farch_tx_init()
409 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); in efx_farch_tx_init()
412 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { in efx_farch_tx_init()
418 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, in efx_farch_tx_init()
425 struct efx_nic *efx = tx_queue->efx; in efx_farch_flush_tx_queue() local
434 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); in efx_farch_flush_tx_queue()
439 struct efx_nic *efx = tx_queue->efx; in efx_farch_tx_fini() local
444 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, in efx_farch_tx_fini()
448 efx_fini_special_buffer(efx, &tx_queue->txd); in efx_farch_tx_fini()
454 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); in efx_farch_tx_remove()
475 rx_queue->efx->type->rx_buffer_padding, in efx_farch_build_rx_desc()
485 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_write() local
499 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, in efx_farch_rx_write()
505 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_probe() local
509 return efx_alloc_special_buffer(efx, &rx_queue->rxd, in efx_farch_rx_probe()
516 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_init() local
517 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; in efx_farch_rx_init()
526 jumbo_en = !is_b0 || efx->rx_scatter; in efx_farch_rx_init()
528 netif_dbg(efx, hw, efx->net_dev, in efx_farch_rx_init()
536 efx_init_special_buffer(efx, &rx_queue->rxd); in efx_farch_rx_init()
553 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, in efx_farch_rx_init()
559 struct efx_nic *efx = rx_queue->efx; in efx_farch_flush_rx_queue() local
566 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); in efx_farch_flush_rx_queue()
572 struct efx_nic *efx = rx_queue->efx; in efx_farch_rx_fini() local
576 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, in efx_farch_rx_fini()
580 efx_fini_special_buffer(efx, &rx_queue->rxd); in efx_farch_rx_fini()
586 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); in efx_farch_rx_remove()
598 static bool efx_farch_flush_wake(struct efx_nic *efx) in efx_farch_flush_wake() argument
603 return (atomic_read(&efx->active_queues) == 0 || in efx_farch_flush_wake()
604 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT in efx_farch_flush_wake()
605 && atomic_read(&efx->rxq_flush_pending) > 0)); in efx_farch_flush_wake()
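The predicate in efx_farch_flush_wake() is worth spelling out: the flush waiter wakes either when every queue has drained, or when there is headroom to start another RX flush (fewer than the limit outstanding and at least one still pending). A C11-atomics sketch of the same condition, with RX_FLUSH_LIMIT as an assumed stand-in for EFX_RX_FLUSH_COUNT:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RX_FLUSH_LIMIT 4 /* assumed value of EFX_RX_FLUSH_COUNT */

    static atomic_int active_queues, rxq_flush_outstanding, rxq_flush_pending;

    static bool flush_wake(void)
    {
        return atomic_load(&active_queues) == 0 ||
               (atomic_load(&rxq_flush_outstanding) < RX_FLUSH_LIMIT &&
                atomic_load(&rxq_flush_pending) > 0);
    }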
608 static bool efx_check_tx_flush_complete(struct efx_nic *efx) in efx_check_tx_flush_complete() argument
615 efx_for_each_channel(channel, efx) { in efx_check_tx_flush_complete()
617 efx_reado_table(efx, &txd_ptr_tbl, in efx_check_tx_flush_complete()
623 netif_dbg(efx, hw, efx->net_dev, in efx_check_tx_flush_complete()
632 netif_dbg(efx, hw, efx->net_dev, in efx_check_tx_flush_complete()
652 static int efx_farch_do_flush(struct efx_nic *efx) in efx_farch_do_flush() argument
660 efx_for_each_channel(channel, efx) { in efx_farch_do_flush()
666 atomic_inc(&efx->rxq_flush_pending); in efx_farch_do_flush()
670 while (timeout && atomic_read(&efx->active_queues) > 0) { in efx_farch_do_flush()
675 if (efx_siena_sriov_enabled(efx)) { in efx_farch_do_flush()
676 rc = efx_mcdi_flush_rxqs(efx); in efx_farch_do_flush()
685 efx_for_each_channel(channel, efx) { in efx_farch_do_flush()
687 if (atomic_read(&efx->rxq_flush_outstanding) >= in efx_farch_do_flush()
693 atomic_dec(&efx->rxq_flush_pending); in efx_farch_do_flush()
694 atomic_inc(&efx->rxq_flush_outstanding); in efx_farch_do_flush()
701 timeout = wait_event_timeout(efx->flush_wq, in efx_farch_do_flush()
702 efx_farch_flush_wake(efx), in efx_farch_do_flush()
706 if (atomic_read(&efx->active_queues) && in efx_farch_do_flush()
707 !efx_check_tx_flush_complete(efx)) { in efx_farch_do_flush()
708 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " in efx_farch_do_flush()
709 "(rx %d+%d)\n", atomic_read(&efx->active_queues), in efx_farch_do_flush()
710 atomic_read(&efx->rxq_flush_outstanding), in efx_farch_do_flush()
711 atomic_read(&efx->rxq_flush_pending)); in efx_farch_do_flush()
714 atomic_set(&efx->active_queues, 0); in efx_farch_do_flush()
715 atomic_set(&efx->rxq_flush_pending, 0); in efx_farch_do_flush()
716 atomic_set(&efx->rxq_flush_outstanding, 0); in efx_farch_do_flush()
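efx_farch_do_flush() staggers RX flushes in batches and repeatedly sleeps on efx->flush_wq until everything drains or the timeout is spent; on failure it logs the stuck counts and zeroes them so a subsequent reset can proceed. A very rough, self-contained sketch of that outer loop shape, with all helpers as stubs rather than the driver's wait_event_timeout() plumbing:

    #include <errno.h>
    #include <stdatomic.h>

    static atomic_int queues_left; /* stand-in for efx->active_queues */

    static void issue_rx_flush_batch(void) { /* stub: FLUSH_DESCQ writes */ }
    static int sleep_until_wake(int ms) { return ms > 0 ? ms - 1 : 0; } /* stub */

    static int do_flush_sketch(int timeout_ms)
    {
        while (timeout_ms > 0 && atomic_load(&queues_left) > 0) {
            issue_rx_flush_batch();
            timeout_ms = sleep_until_wake(timeout_ms);
        }
        /* On timeout the driver reports the remaining queue counts and
         * resets the counters before giving up. */
        return atomic_load(&queues_left) ? -ETIMEDOUT : 0;
    }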
722 int efx_farch_fini_dmaq(struct efx_nic *efx) in efx_farch_fini_dmaq() argument
730 if (efx->state != STATE_RECOVERY) { in efx_farch_fini_dmaq()
732 if (efx->pci_dev->is_busmaster) { in efx_farch_fini_dmaq()
733 efx->type->prepare_flush(efx); in efx_farch_fini_dmaq()
734 rc = efx_farch_do_flush(efx); in efx_farch_fini_dmaq()
735 efx->type->finish_flush(efx); in efx_farch_fini_dmaq()
738 efx_for_each_channel(channel, efx) { in efx_farch_fini_dmaq()
763 void efx_farch_finish_flr(struct efx_nic *efx) in efx_farch_finish_flr() argument
765 atomic_set(&efx->rxq_flush_pending, 0); in efx_farch_finish_flr()
766 atomic_set(&efx->rxq_flush_outstanding, 0); in efx_farch_finish_flr()
767 atomic_set(&efx->active_queues, 0); in efx_farch_finish_flr()
786 struct efx_nic *efx = channel->efx; in efx_farch_ev_read_ack() local
794 efx_writed(efx, &reg, in efx_farch_ev_read_ack()
795 efx->type->evq_rptr_tbl_base + in efx_farch_ev_read_ack()
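efx_farch_ev_read_ack() acknowledges processed events by writing the masked read pointer into the channel's entry of the EVQ_RPTR table, letting the NIC reuse those ring slots. A sketch under the assumption that the per-channel register has already been mapped:

    #include <stdint.h>

    struct evq {
        volatile uint32_t *rptr_reg; /* evq_rptr_tbl_base + channel slot */
        unsigned read_count;
        unsigned ptr_mask;           /* ring size - 1 */
    };

    static void ev_read_ack(struct evq *q)
    {
        *q->rptr_reg = q->read_count & q->ptr_mask;
    }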
800 void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, in efx_farch_generate_event() argument
812 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); in efx_farch_generate_event()
822 efx_farch_generate_event(channel->efx, channel->channel, &event); in efx_farch_magic_event()
836 struct efx_nic *efx = channel->efx; in efx_farch_handle_tx_event() local
839 if (unlikely(ACCESS_ONCE(efx->reset_pending))) in efx_farch_handle_tx_event()
857 netif_tx_lock(efx->net_dev); in efx_farch_handle_tx_event()
859 netif_tx_unlock(efx->net_dev); in efx_farch_handle_tx_event()
861 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); in efx_farch_handle_tx_event()
863 netif_err(efx, tx_err, efx->net_dev, in efx_farch_handle_tx_event()
877 struct efx_nic *efx = rx_queue->efx; in efx_farch_handle_rx_not_ok() local
897 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? in efx_farch_handle_rx_not_ok()
912 else if (!efx->loopback_selftest) { in efx_farch_handle_rx_not_ok()
925 netif_dbg(efx, rx_err, efx->net_dev, in efx_farch_handle_rx_not_ok()
956 struct efx_nic *efx = rx_queue->efx; in efx_farch_handle_rx_bad_index() local
968 netif_info(efx, rx_err, efx->net_dev, in efx_farch_handle_rx_bad_index()
972 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? in efx_farch_handle_rx_bad_index()
993 struct efx_nic *efx = channel->efx; in efx_farch_handle_rx_event() local
995 if (unlikely(ACCESS_ONCE(efx->reset_pending))) in efx_farch_handle_rx_event()
1096 efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) in efx_farch_handle_tx_flush_done() argument
1102 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { in efx_farch_handle_tx_flush_done()
1103 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, in efx_farch_handle_tx_flush_done()
1117 efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) in efx_farch_handle_rx_flush_done() argument
1126 if (qid >= efx->n_channels) in efx_farch_handle_rx_flush_done()
1128 channel = efx_get_channel(efx, qid); in efx_farch_handle_rx_flush_done()
1134 netif_info(efx, hw, efx->net_dev, in efx_farch_handle_rx_flush_done()
1137 atomic_inc(&efx->rxq_flush_pending); in efx_farch_handle_rx_flush_done()
1142 atomic_dec(&efx->rxq_flush_outstanding); in efx_farch_handle_rx_flush_done()
1143 if (efx_farch_flush_wake(efx)) in efx_farch_handle_rx_flush_done()
1144 wake_up(&efx->flush_wq); in efx_farch_handle_rx_flush_done()
1150 struct efx_nic *efx = channel->efx; in efx_farch_handle_drain_event() local
1152 WARN_ON(atomic_read(&efx->active_queues) == 0); in efx_farch_handle_drain_event()
1153 atomic_dec(&efx->active_queues); in efx_farch_handle_drain_event()
1154 if (efx_farch_flush_wake(efx)) in efx_farch_handle_drain_event()
1155 wake_up(&efx->flush_wq); in efx_farch_handle_drain_event()
1161 struct efx_nic *efx = channel->efx; in efx_farch_handle_generated_event() local
1182 netif_dbg(efx, hw, efx->net_dev, "channel %d received " in efx_farch_handle_generated_event()
1191 struct efx_nic *efx = channel->efx; in efx_farch_handle_driver_event() local
1200 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", in efx_farch_handle_driver_event()
1202 efx_farch_handle_tx_flush_done(efx, event); in efx_farch_handle_driver_event()
1203 efx_siena_sriov_tx_flush_done(efx, event); in efx_farch_handle_driver_event()
1206 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", in efx_farch_handle_driver_event()
1208 efx_farch_handle_rx_flush_done(efx, event); in efx_farch_handle_driver_event()
1209 efx_siena_sriov_rx_flush_done(efx, event); in efx_farch_handle_driver_event()
1212 netif_dbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1217 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1221 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1226 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1231 netif_err(efx, rx_err, efx->net_dev, in efx_farch_handle_driver_event()
1234 atomic_inc(&efx->rx_reset); in efx_farch_handle_driver_event()
1235 efx_schedule_reset(efx, in efx_farch_handle_driver_event()
1236 EFX_WORKAROUND_6555(efx) ? in efx_farch_handle_driver_event()
1242 netif_err(efx, rx_err, efx->net_dev, in efx_farch_handle_driver_event()
1246 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); in efx_farch_handle_driver_event()
1248 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); in efx_farch_handle_driver_event()
1252 netif_err(efx, tx_err, efx->net_dev, in efx_farch_handle_driver_event()
1256 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); in efx_farch_handle_driver_event()
1258 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); in efx_farch_handle_driver_event()
1261 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_handle_driver_event()
1271 struct efx_nic *efx = channel->efx; in efx_farch_ev_process() local
1291 netif_vdbg(channel->efx, intr, channel->efx->net_dev, in efx_farch_ev_process()
1311 if (tx_packets > efx->txq_entries) { in efx_farch_ev_process()
1329 if (efx->type->handle_global_event && in efx_farch_ev_process()
1330 efx->type->handle_global_event(channel, &event)) in efx_farch_ev_process()
1334 netif_err(channel->efx, hw, channel->efx->net_dev, in efx_farch_ev_process()
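efx_farch_ev_process() walks the host-memory event ring until it reaches an entry still holding the driver's all-ones fill pattern, dispatches each event by its code, and re-poisons consumed entries. A simplified sketch of that walk (real events are efx_qword_t and the per-code dispatch is elided):

    #include <stdint.h>
    #include <stdbool.h>

    struct evring {
        uint64_t *ring;      /* host-memory event ring */
        unsigned read_count; /* running read pointer */
        unsigned ptr_mask;   /* ring size - 1 */
    };

    static bool event_present(uint64_t ev) { return ev != ~0ull; }

    static int ev_process(struct evring *q, int quota)
    {
        int spent = 0;

        while (spent < quota) {
            unsigned idx = q->read_count & q->ptr_mask;
            uint64_t ev = q->ring[idx];

            if (!event_present(ev))
                break;
            q->ring[idx] = ~0ull; /* mark consumed with the fill pattern */
            q->read_count++;
            /* ...dispatch on the event code: RX, TX, driver-generated,
             * drain and global events... */
            spent++;
        }
        return spent;
    }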
1349 struct efx_nic *efx = channel->efx; in efx_farch_ev_probe() local
1353 return efx_alloc_special_buffer(efx, &channel->eventq, in efx_farch_ev_probe()
1360 struct efx_nic *efx = channel->efx; in efx_farch_ev_init() local
1362 netif_dbg(efx, hw, efx->net_dev, in efx_farch_ev_init()
1367 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { in efx_farch_ev_init()
1372 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); in efx_farch_ev_init()
1376 efx_init_special_buffer(efx, &channel->eventq); in efx_farch_ev_init()
1386 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, in efx_farch_ev_init()
1395 struct efx_nic *efx = channel->efx; in efx_farch_ev_fini() local
1399 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, in efx_farch_ev_fini()
1401 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) in efx_farch_ev_fini()
1402 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); in efx_farch_ev_fini()
1405 efx_fini_special_buffer(efx, &channel->eventq); in efx_farch_ev_fini()
1411 efx_free_special_buffer(channel->efx, &channel->eventq); in efx_farch_ev_remove()
1435 static inline void efx_farch_interrupts(struct efx_nic *efx, in efx_farch_interrupts() argument
1441 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, in efx_farch_interrupts()
1444 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); in efx_farch_interrupts()
1447 void efx_farch_irq_enable_master(struct efx_nic *efx) in efx_farch_irq_enable_master() argument
1449 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); in efx_farch_irq_enable_master()
1452 efx_farch_interrupts(efx, true, false); in efx_farch_irq_enable_master()
1455 void efx_farch_irq_disable_master(struct efx_nic *efx) in efx_farch_irq_disable_master() argument
1458 efx_farch_interrupts(efx, false, false); in efx_farch_irq_disable_master()
1465 void efx_farch_irq_test_generate(struct efx_nic *efx) in efx_farch_irq_test_generate() argument
1467 efx_farch_interrupts(efx, true, true); in efx_farch_irq_test_generate()
1473 irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) in efx_farch_fatal_interrupt() argument
1475 struct falcon_nic_data *nic_data = efx->nic_data; in efx_farch_fatal_interrupt()
1476 efx_oword_t *int_ker = efx->irq_status.addr; in efx_farch_fatal_interrupt()
1480 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); in efx_farch_fatal_interrupt()
1483 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " in efx_farch_fatal_interrupt()
1493 efx_reado(efx, &reg, FR_AZ_MEM_STAT); in efx_farch_fatal_interrupt()
1494 netif_err(efx, hw, efx->net_dev, in efx_farch_fatal_interrupt()
1500 pci_clear_master(efx->pci_dev); in efx_farch_fatal_interrupt()
1501 if (efx_nic_is_dual_func(efx)) in efx_farch_fatal_interrupt()
1503 efx_farch_irq_disable_master(efx); in efx_farch_fatal_interrupt()
1506 if (efx->int_error_count == 0 || in efx_farch_fatal_interrupt()
1507 time_after(jiffies, efx->int_error_expire)) { in efx_farch_fatal_interrupt()
1508 efx->int_error_count = 0; in efx_farch_fatal_interrupt()
1509 efx->int_error_expire = in efx_farch_fatal_interrupt()
1512 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { in efx_farch_fatal_interrupt()
1513 netif_err(efx, hw, efx->net_dev, in efx_farch_fatal_interrupt()
1515 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); in efx_farch_fatal_interrupt()
1517 netif_err(efx, hw, efx->net_dev, in efx_farch_fatal_interrupt()
1520 efx_schedule_reset(efx, RESET_TYPE_DISABLE); in efx_farch_fatal_interrupt()
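The tail of efx_farch_fatal_interrupt() applies an error-storm policy: an occasional fatal error triggers RESET_TYPE_INT_ERROR, but once EFX_MAX_INT_ERRORS arrive inside one expiry window the device is disabled instead. A sketch of that rate limit using wall-clock time in place of jiffies; the count and window values are assumed placeholders:

    #include <time.h>
    #include <stdbool.h>

    #define MAX_INT_ERRORS 5          /* stand-in for EFX_MAX_INT_ERRORS */
    #define INT_ERROR_WINDOW_SEC 3600 /* assumed expiry window */

    static int int_error_count;
    static time_t int_error_expire;

    /* Returns true when the error budget is exhausted and the device
     * should be disabled rather than reset. */
    static bool should_disable_on_fatal(void)
    {
        time_t now = time(NULL);

        if (int_error_count == 0 || now > int_error_expire) {
            int_error_count = 0;
            int_error_expire = now + INT_ERROR_WINDOW_SEC;
        }
        return ++int_error_count >= MAX_INT_ERRORS;
    }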
1531 struct efx_nic *efx = dev_id; in efx_farch_legacy_interrupt() local
1532 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); in efx_farch_legacy_interrupt()
1533 efx_oword_t *int_ker = efx->irq_status.addr; in efx_farch_legacy_interrupt()
1541 efx_readd(efx, &reg, FR_BZ_INT_ISR0); in efx_farch_legacy_interrupt()
1548 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && in efx_farch_legacy_interrupt()
1549 !efx->eeh_disabled_legacy_irq) { in efx_farch_legacy_interrupt()
1550 disable_irq_nosync(efx->legacy_irq); in efx_farch_legacy_interrupt()
1551 efx->eeh_disabled_legacy_irq = true; in efx_farch_legacy_interrupt()
1555 if (queues & (1U << efx->irq_level) && soft_enabled) { in efx_farch_legacy_interrupt()
1558 return efx_farch_fatal_interrupt(efx); in efx_farch_legacy_interrupt()
1559 efx->last_irq_cpu = raw_smp_processor_id(); in efx_farch_legacy_interrupt()
1563 efx->irq_zero_count = 0; in efx_farch_legacy_interrupt()
1567 efx_for_each_channel(channel, efx) { in efx_farch_legacy_interrupt()
1582 if (efx->irq_zero_count++ == 0) in efx_farch_legacy_interrupt()
1587 efx_for_each_channel(channel, efx) { in efx_farch_legacy_interrupt()
1599 netif_vdbg(efx, intr, efx->net_dev, in efx_farch_legacy_interrupt()
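The legacy handler reads FR_BZ_INT_ISR0 once and treats it as a per-event-queue bitmask: bit N set means channel N has work, and the bit at efx->irq_level additionally flags a possible fatal interrupt. A sketch of the fan-out, with schedule_channel() as a hypothetical stand-in for efx_schedule_channel_irq():

    #include <stdint.h>

    static void schedule_channel(int ch) { (void)ch; /* kick NAPI */ }

    static void legacy_irq_fanout(uint32_t isr, int n_channels)
    {
        int ch;

        for (ch = 0; ch < n_channels; ch++)
            if (isr & (1u << ch))
                schedule_channel(ch);
    }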
1616 struct efx_nic *efx = context->efx; in efx_farch_msi_interrupt() local
1617 efx_oword_t *int_ker = efx->irq_status.addr; in efx_farch_msi_interrupt()
1620 netif_vdbg(efx, intr, efx->net_dev, in efx_farch_msi_interrupt()
1624 if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) in efx_farch_msi_interrupt()
1628 if (context->index == efx->irq_level) { in efx_farch_msi_interrupt()
1631 return efx_farch_fatal_interrupt(efx); in efx_farch_msi_interrupt()
1632 efx->last_irq_cpu = raw_smp_processor_id(); in efx_farch_msi_interrupt()
1636 efx_schedule_channel_irq(efx->channel[context->index]); in efx_farch_msi_interrupt()
1644 void efx_farch_rx_push_indir_table(struct efx_nic *efx) in efx_farch_rx_push_indir_table() argument
1649 BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0); in efx_farch_rx_push_indir_table()
1651 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != in efx_farch_rx_push_indir_table()
1656 efx->rx_indir_table[i]); in efx_farch_rx_push_indir_table()
1657 efx_writed(efx, &dword, in efx_farch_rx_push_indir_table()
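efx_farch_rx_push_indir_table() programs the RSS indirection table with one dword write per entry, each naming the RX queue that hash bucket maps to. A sketch with write_entry() as an assumed stand-in for the efx_writed() call into FR_BZ_RX_INDIRECTION_TBL, and an assumed table size:

    #include <stdint.h>

    #define INDIR_TABLE_SIZE 128 /* assumed number of table entries */

    static void write_entry(unsigned i, uint32_t v) { (void)i; (void)v; }

    static void push_indir_table(const uint8_t *rx_indir_table)
    {
        unsigned i;

        for (i = 0; i < INDIR_TABLE_SIZE; i++)
            write_entry(i, rx_indir_table[i]);
    }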
1672 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) in efx_farch_dimension_resources() argument
1677 struct siena_nic_data *nic_data = efx->nic_data; in efx_farch_dimension_resources()
1683 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + in efx_farch_dimension_resources()
1684 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + in efx_farch_dimension_resources()
1685 efx->n_channels * EFX_MAX_EVQ_SIZE) in efx_farch_dimension_resources()
1687 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); in efx_farch_dimension_resources()
1690 if (efx->type->sriov_wanted(efx)) { in efx_farch_dimension_resources()
1701 efx_vf_size(efx)); in efx_farch_dimension_resources()
1703 (1024U - EFX_VI_BASE) >> efx->vi_scale); in efx_farch_dimension_resources()
1705 if (efx->vf_count > vf_limit) { in efx_farch_dimension_resources()
1706 netif_err(efx, probe, efx->net_dev, in efx_farch_dimension_resources()
1708 efx->vf_count, vf_limit); in efx_farch_dimension_resources()
1709 efx->vf_count = vf_limit; in efx_farch_dimension_resources()
1711 vi_count += efx->vf_count * efx_vf_size(efx); in efx_farch_dimension_resources()
1715 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; in efx_farch_dimension_resources()
1716 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; in efx_farch_dimension_resources()
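The last two matches show the SRAM carve-up in efx_farch_dimension_resources(): per-VI descriptor caches are placed at the top of SRAM, TX above RX, leaving everything below rx_dc_base for the buffer table. The same arithmetic as a sketch, with assumed placeholder values for the per-queue cache sizes:

    #define TX_DC_ENTRIES 16 /* assumed TX descriptor-cache entries per VI */
    #define RX_DC_ENTRIES 64 /* assumed RX descriptor-cache entries per VI */

    static void carve_sram(unsigned sram_lim_qw, unsigned vi_count,
                           unsigned *tx_dc_base, unsigned *rx_dc_base)
    {
        *tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
        *rx_dc_base = *tx_dc_base - vi_count * RX_DC_ENTRIES;
    }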
1719 u32 efx_farch_fpga_ver(struct efx_nic *efx) in efx_farch_fpga_ver() argument
1722 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); in efx_farch_fpga_ver()
1726 void efx_farch_init_common(struct efx_nic *efx) in efx_farch_init_common() argument
1731 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); in efx_farch_init_common()
1732 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); in efx_farch_init_common()
1733 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); in efx_farch_init_common()
1734 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); in efx_farch_init_common()
1739 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); in efx_farch_init_common()
1746 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); in efx_farch_init_common()
1748 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); in efx_farch_init_common()
1753 EFX_INT_MODE_USE_MSI(efx), in efx_farch_init_common()
1754 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); in efx_farch_init_common()
1755 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); in efx_farch_init_common()
1757 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) in efx_farch_init_common()
1759 efx->irq_level = 0x1f; in efx_farch_init_common()
1762 efx->irq_level = 0; in efx_farch_init_common()
1774 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) in efx_farch_init_common()
1777 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); in efx_farch_init_common()
1782 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); in efx_farch_init_common()
1795 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) in efx_farch_init_common()
1797 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); in efx_farch_init_common()
1799 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { in efx_farch_init_common()
1809 efx_writeo(efx, &temp, FR_BZ_TX_PACE); in efx_farch_init_common()
1887 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1934 static void efx_farch_filter_push_rx_config(struct efx_nic *efx) in efx_farch_filter_push_rx_config() argument
1936 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_push_rx_config()
1940 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); in efx_farch_filter_push_rx_config()
1994 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { in efx_farch_filter_push_rx_config()
2002 efx->rx_scatter); in efx_farch_filter_push_rx_config()
2005 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); in efx_farch_filter_push_rx_config()
2008 static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) in efx_farch_filter_push_tx_limits() argument
2010 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_push_tx_limits()
2014 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); in efx_farch_filter_push_tx_limits()
2028 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); in efx_farch_filter_push_tx_limits()
2219 efx_farch_filter_init_rx_auto(struct efx_nic *efx, in efx_farch_filter_init_rx_auto() argument
2227 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | in efx_farch_filter_init_rx_auto()
2228 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); in efx_farch_filter_init_rx_auto()
2371 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) in efx_farch_filter_get_rx_id_limit() argument
2373 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_get_rx_id_limit()
2387 s32 efx_farch_filter_insert(struct efx_nic *efx, in efx_farch_filter_insert() argument
2391 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_insert()
2407 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_filter_insert()
2419 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_insert()
2449 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_insert()
2508 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_insert()
2513 efx_farch_filter_push_tx_limits(efx); in efx_farch_filter_insert()
2515 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_insert()
2518 efx_writeo(efx, &filter, in efx_farch_filter_insert()
2525 efx_farch_filter_table_clear_entry(efx, table, in efx_farch_filter_insert()
2529 netif_vdbg(efx, hw, efx->net_dev, in efx_farch_filter_insert()
2535 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_insert()
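Before programming a filter register, efx_farch_filter_insert() probes the hash table for a matching or free slot under efx->filter_lock. A sketch of an open-addressed probe of that general shape, under assumed hash/increment helpers and table size (not the driver's actual efx_farch_filter_hash()/_increment() functions):

    #include <stdint.h>

    #define FILTER_TABLE_SIZE 8192 /* assumed table size, power of two */

    /* Hypothetical hash and probe-step functions */
    static unsigned filter_hash(uint32_t key) { return key * 2654435761u; }
    static unsigned filter_incr(uint32_t key) { return key * 2 + 1; }

    /* Probe until a free slot or a depth limit; returns slot index or -1.
     * used[] stands in for the table's occupancy bitmap. */
    static int probe_for_slot(const unsigned char *used, uint32_t key,
                              int max_depth)
    {
        unsigned idx = filter_hash(key) & (FILTER_TABLE_SIZE - 1);
        unsigned step = filter_incr(key) | 1; /* odd step covers the table */
        int depth;

        for (depth = 0; depth < max_depth; depth++) {
            if (!used[idx])
                return (int)idx;
            idx = (idx + step) & (FILTER_TABLE_SIZE - 1);
        }
        return -1;
    }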
2540 efx_farch_filter_table_clear_entry(struct efx_nic *efx, in efx_farch_filter_table_clear_entry() argument
2553 efx_writeo(efx, &filter, table->offset + table->step * filter_idx); in efx_farch_filter_table_clear_entry()
2564 efx_farch_filter_push_tx_limits(efx); in efx_farch_filter_table_clear_entry()
2566 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_table_clear_entry()
2570 static int efx_farch_filter_remove(struct efx_nic *efx, in efx_farch_filter_remove() argument
2582 efx_farch_filter_init_rx_auto(efx, spec); in efx_farch_filter_remove()
2583 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_remove()
2585 efx_farch_filter_table_clear_entry(efx, table, filter_idx); in efx_farch_filter_remove()
2591 int efx_farch_filter_remove_safe(struct efx_nic *efx, in efx_farch_filter_remove_safe() argument
2595 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_remove_safe()
2612 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_remove_safe()
2613 rc = efx_farch_filter_remove(efx, table, filter_idx, priority); in efx_farch_filter_remove_safe()
2614 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_remove_safe()
2619 int efx_farch_filter_get_safe(struct efx_nic *efx, in efx_farch_filter_get_safe() argument
2623 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_get_safe()
2640 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_get_safe()
2650 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_get_safe()
2656 efx_farch_filter_table_clear(struct efx_nic *efx, in efx_farch_filter_table_clear() argument
2660 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_table_clear()
2664 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_table_clear()
2667 efx_farch_filter_remove(efx, table, in efx_farch_filter_table_clear()
2670 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_table_clear()
2673 int efx_farch_filter_clear_rx(struct efx_nic *efx, in efx_farch_filter_clear_rx() argument
2676 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, in efx_farch_filter_clear_rx()
2678 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, in efx_farch_filter_clear_rx()
2680 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, in efx_farch_filter_clear_rx()
2685 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, in efx_farch_filter_count_rx_used() argument
2688 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_count_rx_used()
2694 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_count_rx_used()
2707 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_count_rx_used()
2712 s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, in efx_farch_filter_get_rx_ids() argument
2716 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_get_rx_ids()
2722 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_get_rx_ids()
2741 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_get_rx_ids()
2747 void efx_farch_filter_table_restore(struct efx_nic *efx) in efx_farch_filter_table_restore() argument
2749 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_table_restore()
2755 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_table_restore()
2768 efx_writeo(efx, &filter, in efx_farch_filter_table_restore()
2773 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_table_restore()
2774 efx_farch_filter_push_tx_limits(efx); in efx_farch_filter_table_restore()
2776 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_table_restore()
2779 void efx_farch_filter_table_remove(struct efx_nic *efx) in efx_farch_filter_table_remove() argument
2781 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_table_remove()
2791 int efx_farch_filter_table_probe(struct efx_nic *efx) in efx_farch_filter_table_probe() argument
2800 efx->filter_state = state; in efx_farch_filter_table_probe()
2802 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { in efx_farch_filter_table_probe()
2810 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { in efx_farch_filter_table_probe()
2851 efx_farch_filter_init_rx_auto(efx, spec); in efx_farch_filter_table_probe()
2856 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_table_probe()
2861 efx_farch_filter_table_remove(efx); in efx_farch_filter_table_probe()
2866 void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) in efx_farch_filter_update_rx_scatter() argument
2868 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_update_rx_scatter()
2874 spin_lock_bh(&efx->filter_lock); in efx_farch_filter_update_rx_scatter()
2884 efx->n_rx_channels) in efx_farch_filter_update_rx_scatter()
2887 if (efx->rx_scatter) in efx_farch_filter_update_rx_scatter()
2899 efx_writeo(efx, &filter, in efx_farch_filter_update_rx_scatter()
2904 efx_farch_filter_push_rx_config(efx); in efx_farch_filter_update_rx_scatter()
2906 spin_unlock_bh(&efx->filter_lock); in efx_farch_filter_update_rx_scatter()
2911 s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, in efx_farch_filter_rfs_insert() argument
2914 return efx_farch_filter_insert(efx, gen_spec, true); in efx_farch_filter_rfs_insert()
2917 bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, in efx_farch_filter_rfs_expire_one() argument
2920 struct efx_farch_filter_state *state = efx->filter_state; in efx_farch_filter_rfs_expire_one()
2926 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, in efx_farch_filter_rfs_expire_one()
2928 efx_farch_filter_table_clear_entry(efx, table, index); in efx_farch_filter_rfs_expire_one()
2937 void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) in efx_farch_filter_sync_rx_mode() argument
2939 struct net_device *net_dev = efx->net_dev; in efx_farch_filter_sync_rx_mode()
2941 union efx_multicast_hash *mc_hash = &efx->multicast_hash; in efx_farch_filter_sync_rx_mode()
2945 if (!efx_dev_registered(efx)) in efx_farch_filter_sync_rx_mode()
2950 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); in efx_farch_filter_sync_rx_mode()
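efx_farch_filter_sync_rx_mode() also maintains efx->multicast_hash: each multicast address is hashed down to one bit of a wide hash register, and reception is allowed when that bit is set. A sketch of the bit-setting step, with crc32_of_mac() as a hypothetical stand-in for the kernel's ether_crc() and an assumed hash width:

    #include <stdint.h>

    #define MCAST_HASH_BITS 256 /* assumed width of the hash register */

    /* Hypothetical placeholder; the driver uses ether_crc() here */
    static uint32_t crc32_of_mac(const uint8_t mac[6]) { return mac[5]; }

    static void mcast_hash_set(uint8_t hash[MCAST_HASH_BITS / 8],
                               const uint8_t mac[6])
    {
        uint32_t bit = crc32_of_mac(mac) & (MCAST_HASH_BITS - 1);

        hash[bit / 8] |= 1u << (bit % 8);
    }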