/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These
 * buffers get used not only for Rx frames, but for any command response or
 * notification from the NIC. The driver and NIC manage the Rx buffers by
 * means of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
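
/*
 * Worked example of the arithmetic above, with RX_QUEUE_SIZE = 256:
 *   read = 2, write = 250  ->  (2 - 250 - 1) & 255 = 7 free slots
 *   read = write           ->  (-1) & 255 = 255 = RX_QUEUE_SIZE - 1,
 * i.e. at most RX_QUEUE_SIZE - 1 slots are ever reported free, which keeps
 * the "empty" and "completely full" states distinguishable.
 */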

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans,
				       "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	spin_lock(&rxq->lock);

	if (!rxq->need_update)
		goto exit_unlock;

	iwl_pcie_rxq_inc_wr_ptr(trans);
	rxq->need_update = false;

 exit_unlock:
	spin_unlock(&rxq->lock);
}
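
/*
 * Note on write pointer updates: the value handed to the device
 * (rxq->write_actual) is always rounded down to a multiple of 8, so the
 * hardware only ever sees the write pointer advance in steps of 8 RBDs.
 * iwl_pcie_rxq_restock() below compares (rxq->write & ~0x7) against
 * write_actual to decide whether issuing a new update is worthwhile.
 */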

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled, there is no need to try to add
	 * buffers... This can happen when we stop the device and still have
	 * an interrupt pending. We stop the APM before we sync the interrupts
	 * because we have to (see comment there). On the other hand, since
	 * the APM is stopped, we cannot access the HW (in particular not
	 * prph). So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans);
		spin_unlock(&rxq->lock);
	}
}
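
/*
 * Division of labour: iwl_pcie_rxq_restock() above never allocates memory,
 * it only moves buffers that already sit on rx_free, so it is cheap enough
 * to be called directly from the RX handling path. Page allocation happens
 * only in iwl_pcie_rxq_alloc_rbs() below, with GFP_KERNEL from process
 * context (initialization and the replenish work item) or GFP_ATOMIC when
 * invoked from the RX interrupt path.
 */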

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
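
/*
 * RBD format reminder: the descriptor written into rxq->bd[] is simply the
 * DMA address of the page shifted right by 8 bits (see
 * iwl_pcie_dma_addr2rbd_ptr), which is why the function above insists that
 * the mapping fits in 36 bits and is 256-byte aligned before handing the
 * buffer to the device.
 */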

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
	iwl_pcie_rxq_alloc_rbs(trans, gfp);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *   the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
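
/*
 * Note that the device takes both base addresses above pre-shifted: the RBD
 * circular buffer base is programmed as bd_dma >> 8 (i.e. in 256-byte units)
 * and the Rx status write-back address as rb_stts_dma >> 4 (16-byte units).
 */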

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock(&rxq->lock);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans, GFP_KERNEL);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
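
/*
 * A single receive buffer (one page, or 2^rx_page_order pages) can carry
 * several packets back to back. iwl_pcie_rx_handle_rb() below walks them by
 * advancing 'offset' in FH_RSCSR_FRAME_ALIGN steps until it hits
 * FH_RSCSR_FRAME_INVALID or runs out of buffer.
 */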
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
}
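
/*
 * Buffer recycling summary for iwl_pcie_rx_handle_rb() above: once all
 * packets in an RB have been handled, the page is re-mapped for DMA and
 * returned to rx_free so it can be handed back to the device. If an op-mode
 * handler kept a reference to the page (rxcb._page_stolen) or the re-mapping
 * fails, the rxb goes back onto rx_used and will receive a fresh page from
 * iwl_pcie_rxq_alloc_rbs() later.
 */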
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate the total number of slots that need to be restocked
	 * after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
				count = 0;
				goto restart;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	if (fill_rx)
		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
	else
		iwl_pcie_rxq_restock(trans);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/*
 * Interrupt handling using the ICT table: with this scheme the driver stops
 * reading the INTA register to learn the device's interrupt causes (reading
 * that register is expensive). Instead, the device writes the interrupt
 * causes into the ICT table in DRAM, increments its index and then fires the
 * interrupt. The driver ORs all ICT table entries from the current index up
 * to the first entry with a 0 value; the result is the set of interrupts that
 * needs servicing. The driver then sets those entries back to 0 and updates
 * the index.
 */
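
/*
 * Each ICT entry carries a compressed copy of the CSR_INT cause bits: the
 * expansion at the end of the function below treats the low byte of the
 * accumulated value as CSR_INT bits 0-7 and the next byte as CSR_INT bits
 * 24-31, i.e. inta = (val & 0xff) | ((val & 0xff00) << 16).
 */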
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires several steps in the
		 * device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt while the shared data changes do not yet
		 * reflect it; the periodic interrupt will detect any such
		 * dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				    &trans_pcie->ict_tbl_dma,
				    GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma,
		      trans_pcie->ict_tbl);

	return 0;
}
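
/*
 * The alignment check above matters because iwl_pcie_reset_ict() programs the
 * table address into CSR_DRAM_INT_TBL_REG as ict_tbl_dma >> ICT_SHIFT, i.e.
 * in ICT_SIZE (4096 byte) units, so the low 12 bits of the DMA address must
 * be zero.
 */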

/*
 * Device is going up: inform it that it should use the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
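
/*
 * iwl_pcie_isr() above is only the hard-irq half of the interrupt handling:
 * it masks further interrupts and returns IRQ_WAKE_THREAD so that the actual
 * service routine, iwl_pcie_irq_handler(), runs in threaded-irq context.
 * The two are expected to be registered together (e.g. via
 * request_threaded_irq()) elsewhere in the transport code.
 */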