root/drivers/net/wireless/realtek/rtw88/pci.c


DEFINITIONS

This source file includes the following definitions.
  1. rtw_pci_get_tx_qsel
  2. rtw_pci_read8
  3. rtw_pci_read16
  4. rtw_pci_read32
  5. rtw_pci_write8
  6. rtw_pci_write16
  7. rtw_pci_write32
  8. rtw_pci_get_tx_desc
  9. rtw_pci_free_tx_ring_skbs
  10. rtw_pci_free_tx_ring
  11. rtw_pci_free_rx_ring_skbs
  12. rtw_pci_free_rx_ring
  13. rtw_pci_free_trx_ring
  14. rtw_pci_init_tx_ring
  15. rtw_pci_reset_rx_desc
  16. rtw_pci_sync_rx_desc_device
  17. rtw_pci_init_rx_ring
  18. rtw_pci_init_trx_ring
  19. rtw_pci_deinit
  20. rtw_pci_init
  21. rtw_pci_reset_buf_desc
  22. rtw_pci_reset_trx_ring
  23. rtw_pci_enable_interrupt
  24. rtw_pci_disable_interrupt
  25. rtw_pci_setup
  26. rtw_pci_dma_reset
  27. rtw_pci_dma_release
  28. rtw_pci_start
  29. rtw_pci_stop
  30. rtw_hw_queue_mapping
  31. rtw_pci_release_rsvd_page
  32. rtw_pci_dma_check
  33. rtw_pci_xmit
  34. rtw_pci_write_data_rsvd_page
  35. rtw_pci_write_data_h2c
  36. rtw_pci_tx
  37. rtw_pci_tx_isr
  38. rtw_pci_rx_isr
  39. rtw_pci_irq_recognized
  40. rtw_pci_interrupt_handler
  41. rtw_pci_interrupt_threadfn
  42. rtw_pci_io_mapping
  43. rtw_pci_io_unmapping
  44. rtw_dbi_write8
  45. rtw_mdio_write
  46. rtw_pci_phy_cfg
  47. rtw_pci_claim
  48. rtw_pci_declaim
  49. rtw_pci_setup_resource
  50. rtw_pci_destroy
  51. rtw_pci_request_irq
  52. rtw_pci_free_irq
  53. rtw_pci_probe
  54. rtw_pci_remove

   1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2 /* Copyright(c) 2018-2019  Realtek Corporation
   3  */
   4 
   5 #include <linux/module.h>
   6 #include <linux/pci.h>
   7 #include "main.h"
   8 #include "pci.h"
   9 #include "tx.h"
  10 #include "rx.h"
  11 #include "fw.h"
  12 #include "debug.h"
  13 
  14 static bool rtw_disable_msi;
  15 module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
  16 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
  17 
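     /* Map each TX queue to the register holding its buffer descriptor
      * index: rtw_pci_xmit() writes the ring's new host index (write
      * pointer) here to kick DMA, and rtw_pci_tx_isr() reads the
      * hardware index back to learn how many descriptors completed.
      */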
  18 static u32 rtw_pci_tx_queue_idx_addr[] = {
  19         [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
  20         [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
  21         [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
  22         [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
  23         [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
  24         [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
  25         [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
  26 };
  27 
  28 static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
  29 {
  30         switch (queue) {
  31         case RTW_TX_QUEUE_BCN:
  32                 return TX_DESC_QSEL_BEACON;
  33         case RTW_TX_QUEUE_H2C:
  34                 return TX_DESC_QSEL_H2C;
  35         case RTW_TX_QUEUE_MGMT:
  36                 return TX_DESC_QSEL_MGMT;
  37         case RTW_TX_QUEUE_HI0:
  38                 return TX_DESC_QSEL_HIGH;
  39         default:
  40                 return skb->priority;
  41         }
  42 }
  43 
  44 static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
  45 {
  46         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  47 
  48         return readb(rtwpci->mmap + addr);
  49 }
  50 
  51 static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
  52 {
  53         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  54 
  55         return readw(rtwpci->mmap + addr);
  56 }
  57 
  58 static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
  59 {
  60         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  61 
  62         return readl(rtwpci->mmap + addr);
  63 }
  64 
  65 static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
  66 {
  67         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  68 
  69         writeb(val, rtwpci->mmap + addr);
  70 }
  71 
  72 static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
  73 {
  74         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  75 
  76         writew(val, rtwpci->mmap + addr);
  77 }
  78 
  79 static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
  80 {
  81         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  82 
  83         writel(val, rtwpci->mmap + addr);
  84 }
  85 
  86 static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
  87 {
  88         int offset = tx_ring->r.desc_size * idx;
  89 
  90         return tx_ring->r.head + offset;
  91 }
  92 
  93 static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
  94                                       struct rtw_pci_tx_ring *tx_ring)
  95 {
  96         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
  97         struct rtw_pci_tx_data *tx_data;
  98         struct sk_buff *skb, *tmp;
  99         dma_addr_t dma;
 100 
 101         /* free every skb remaining in the tx queue */
 102         skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
 103                 __skb_unlink(skb, &tx_ring->queue);
 104                 tx_data = rtw_pci_get_tx_data(skb);
 105                 dma = tx_data->dma;
 106 
 107                 pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
 108                 dev_kfree_skb_any(skb);
 109         }
 110 }
 111 
 112 static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
 113                                  struct rtw_pci_tx_ring *tx_ring)
 114 {
 115         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 116         u8 *head = tx_ring->r.head;
 117         u32 len = tx_ring->r.len;
 118         int ring_sz = len * tx_ring->r.desc_size;
 119 
 120         rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 121 
 122         /* free the ring itself */
 123         pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
 124         tx_ring->r.head = NULL;
 125 }
 126 
 127 static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
 128                                       struct rtw_pci_rx_ring *rx_ring)
 129 {
 130         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 131         struct sk_buff *skb;
 132         int buf_sz = RTK_PCI_RX_BUF_SIZE;
 133         dma_addr_t dma;
 134         int i;
 135 
 136         for (i = 0; i < rx_ring->r.len; i++) {
 137                 skb = rx_ring->buf[i];
 138                 if (!skb)
 139                         continue;
 140 
 141                 dma = *((dma_addr_t *)skb->cb);
 142                 pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
 143                 dev_kfree_skb(skb);
 144                 rx_ring->buf[i] = NULL;
 145         }
 146 }
 147 
 148 static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
 149                                  struct rtw_pci_rx_ring *rx_ring)
 150 {
 151         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 152         u8 *head = rx_ring->r.head;
 153         int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
 154 
 155         rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
 156 
 157         pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
 158 }
 159 
 160 static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
 161 {
 162         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 163         struct rtw_pci_tx_ring *tx_ring;
 164         struct rtw_pci_rx_ring *rx_ring;
 165         int i;
 166 
 167         for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
 168                 tx_ring = &rtwpci->tx_rings[i];
 169                 rtw_pci_free_tx_ring(rtwdev, tx_ring);
 170         }
 171 
 172         for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
 173                 rx_ring = &rtwpci->rx_rings[i];
 174                 rtw_pci_free_rx_ring(rtwdev, rx_ring);
 175         }
 176 }
 177 
 178 static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
 179                                 struct rtw_pci_tx_ring *tx_ring,
 180                                 u8 desc_size, u32 len)
 181 {
 182         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 183         int ring_sz = desc_size * len;
 184         dma_addr_t dma;
 185         u8 *head;
 186 
 187         head = pci_zalloc_consistent(pdev, ring_sz, &dma);
 188         if (!head) {
 189                 rtw_err(rtwdev, "failed to allocate tx ring\n");
 190                 return -ENOMEM;
 191         }
 192 
 193         skb_queue_head_init(&tx_ring->queue);
 194         tx_ring->r.head = head;
 195         tx_ring->r.dma = dma;
 196         tx_ring->r.len = len;
 197         tx_ring->r.desc_size = desc_size;
 198         tx_ring->r.wp = 0;
 199         tx_ring->r.rp = 0;
 200 
 201         return 0;
 202 }
 203 
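     /* Attach one skb to an RX buffer descriptor: map its data buffer
      * for device DMA, stash the dma address in skb->cb so it can be
      * unmapped later, and publish the buffer address and size through
      * the descriptor so the device may fill it.
      */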
 204 static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
 205                                  struct rtw_pci_rx_ring *rx_ring,
 206                                  u32 idx, u32 desc_sz)
 207 {
 208         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 209         struct rtw_pci_rx_buffer_desc *buf_desc;
 210         int buf_sz = RTK_PCI_RX_BUF_SIZE;
 211         dma_addr_t dma;
 212 
 213         if (!skb)
 214                 return -EINVAL;
 215 
 216         dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
 217         if (pci_dma_mapping_error(pdev, dma))
 218                 return -EBUSY;
 219 
 220         *((dma_addr_t *)skb->cb) = dma;
 221         buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
 222                                                      idx * desc_sz);
 223         memset(buf_desc, 0, sizeof(*buf_desc));
 224         buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
 225         buf_desc->dma = cpu_to_le32(dma);
 226 
 227         return 0;
 228 }
 229 
 230 static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
 231                                         struct rtw_pci_rx_ring *rx_ring,
 232                                         u32 idx, u32 desc_sz)
 233 {
 234         struct device *dev = rtwdev->dev;
 235         struct rtw_pci_rx_buffer_desc *buf_desc;
 236         int buf_sz = RTK_PCI_RX_BUF_SIZE;
 237 
 238         dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
 239 
 240         buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
 241                                                      idx * desc_sz);
 242         memset(buf_desc, 0, sizeof(*buf_desc));
 243         buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
 244         buf_desc->dma = cpu_to_le32(dma);
 245 }
 246 
 247 static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
 248                                 struct rtw_pci_rx_ring *rx_ring,
 249                                 u8 desc_size, u32 len)
 250 {
 251         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 252         struct sk_buff *skb = NULL;
 253         dma_addr_t dma;
 254         u8 *head;
 255         int ring_sz = desc_size * len;
 256         int buf_sz = RTK_PCI_RX_BUF_SIZE;
 257         int i, allocated;
 258         int ret = 0;
 259 
 260         head = pci_zalloc_consistent(pdev, ring_sz, &dma);
 261         if (!head) {
 262                 rtw_err(rtwdev, "failed to allocate rx ring\n");
 263                 return -ENOMEM;
 264         }
 265         rx_ring->r.head = head;
 266 
 267         for (i = 0; i < len; i++) {
 268                 skb = dev_alloc_skb(buf_sz);
 269                 if (!skb) {
 270                         allocated = i;
 271                         ret = -ENOMEM;
 272                         goto err_out;
 273                 }
 274 
 275                 memset(skb->data, 0, buf_sz);
 276                 rx_ring->buf[i] = skb;
 277                 ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
 278                 if (ret) {
 279                         allocated = i;
 280                         dev_kfree_skb_any(skb);
 281                         goto err_out;
 282                 }
 283         }
 284 
 285         rx_ring->r.dma = dma;
 286         rx_ring->r.len = len;
 287         rx_ring->r.desc_size = desc_size;
 288         rx_ring->r.wp = 0;
 289         rx_ring->r.rp = 0;
 290 
 291         return 0;
 292 
 293 err_out:
 294         for (i = 0; i < allocated; i++) {
 295                 skb = rx_ring->buf[i];
 296                 if (!skb)
 297                         continue;
 298                 dma = *((dma_addr_t *)skb->cb);
 299                 pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
 300                 dev_kfree_skb_any(skb);
 301                 rx_ring->buf[i] = NULL;
 302         }
 303         pci_free_consistent(pdev, ring_sz, head, dma);
 304 
 305         rtw_err(rtwdev, "failed to init rx buffer\n");
 306 
 307         return ret;
 308 }
 309 
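     /* Allocate every TX and RX ring. On failure, only the rings that
      * were successfully initialized so far (tracked by the i/j loop
      * counters) are torn down again, so a partial allocation never
      * leaks coherent DMA memory or skbs.
      */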
 310 static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
 311 {
 312         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 313         struct rtw_pci_tx_ring *tx_ring;
 314         struct rtw_pci_rx_ring *rx_ring;
 315         struct rtw_chip_info *chip = rtwdev->chip;
 316         int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
 317         int tx_desc_size, rx_desc_size;
 318         u32 len;
 319         int ret;
 320 
 321         tx_desc_size = chip->tx_buf_desc_sz;
 322 
 323         for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
 324                 tx_ring = &rtwpci->tx_rings[i];
 325                 len = max_num_of_tx_queue(i);
 326                 ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
 327                 if (ret)
 328                         goto out;
 329         }
 330 
 331         rx_desc_size = chip->rx_buf_desc_sz;
 332 
 333         for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
 334                 rx_ring = &rtwpci->rx_rings[j];
 335                 ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
 336                                            RTK_MAX_RX_DESC_NUM);
 337                 if (ret)
 338                         goto out;
 339         }
 340 
 341         return 0;
 342 
 343 out:
 344         tx_alloced = i;
 345         for (i = 0; i < tx_alloced; i++) {
 346                 tx_ring = &rtwpci->tx_rings[i];
 347                 rtw_pci_free_tx_ring(rtwdev, tx_ring);
 348         }
 349 
 350         rx_alloced = j;
 351         for (j = 0; j < rx_alloced; j++) {
 352                 rx_ring = &rtwpci->rx_rings[j];
 353                 rtw_pci_free_rx_ring(rtwdev, rx_ring);
 354         }
 355 
 356         return ret;
 357 }
 358 
 359 static void rtw_pci_deinit(struct rtw_dev *rtwdev)
 360 {
 361         rtw_pci_free_trx_ring(rtwdev);
 362 }
 363 
 364 static int rtw_pci_init(struct rtw_dev *rtwdev)
 365 {
 366         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 367         int ret = 0;
 368 
 369         rtwpci->irq_mask[0] = IMR_HIGHDOK |
 370                               IMR_MGNTDOK |
 371                               IMR_BKDOK |
 372                               IMR_BEDOK |
 373                               IMR_VIDOK |
 374                               IMR_VODOK |
 375                               IMR_ROK |
 376                               IMR_BCNDMAINT_E |
 377                               0;
 378         rtwpci->irq_mask[1] = IMR_TXFOVW |
 379                               0;
 380         rtwpci->irq_mask[3] = IMR_H2CDOK |
 381                               0;
 382         spin_lock_init(&rtwpci->irq_lock);
 383         ret = rtw_pci_init_trx_ring(rtwdev);
 384 
 385         return ret;
 386 }
 387 
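     /* Program the base DMA address and length of each ring into the
      * chip's buffer descriptor registers, reset the software read and
      * write pointers, and clear the hardware indices. The MPDU RX
      * queue length register appears to be 12 bits wide, hence the
      * "len & 0xfff" mask below.
      */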
 388 static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
 389 {
 390         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 391         u32 len;
 392         u8 tmp;
 393         dma_addr_t dma;
 394 
 395         tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
 396         rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
 397 
 398         dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
 399         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
 400 
 401         len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
 402         dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
 403         rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
 404         rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
 405         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
 406         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
 407 
 408         len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
 409         dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
 410         rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
 411         rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
 412         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
 413         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
 414 
 415         len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
 416         dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
 417         rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
 418         rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
 419         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
 420         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
 421 
 422         len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
 423         dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
 424         rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
 425         rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
 426         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
 427         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
 428 
 429         len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
 430         dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
 431         rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
 432         rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
 433         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
 434         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
 435 
 436         len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
 437         dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
 438         rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
 439         rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
 440         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
 441         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
 442 
 443         len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
 444         dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
 445         rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
 446         rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
 447         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
 448         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
 449 
 450         len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
 451         dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
 452         rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
 453         rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
 454         rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
 455         rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
 456 
 457         /* reset read/write pointers */
 458         rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
 459 
 460         /* reset H2C queue index */
 461         rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
 462         rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
 463 }
 464 
 465 static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
 466 {
 467         rtw_pci_reset_buf_desc(rtwdev);
 468 }
 469 
 470 static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
 471                                      struct rtw_pci *rtwpci)
 472 {
 473         rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
 474         rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
 475         rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
 476         rtwpci->irq_enabled = true;
 477 }
 478 
 479 static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
 480                                       struct rtw_pci *rtwpci)
 481 {
 482         rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
 483         rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
 484         rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
 485         rtwpci->irq_enabled = false;
 486 }
 487 
 488 static int rtw_pci_setup(struct rtw_dev *rtwdev)
 489 {
 490         rtw_pci_reset_trx_ring(rtwdev);
 491 
 492         return 0;
 493 }
 494 
 495 static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 496 {
 497         /* reset dma and rx tag */
 498         rtw_write32_set(rtwdev, RTK_PCI_CTRL,
 499                         BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
 500         rtwpci->rx_tag = 0;
 501 }
 502 
 503 static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 504 {
 505         struct rtw_pci_tx_ring *tx_ring;
 506         u8 queue;
 507 
 508         for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
 509                 tx_ring = &rtwpci->tx_rings[queue];
 510                 rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 511         }
 512 }
 513 
 514 static int rtw_pci_start(struct rtw_dev *rtwdev)
 515 {
 516         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 517         unsigned long flags;
 518 
 519         rtw_pci_dma_reset(rtwdev, rtwpci);
 520 
 521         spin_lock_irqsave(&rtwpci->irq_lock, flags);
 522         rtw_pci_enable_interrupt(rtwdev, rtwpci);
 523         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 524 
 525         return 0;
 526 }
 527 
 528 static void rtw_pci_stop(struct rtw_dev *rtwdev)
 529 {
 530         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 531         unsigned long flags;
 532 
 533         spin_lock_irqsave(&rtwpci->irq_lock, flags);
 534         rtw_pci_disable_interrupt(rtwdev, rtwpci);
 535         rtw_pci_dma_release(rtwdev, rtwpci);
 536         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 537 }
 538 
 539 static u8 ac_to_hwq[] = {
 540         [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
 541         [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
 542         [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
 543         [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
 544 };
 545 
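     /* Pick a hardware TX queue for an skb: beacons go to the BCN
      * queue, management/control frames to the MGMT queue, and data
      * frames follow the mac80211 AC mapping above (e.g. an skb queued
      * on IEEE80211_AC_VO ends up on RTW_TX_QUEUE_VO). An out-of-range
      * queue mapping falls back to best effort.
      */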
 546 static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
 547 {
 548         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 549         __le16 fc = hdr->frame_control;
 550         u8 q_mapping = skb_get_queue_mapping(skb);
 551         u8 queue;
 552 
 553         if (unlikely(ieee80211_is_beacon(fc)))
 554                 queue = RTW_TX_QUEUE_BCN;
 555         else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
 556                 queue = RTW_TX_QUEUE_MGMT;
 557         else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
 558                 queue = ac_to_hwq[IEEE80211_AC_BE];
 559         else
 560                 queue = ac_to_hwq[q_mapping];
 561 
 562         return queue;
 563 }
 564 
 565 static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
 566                                       struct rtw_pci_tx_ring *ring)
 567 {
 568         struct sk_buff *prev = skb_dequeue(&ring->queue);
 569         struct rtw_pci_tx_data *tx_data;
 570         dma_addr_t dma;
 571 
 572         if (!prev)
 573                 return;
 574 
 575         tx_data = rtw_pci_get_tx_data(prev);
 576         dma = tx_data->dma;
 577         pci_unmap_single(rtwpci->pdev, dma, prev->len,
 578                          PCI_DMA_TODEVICE);
 579         dev_kfree_skb_any(prev);
 580 }
 581 
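     /* Sanity-check RX DMA using the rx tag mechanism. With
      * BIT_RX_TAG_EN set (see rtw_pci_dma_reset()), the hardware
      * presumably stamps an incrementing tag into the total_pkt_size
      * field of each consumed descriptor; comparing it against the
      * driver's own counter detects missed or reordered completions.
      */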
 582 static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
 583                               struct rtw_pci_rx_ring *rx_ring,
 584                               u32 idx)
 585 {
 586         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 587         struct rtw_chip_info *chip = rtwdev->chip;
 588         struct rtw_pci_rx_buffer_desc *buf_desc;
 589         u32 desc_sz = chip->rx_buf_desc_sz;
 590         u16 total_pkt_size;
 591 
 592         buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
 593                                                      idx * desc_sz);
 594         total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
 595 
 596         /* rx tag mismatch, throw a warning */
 597         if (total_pkt_size != rtwpci->rx_tag)
 598                 rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
 599 
 600         rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
 601 }
 602 
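     /* Fill one TX buffer descriptor pair for the skb: segment 0
      * covers the tx packet descriptor pushed in front of the frame,
      * segment 1 covers the payload right behind it in the same DMA
      * mapping. psb_len counts the whole mapped buffer in what appear
      * to be 128-byte blocks; e.g. a 300-byte skb (descriptor
      * included) gives (300 - 1) / 128 + 1 = 3. For the beacon queue
      * the OWN bit is folded into the same field instead of kicking an
      * index register.
      */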
 603 static int rtw_pci_xmit(struct rtw_dev *rtwdev,
 604                         struct rtw_tx_pkt_info *pkt_info,
 605                         struct sk_buff *skb, u8 queue)
 606 {
 607         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 608         struct rtw_chip_info *chip = rtwdev->chip;
 609         struct rtw_pci_tx_ring *ring;
 610         struct rtw_pci_tx_data *tx_data;
 611         dma_addr_t dma;
 612         u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
 613         u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
 614         u32 size;
 615         u32 psb_len;
 616         u8 *pkt_desc;
 617         struct rtw_pci_tx_buffer_desc *buf_desc;
 618         u32 bd_idx;
 619 
 620         ring = &rtwpci->tx_rings[queue];
 621 
 622         size = skb->len;
 623 
 624         if (queue == RTW_TX_QUEUE_BCN)
 625                 rtw_pci_release_rsvd_page(rtwpci, ring);
 626         else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
 627                 return -ENOSPC;
 628 
 629         pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
 630         memset(pkt_desc, 0, tx_pkt_desc_sz);
 631         pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
 632         rtw_tx_fill_tx_desc(pkt_info, skb);
 633         dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
 634                              PCI_DMA_TODEVICE);
 635         if (pci_dma_mapping_error(rtwpci->pdev, dma))
 636                 return -EBUSY;
 637 
 638         /* the DMA mapping is committed after this; there is no way back */
 639         buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
 640         memset(buf_desc, 0, tx_buf_desc_sz);
 641         psb_len = (skb->len - 1) / 128 + 1;
 642         if (queue == RTW_TX_QUEUE_BCN)
 643                 psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
 644 
 645         buf_desc[0].psb_len = cpu_to_le16(psb_len);
 646         buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
 647         buf_desc[0].dma = cpu_to_le32(dma);
 648         buf_desc[1].buf_size = cpu_to_le16(size);
 649         buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
 650 
 651         tx_data = rtw_pci_get_tx_data(skb);
 652         tx_data->dma = dma;
 653         tx_data->sn = pkt_info->sn;
 654         skb_queue_tail(&ring->queue, skb);
 655 
 656         /* kick off tx queue */
 657         if (queue != RTW_TX_QUEUE_BCN) {
 658                 if (++ring->r.wp >= ring->r.len)
 659                         ring->r.wp = 0;
 660                 bd_idx = rtw_pci_tx_queue_idx_addr[queue];
 661                 rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
 662         } else {
 663                 u32 reg_bcn_work;
 664 
 665                 reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
 666                 reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
 667                 rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
 668         }
 669 
 670         return 0;
 671 }
 672 
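     /* Reserved page download: wrap the firmware-provided buffer in an
      * skb, leave headroom for the tx packet descriptor, and send it
      * through the BCN queue with pkt_info.offset set to the
      * descriptor size. The H2C variant below differs only in the
      * target queue and leaves offset at zero.
      */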
 673 static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
 674                                         u32 size)
 675 {
 676         struct sk_buff *skb;
 677         struct rtw_tx_pkt_info pkt_info;
 678         u32 tx_pkt_desc_sz;
 679         u32 length;
 680 
 681         tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
 682         length = size + tx_pkt_desc_sz;
 683         skb = dev_alloc_skb(length);
 684         if (!skb)
 685                 return -ENOMEM;
 686 
 687         skb_reserve(skb, tx_pkt_desc_sz);
 688         memcpy((u8 *)skb_put(skb, size), buf, size);
 689         memset(&pkt_info, 0, sizeof(pkt_info));
 690         pkt_info.tx_pkt_size = size;
 691         pkt_info.offset = tx_pkt_desc_sz;
 692 
 693         return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
 694 }
 695 
 696 static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
 697 {
 698         struct sk_buff *skb;
 699         struct rtw_tx_pkt_info pkt_info;
 700         u32 tx_pkt_desc_sz;
 701         u32 length;
 702 
 703         tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
 704         length = size + tx_pkt_desc_sz;
 705         skb = dev_alloc_skb(length);
 706         if (!skb)
 707                 return -ENOMEM;
 708 
 709         skb_reserve(skb, tx_pkt_desc_sz);
 710         memcpy((u8 *)skb_put(skb, size), buf, size);
 711         memset(&pkt_info, 0, sizeof(pkt_info));
 712         pkt_info.tx_pkt_size = size;
 713 
 714         return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
 715 }
 716 
 717 static int rtw_pci_tx(struct rtw_dev *rtwdev,
 718                       struct rtw_tx_pkt_info *pkt_info,
 719                       struct sk_buff *skb)
 720 {
 721         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 722         struct rtw_pci_tx_ring *ring;
 723         u8 queue = rtw_hw_queue_mapping(skb);
 724         int ret;
 725 
 726         ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
 727         if (ret)
 728                 return ret;
 729 
 730         ring = &rtwpci->tx_rings[queue];
 731         if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
 732                 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
 733                 ring->queue_stopped = true;
 734         }
 735 
 736         return 0;
 737 }
 738 
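     /* TX completion: the high 16 bits of the queue's BD index
      * register hold the hardware read pointer (12 bits used). The
      * number of completed entries is the distance from the software
      * read pointer, with wrap-around; e.g. len = 128, r.rp = 120,
      * cur_rp = 5 gives count = 128 - (120 - 5) = 13.
      */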
 739 static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 740                            u8 hw_queue)
 741 {
 742         struct ieee80211_hw *hw = rtwdev->hw;
 743         struct ieee80211_tx_info *info;
 744         struct rtw_pci_tx_ring *ring;
 745         struct rtw_pci_tx_data *tx_data;
 746         struct sk_buff *skb;
 747         u32 count;
 748         u32 bd_idx_addr;
 749         u32 bd_idx, cur_rp;
 750         u16 q_map;
 751 
 752         ring = &rtwpci->tx_rings[hw_queue];
 753 
 754         bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
 755         bd_idx = rtw_read32(rtwdev, bd_idx_addr);
 756         cur_rp = bd_idx >> 16;
 757         cur_rp &= 0xfff;
 758         if (cur_rp >= ring->r.rp)
 759                 count = cur_rp - ring->r.rp;
 760         else
 761                 count = ring->r.len - (ring->r.rp - cur_rp);
 762 
 763         while (count--) {
 764                 skb = skb_dequeue(&ring->queue);
 765                 if (!skb) {
 766                         rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
 767                                 count, hw_queue, bd_idx, ring->r.rp, cur_rp);
 768                         break;
 769                 }
 770                 tx_data = rtw_pci_get_tx_data(skb);
 771                 pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
 772                                  PCI_DMA_TODEVICE);
 773 
 774                 /* just free command packets from host to card */
 775                 if (hw_queue == RTW_TX_QUEUE_H2C) {
 776                         dev_kfree_skb_irq(skb);
 777                         continue;
 778                 }
 779 
 780                 if (ring->queue_stopped &&
 781                     avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
 782                         q_map = skb_get_queue_mapping(skb);
 783                         ieee80211_wake_queue(hw, q_map);
 784                         ring->queue_stopped = false;
 785                 }
 786 
 787                 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
 788 
 789                 info = IEEE80211_SKB_CB(skb);
 790 
 791                 /* enqueue to wait for tx report */
 792                 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
 793                         rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
 794                         continue;
 795                 }
 796 
 797                 /* report ACK for all others so they are not marked as dropped */
 798                 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 799                         info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 800                 else
 801                         info->flags |= IEEE80211_TX_STAT_ACK;
 802 
 803                 ieee80211_tx_info_clear_status(info);
 804                 ieee80211_tx_status_irqsafe(hw, skb);
 805         }
 806 
 807         ring->r.rp = cur_rp;
 808 }
 809 
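     /* RX path: each received frame is copied out of the ring buffer
      * into a freshly allocated skb before being handed to mac80211,
      * so the original skb can stay DMA-mapped and be given back to
      * the device immediately via rtw_pci_sync_rx_desc_device().
      */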
 810 static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 811                            u8 hw_queue)
 812 {
 813         struct rtw_chip_info *chip = rtwdev->chip;
 814         struct rtw_pci_rx_ring *ring;
 815         struct rtw_rx_pkt_stat pkt_stat;
 816         struct ieee80211_rx_status rx_status;
 817         struct sk_buff *skb, *new;
 818         u32 cur_wp, cur_rp, tmp;
 819         u32 count;
 820         u32 pkt_offset;
 821         u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
 822         u32 buf_desc_sz = chip->rx_buf_desc_sz;
 823         u32 new_len;
 824         u8 *rx_desc;
 825         dma_addr_t dma;
 826 
 827         ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
 828 
 829         tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
 830         cur_wp = tmp >> 16;
 831         cur_wp &= 0xfff;
 832         if (cur_wp >= ring->r.wp)
 833                 count = cur_wp - ring->r.wp;
 834         else
 835                 count = ring->r.len - (ring->r.wp - cur_wp);
 836 
 837         cur_rp = ring->r.rp;
 838         while (count--) {
 839                 rtw_pci_dma_check(rtwdev, ring, cur_rp);
 840                 skb = ring->buf[cur_rp];
 841                 dma = *((dma_addr_t *)skb->cb);
 842                 dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
 843                                         DMA_FROM_DEVICE);
 844                 rx_desc = skb->data;
 845                 chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
 846 
 847                 /* offset from rx_desc to payload */
 848                 pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
 849                              pkt_stat.shift;
 850 
 851                 /* allocate a new skb for this frame,
 852                  * discard the frame if none available
 853                  */
 854                 new_len = pkt_stat.pkt_len + pkt_offset;
 855                 new = dev_alloc_skb(new_len);
 856                 if (WARN_ONCE(!new, "rx routine starvation\n"))
 857                         goto next_rp;
 858 
 859                 /* copy the DMAed data, including the rx_desc, into the new skb */
 860                 skb_put_data(new, skb->data, new_len);
 861 
 862                 if (pkt_stat.is_c2h) {
 863                         rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
 864                 } else {
 865                         /* remove rx_desc */
 866                         skb_pull(new, pkt_offset);
 867 
 868                         rtw_rx_stats(rtwdev, pkt_stat.vif, new);
 869                         memcpy(new->cb, &rx_status, sizeof(rx_status));
 870                         ieee80211_rx_irqsafe(rtwdev->hw, new);
 871                 }
 872 
 873 next_rp:
 874                 /* new skb delivered to mac80211, re-enable original skb DMA */
 875                 rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
 876                                             buf_desc_sz);
 877 
 878                 /* host reads the next element in the ring */
 879                 if (++cur_rp >= ring->r.len)
 880                         cur_rp = 0;
 881         }
 882 
 883         ring->r.rp = cur_rp;
 884         ring->r.wp = cur_wp;
 885         rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
 886 }
 887 
 888 static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
 889                                    struct rtw_pci *rtwpci, u32 *irq_status)
 890 {
 891         irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
 892         irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
 893         irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
 894         irq_status[0] &= rtwpci->irq_mask[0];
 895         irq_status[1] &= rtwpci->irq_mask[1];
 896         irq_status[3] &= rtwpci->irq_mask[3];
 897         rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
 898         rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
 899         rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
 900 }
 901 
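     /* Hard IRQ half: do no real work here. The handler only masks the
      * HIMR registers (see the comment in the function body) and wakes
      * the threaded handler, which services the queues and unmasks the
      * interrupt again when it is done.
      */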
 902 static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
 903 {
 904         struct rtw_dev *rtwdev = dev;
 905         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 906 
 907         spin_lock(&rtwpci->irq_lock);
 908         if (!rtwpci->irq_enabled)
 909                 goto out;
 910 
 911         /* disable RTW PCI interrupt to avoid more interrupts before the end of
 912          * thread function
 913          *
 914          * disable HIMR here to also avoid new HISR flag being raised before
 915          * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
 916          * are cleared, the edge-triggered interrupt will not be generated when
 917          * a new HISR flag is set.
 918          */
 919         rtw_pci_disable_interrupt(rtwdev, rtwpci);
 920 out:
 921         spin_unlock(&rtwpci->irq_lock);
 922 
 923         return IRQ_WAKE_THREAD;
 924 }
 925 
 926 static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
 927 {
 928         struct rtw_dev *rtwdev = dev;
 929         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 930         unsigned long flags;
 931         u32 irq_status[4];
 932 
 933         spin_lock_irqsave(&rtwpci->irq_lock, flags);
 934         rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
 935 
 936         if (irq_status[0] & IMR_MGNTDOK)
 937                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
 938         if (irq_status[0] & IMR_HIGHDOK)
 939                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
 940         if (irq_status[0] & IMR_BEDOK)
 941                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
 942         if (irq_status[0] & IMR_BKDOK)
 943                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
 944         if (irq_status[0] & IMR_VODOK)
 945                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
 946         if (irq_status[0] & IMR_VIDOK)
 947                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
 948         if (irq_status[3] & IMR_H2CDOK)
 949                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
 950         if (irq_status[0] & IMR_ROK)
 951                 rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
 952 
 953         /* all of the jobs for this interrupt have been done */
 954         rtw_pci_enable_interrupt(rtwdev, rtwpci);
 955         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 956 
 957         return IRQ_HANDLED;
 958 }
 959 
 960 static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
 961                               struct pci_dev *pdev)
 962 {
 963         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 964         unsigned long len;
 965         u8 bar_id = 2;
 966         int ret;
 967 
 968         ret = pci_request_regions(pdev, KBUILD_MODNAME);
 969         if (ret) {
 970                 rtw_err(rtwdev, "failed to request pci regions\n");
 971                 return ret;
 972         }
 973 
 974         len = pci_resource_len(pdev, bar_id);
 975         rtwpci->mmap = pci_iomap(pdev, bar_id, len);
 976         if (!rtwpci->mmap) {
 977                 rtw_err(rtwdev, "failed to map pci memory\n");
 978                 return -ENOMEM;
 979         }
 980 
 981         return 0;
 982 }
 983 
 984 static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
 985                                  struct pci_dev *pdev)
 986 {
 987         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 988 
 989         if (rtwpci->mmap) {
 990                 pci_iounmap(pdev, rtwpci->mmap);
 991                 pci_release_regions(pdev);
 992         }
 993 }
 994 
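     /* DBI byte write: the target byte within the dword is selected by
      * what looks like a byte-enable bit in write_addr bits 15:12,
      * derived from the low two address bits; e.g. addr = 0x719 gives
      * remainder = 1 and write_addr = 0x0718 | BIT(13) = 0x2718.
      * Completion is polled for up to 20 * 10 us.
      */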
 995 static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
 996 {
 997         u16 write_addr;
 998         u16 remainder = addr & 0x3;
 999         u8 flag;
1000         u8 cnt = 20;
1001 
1002         write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
1003         rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1004         rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1005         rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
1006 
1007         flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1008         while (flag && (cnt != 0)) {
1009                 udelay(10);
1010                 flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1011                 cnt--;
1012         }
1013 
1014         WARN(flag, "DBI write fail\n");
1015 }
1016 
1017 static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1018 {
1019         u8 page;
1020         u8 wflag;
1021         u8 cnt;
1022 
1023         rtw_write16(rtwdev, REG_MDIO_V1, data);
1024 
1025         page = addr < 0x20 ? 0 : 1;
1026         page += g1 ? 0 : 2;
1027         rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
1028         rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1029 
1030         rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1031         wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
1032 
1033         cnt = 20;
1034         while (wflag && (cnt != 0)) {
1035                 udelay(10);
1036                 wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1037                                         BIT_MDIO_WFLAG_V1);
1038                 cnt--;
1039         }
1040 
1041         WARN(wflag, "MDIO write fail\n");
1042 }
1043 
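     /* Apply the chip's PCIe PHY parameter tables. Entries are
      * filtered by cut_mask against BIT(cut_version), an offset of
      * 0xffff terminates a table, and ip_sel decides whether a value
      * goes out through MDIO (RTW_IP_SEL_PHY) or a DBI byte write.
      * gen1 parameters use the g1 MDIO page pair, gen2 the other.
      */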
1044 static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1045 {
1046         struct rtw_chip_info *chip = rtwdev->chip;
1047         struct rtw_intf_phy_para *para;
1048         u16 cut;
1049         u16 value;
1050         u16 offset;
1051         int i;
1052 
1053         cut = BIT(0) << rtwdev->hal.cut_version;
1054 
1055         for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1056                 para = &chip->intf_table->gen1_para[i];
1057                 if (!(para->cut_mask & cut))
1058                         continue;
1059                 if (para->offset == 0xffff)
1060                         break;
1061                 offset = para->offset;
1062                 value = para->value;
1063                 if (para->ip_sel == RTW_IP_SEL_PHY)
1064                         rtw_mdio_write(rtwdev, offset, value, true);
1065                 else
1066                         rtw_dbi_write8(rtwdev, offset, value);
1067         }
1068 
1069         for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1070                 para = &chip->intf_table->gen2_para[i];
1071                 if (!(para->cut_mask & cut))
1072                         continue;
1073                 if (para->offset == 0xffff)
1074                         break;
1075                 offset = para->offset;
1076                 value = para->value;
1077                 if (para->ip_sel == RTW_IP_SEL_PHY)
1078                         rtw_mdio_write(rtwdev, offset, value, false);
1079                 else
1080                         rtw_dbi_write8(rtwdev, offset, value);
1081         }
1082 }
1083 
1084 static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1085 {
1086         int ret;
1087 
1088         ret = pci_enable_device(pdev);
1089         if (ret) {
1090                 rtw_err(rtwdev, "failed to enable pci device\n");
1091                 return ret;
1092         }
1093 
1094         pci_set_master(pdev);
1095         pci_set_drvdata(pdev, rtwdev->hw);
1096         SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1097 
1098         return 0;
1099 }
1100 
1101 static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1102 {
1103         pci_clear_master(pdev);
1104         pci_disable_device(pdev);
1105 }
1106 
1107 static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1108 {
1109         struct rtw_pci *rtwpci;
1110         int ret;
1111 
1112         rtwpci = (struct rtw_pci *)rtwdev->priv;
1113         rtwpci->pdev = pdev;
1114 
1115         /* after this, the driver can access the hw registers */
1116         ret = rtw_pci_io_mapping(rtwdev, pdev);
1117         if (ret) {
1118                 rtw_err(rtwdev, "failed to request pci io region\n");
1119                 goto err_out;
1120         }
1121 
1122         ret = rtw_pci_init(rtwdev);
1123         if (ret) {
1124                 rtw_err(rtwdev, "failed to allocate pci resources\n");
1125                 goto err_io_unmap;
1126         }
1127 
1128         rtw_pci_phy_cfg(rtwdev);
1129 
1130         return 0;
1131 
1132 err_io_unmap:
1133         rtw_pci_io_unmapping(rtwdev, pdev);
1134 
1135 err_out:
1136         return ret;
1137 }
1138 
1139 static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1140 {
1141         rtw_pci_deinit(rtwdev);
1142         rtw_pci_io_unmapping(rtwdev, pdev);
1143 }
1144 
1145 static struct rtw_hci_ops rtw_pci_ops = {
1146         .tx = rtw_pci_tx,
1147         .setup = rtw_pci_setup,
1148         .start = rtw_pci_start,
1149         .stop = rtw_pci_stop,
1150 
1151         .read8 = rtw_pci_read8,
1152         .read16 = rtw_pci_read16,
1153         .read32 = rtw_pci_read32,
1154         .write8 = rtw_pci_write8,
1155         .write16 = rtw_pci_write16,
1156         .write32 = rtw_pci_write32,
1157         .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1158         .write_data_h2c = rtw_pci_write_data_h2c,
1159 };
1160 
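     /* IRQ setup: prefer a single MSI vector and fall back to the
      * legacy INTx line, unless MSI is turned off via the disable_msi
      * module parameter. The handler is split into a hard-irq part and
      * a threaded part (see above), requested as a managed (devm) IRQ.
      */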
1161 static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1162 {
1163         unsigned int flags = PCI_IRQ_LEGACY;
1164         int ret;
1165 
1166         if (!rtw_disable_msi)
1167                 flags |= PCI_IRQ_MSI;
1168 
1169         ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1170         if (ret < 0) {
1171                 rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1172                 return ret;
1173         }
1174 
1175         ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1176                                         rtw_pci_interrupt_handler,
1177                                         rtw_pci_interrupt_threadfn,
1178                                         IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1179         if (ret) {
1180                 rtw_err(rtwdev, "failed to request irq %d\n", ret);
1181                 pci_free_irq_vectors(pdev);
1182         }
1183 
1184         return ret;
1185 }
1186 
1187 static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1188 {
1189         devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1190         pci_free_irq_vectors(pdev);
1191 }
1192 
1193 static int rtw_pci_probe(struct pci_dev *pdev,
1194                          const struct pci_device_id *id)
1195 {
1196         struct ieee80211_hw *hw;
1197         struct rtw_dev *rtwdev;
1198         int drv_data_size;
1199         int ret;
1200 
1201         drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1202         hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1203         if (!hw) {
1204                 dev_err(&pdev->dev, "failed to allocate hw\n");
1205                 return -ENOMEM;
1206         }
1207 
1208         rtwdev = hw->priv;
1209         rtwdev->hw = hw;
1210         rtwdev->dev = &pdev->dev;
1211         rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1212         rtwdev->hci.ops = &rtw_pci_ops;
1213         rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1214 
1215         ret = rtw_core_init(rtwdev);
1216         if (ret)
1217                 goto err_release_hw;
1218 
1219         rtw_dbg(rtwdev, RTW_DBG_PCI,
1220                 "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1221                 pdev->vendor, pdev->device, pdev->revision);
1222 
1223         ret = rtw_pci_claim(rtwdev, pdev);
1224         if (ret) {
1225                 rtw_err(rtwdev, "failed to claim pci device\n");
1226                 goto err_deinit_core;
1227         }
1228 
1229         ret = rtw_pci_setup_resource(rtwdev, pdev);
1230         if (ret) {
1231                 rtw_err(rtwdev, "failed to setup pci resources\n");
1232                 goto err_pci_declaim;
1233         }
1234 
1235         ret = rtw_chip_info_setup(rtwdev);
1236         if (ret) {
1237                 rtw_err(rtwdev, "failed to setup chip information\n");
1238                 goto err_destroy_pci;
1239         }
1240 
1241         ret = rtw_register_hw(rtwdev, hw);
1242         if (ret) {
1243                 rtw_err(rtwdev, "failed to register hw\n");
1244                 goto err_destroy_pci;
1245         }
1246 
1247         ret = rtw_pci_request_irq(rtwdev, pdev);
1248         if (ret) {
1249                 ieee80211_unregister_hw(hw);
1250                 goto err_destroy_pci;
1251         }
1252 
1253         return 0;
1254 
1255 err_destroy_pci:
1256         rtw_pci_destroy(rtwdev, pdev);
1257 
1258 err_pci_declaim:
1259         rtw_pci_declaim(rtwdev, pdev);
1260 
1261 err_deinit_core:
1262         rtw_core_deinit(rtwdev);
1263 
1264 err_release_hw:
1265         ieee80211_free_hw(hw);
1266 
1267         return ret;
1268 }
1269 
1270 static void rtw_pci_remove(struct pci_dev *pdev)
1271 {
1272         struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1273         struct rtw_dev *rtwdev;
1274         struct rtw_pci *rtwpci;
1275 
1276         if (!hw)
1277                 return;
1278 
1279         rtwdev = hw->priv;
1280         rtwpci = (struct rtw_pci *)rtwdev->priv;
1281 
1282         rtw_unregister_hw(rtwdev, hw);
1283         rtw_pci_disable_interrupt(rtwdev, rtwpci);
1284         rtw_pci_destroy(rtwdev, pdev);
1285         rtw_pci_declaim(rtwdev, pdev);
1286         rtw_pci_free_irq(rtwdev, pdev);
1287         rtw_core_deinit(rtwdev);
1288         ieee80211_free_hw(hw);
1289 }
1290 
1291 static const struct pci_device_id rtw_pci_id_table[] = {
1292 #ifdef CONFIG_RTW88_8822BE
1293         { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
1294 #endif
1295 #ifdef CONFIG_RTW88_8822CE
1296         { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
1297 #endif
1298         {},
1299 };
1300 MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
1301 
1302 static struct pci_driver rtw_pci_driver = {
1303         .name = "rtw_pci",
1304         .id_table = rtw_pci_id_table,
1305         .probe = rtw_pci_probe,
1306         .remove = rtw_pci_remove,
1307 };
1308 module_pci_driver(rtw_pci_driver);
1309 
1310 MODULE_AUTHOR("Realtek Corporation");
1311 MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
1312 MODULE_LICENSE("Dual BSD/GPL");
