root/drivers/pci/controller/pcie-iproc.c


DEFINITIONS

This source file includes the following definitions.
  1. iproc_data
  2. iproc_pcie_reg_is_invalid
  3. iproc_pcie_reg_offset
  4. iproc_pcie_read_reg
  5. iproc_pcie_write_reg
  6. iproc_pcie_apb_err_disable
  7. iproc_pcie_map_ep_cfg_reg
  8. iproc_pcie_cfg_retry
  9. iproc_pcie_fix_cap
  10. iproc_pcie_config_read
  11. iproc_pcie_map_cfg_bus
  12. iproc_pcie_bus_map_cfg_bus
  13. iproc_pci_raw_config_read32
  14. iproc_pci_raw_config_write32
  15. iproc_pcie_config_read32
  16. iproc_pcie_config_write32
  17. iproc_pcie_perst_ctrl
  18. iproc_pcie_shutdown
  19. iproc_pcie_check_link
  20. iproc_pcie_enable
  21. iproc_pcie_ob_is_valid
  22. iproc_pcie_ob_write
  23. iproc_pcie_setup_ob
  24. iproc_pcie_map_ranges
  25. iproc_pcie_ib_is_in_use
  26. iproc_pcie_ib_check_type
  27. iproc_pcie_ib_write
  28. iproc_pcie_setup_ib
  29. iproc_pcie_add_dma_range
  30. iproc_pcie_map_dma_ranges
  31. iproce_pcie_get_msi
  32. iproc_pcie_paxb_v2_msi_steer
  33. iproc_pcie_paxc_v2_msi_steer
  34. iproc_pcie_msi_steer
  35. iproc_pcie_msi_enable
  36. iproc_pcie_msi_disable
  37. iproc_pcie_rev_init
  38. iproc_pcie_setup
  39. iproc_pcie_remove
  40. quirk_paxc_disable_msi_parsing
  41. quirk_paxc_bridge

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
   4  * Copyright (C) 2015 Broadcom Corporation
   5  */
   6 
   7 #include <linux/kernel.h>
   8 #include <linux/pci.h>
   9 #include <linux/msi.h>
  10 #include <linux/clk.h>
  11 #include <linux/module.h>
  12 #include <linux/mbus.h>
  13 #include <linux/slab.h>
  14 #include <linux/delay.h>
  15 #include <linux/interrupt.h>
  16 #include <linux/irqchip/arm-gic-v3.h>
  17 #include <linux/platform_device.h>
  18 #include <linux/of_address.h>
  19 #include <linux/of_pci.h>
  20 #include <linux/of_irq.h>
  21 #include <linux/of_platform.h>
  22 #include <linux/phy/phy.h>
  23 
  24 #include "pcie-iproc.h"
  25 
  26 #define EP_PERST_SOURCE_SELECT_SHIFT    2
  27 #define EP_PERST_SOURCE_SELECT          BIT(EP_PERST_SOURCE_SELECT_SHIFT)
  28 #define EP_MODE_SURVIVE_PERST_SHIFT     1
  29 #define EP_MODE_SURVIVE_PERST           BIT(EP_MODE_SURVIVE_PERST_SHIFT)
  30 #define RC_PCIE_RST_OUTPUT_SHIFT        0
  31 #define RC_PCIE_RST_OUTPUT              BIT(RC_PCIE_RST_OUTPUT_SHIFT)
  32 #define PAXC_RESET_MASK                 0x7f
  33 
  34 #define GIC_V3_CFG_SHIFT                0
  35 #define GIC_V3_CFG                      BIT(GIC_V3_CFG_SHIFT)
  36 
  37 #define MSI_ENABLE_CFG_SHIFT            0
  38 #define MSI_ENABLE_CFG                  BIT(MSI_ENABLE_CFG_SHIFT)
  39 
  40 #define CFG_IND_ADDR_MASK               0x00001ffc
  41 
  42 #define CFG_ADDR_BUS_NUM_SHIFT          20
  43 #define CFG_ADDR_BUS_NUM_MASK           0x0ff00000
  44 #define CFG_ADDR_DEV_NUM_SHIFT          15
  45 #define CFG_ADDR_DEV_NUM_MASK           0x000f8000
  46 #define CFG_ADDR_FUNC_NUM_SHIFT         12
  47 #define CFG_ADDR_FUNC_NUM_MASK          0x00007000
  48 #define CFG_ADDR_REG_NUM_SHIFT          2
  49 #define CFG_ADDR_REG_NUM_MASK           0x00000ffc
  50 #define CFG_ADDR_CFG_TYPE_SHIFT         0
  51 #define CFG_ADDR_CFG_TYPE_MASK          0x00000003
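      /*
       * Illustrative example: a type-1 config access to bus 1, device 0,
       * function 0, register 0 is encoded as
       * (1 << CFG_ADDR_BUS_NUM_SHIFT) | 0x1 = 0x00100001, matching the
       * address composed in iproc_pcie_map_ep_cfg_reg() below.
       */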
  52 
  53 #define SYS_RC_INTX_MASK                0xf
  54 
  55 #define PCIE_PHYLINKUP_SHIFT            3
  56 #define PCIE_PHYLINKUP                  BIT(PCIE_PHYLINKUP_SHIFT)
  57 #define PCIE_DL_ACTIVE_SHIFT            2
  58 #define PCIE_DL_ACTIVE                  BIT(PCIE_DL_ACTIVE_SHIFT)
  59 
  60 #define APB_ERR_EN_SHIFT                0
  61 #define APB_ERR_EN                      BIT(APB_ERR_EN_SHIFT)
  62 
  63 #define CFG_RD_SUCCESS                  0
  64 #define CFG_RD_UR                       1
  65 #define CFG_RD_CRS                      2
  66 #define CFG_RD_CA                       3
  67 #define CFG_RETRY_STATUS                0xffff0001
  68 #define CFG_RETRY_STATUS_TIMEOUT_US     500000 /* 500 milliseconds */
  69 
  70 /* derive the enum index of the outbound/inbound mapping registers */
  71 #define MAP_REG(base_reg, index)        ((base_reg) + (index) * 2)
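      /*
       * Illustrative example: with the iproc_pcie_reg enum below, where the
       * OARR/OMAP (and IARR/IMAP) entries are laid out as consecutive pairs,
       * MAP_REG(IPROC_PCIE_OARR0, 1) resolves to IPROC_PCIE_OARR1 and
       * MAP_REG(IPROC_PCIE_IARR0, 2) resolves to IPROC_PCIE_IARR2.
       */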
  72 
  73 /*
  74  * Maximum number of outbound mapping window sizes that can be supported by any
  75  * OARR/OMAP mapping pair
  76  */
  77 #define MAX_NUM_OB_WINDOW_SIZES         4
  78 
  79 #define OARR_VALID_SHIFT                0
  80 #define OARR_VALID                      BIT(OARR_VALID_SHIFT)
  81 #define OARR_SIZE_CFG_SHIFT             1
  82 
  83 /*
  84  * Maximum number of inbound mapping region sizes that can be supported by an
  85  * IARR
  86  */
  87 #define MAX_NUM_IB_REGION_SIZES         9
  88 
  89 #define IMAP_VALID_SHIFT                0
  90 #define IMAP_VALID                      BIT(IMAP_VALID_SHIFT)
  91 
  92 #define IPROC_PCI_PM_CAP                0x48
  93 #define IPROC_PCI_PM_CAP_MASK           0xffff
  94 #define IPROC_PCI_EXP_CAP               0xac
  95 
  96 #define IPROC_PCIE_REG_INVALID          0xffff
  97 
  98 /**
   99  * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific parameters
 100  *
 101  * @window_sizes: list of supported outbound mapping window sizes in MB
 102  * @nr_sizes: number of supported outbound mapping window sizes
 103  */
 104 struct iproc_pcie_ob_map {
 105         resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
 106         unsigned int nr_sizes;
 107 };
 108 
 109 static const struct iproc_pcie_ob_map paxb_ob_map[] = {
 110         {
 111                 /* OARR0/OMAP0 */
 112                 .window_sizes = { 128, 256 },
 113                 .nr_sizes = 2,
 114         },
 115         {
 116                 /* OARR1/OMAP1 */
 117                 .window_sizes = { 128, 256 },
 118                 .nr_sizes = 2,
 119         },
 120 };
 121 
 122 static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
 123         {
 124                 /* OARR0/OMAP0 */
 125                 .window_sizes = { 128, 256 },
 126                 .nr_sizes = 2,
 127         },
 128         {
 129                 /* OARR1/OMAP1 */
 130                 .window_sizes = { 128, 256 },
 131                 .nr_sizes = 2,
 132         },
 133         {
 134                 /* OARR2/OMAP2 */
 135                 .window_sizes = { 128, 256, 512, 1024 },
 136                 .nr_sizes = 4,
 137         },
 138         {
 139                 /* OARR3/OMAP3 */
 140                 .window_sizes = { 128, 256, 512, 1024 },
 141                 .nr_sizes = 4,
 142         },
 143 };
 144 
 145 /**
  146  * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type
 147  */
 148 enum iproc_pcie_ib_map_type {
 149         /* for DDR memory */
 150         IPROC_PCIE_IB_MAP_MEM = 0,
 151 
 152         /* for device I/O memory */
 153         IPROC_PCIE_IB_MAP_IO,
 154 
 155         /* invalid or unused */
 156         IPROC_PCIE_IB_MAP_INVALID
 157 };
 158 
 159 /**
  160  * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific parameters
 161  *
 162  * @type: inbound mapping region type
 163  * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
 164  * SZ_1G
 165  * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
 166  * GB, depending on the size unit
 167  * @nr_sizes: number of supported inbound mapping region sizes
 168  * @nr_windows: number of supported inbound mapping windows for the region
 169  * @imap_addr_offset: register offset between the upper and lower 32-bit
 170  * IMAP address registers
 171  * @imap_window_offset: register offset between each IMAP window
 172  */
 173 struct iproc_pcie_ib_map {
 174         enum iproc_pcie_ib_map_type type;
 175         unsigned int size_unit;
 176         resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
 177         unsigned int nr_sizes;
 178         unsigned int nr_windows;
 179         u16 imap_addr_offset;
 180         u16 imap_window_offset;
 181 };
 182 
 183 static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
 184         {
 185                 /* IARR0/IMAP0 */
 186                 .type = IPROC_PCIE_IB_MAP_IO,
 187                 .size_unit = SZ_1K,
 188                 .region_sizes = { 32 },
 189                 .nr_sizes = 1,
 190                 .nr_windows = 8,
 191                 .imap_addr_offset = 0x40,
 192                 .imap_window_offset = 0x4,
 193         },
 194         {
 195                 /* IARR1/IMAP1 (currently unused) */
 196                 .type = IPROC_PCIE_IB_MAP_INVALID,
 197         },
 198         {
 199                 /* IARR2/IMAP2 */
 200                 .type = IPROC_PCIE_IB_MAP_MEM,
 201                 .size_unit = SZ_1M,
 202                 .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
 203                                   16384 },
 204                 .nr_sizes = 9,
 205                 .nr_windows = 1,
 206                 .imap_addr_offset = 0x4,
 207                 .imap_window_offset = 0x8,
 208         },
 209         {
 210                 /* IARR3/IMAP3 */
 211                 .type = IPROC_PCIE_IB_MAP_MEM,
 212                 .size_unit = SZ_1G,
 213                 .region_sizes = { 1, 2, 4, 8, 16, 32 },
 214                 .nr_sizes = 6,
 215                 .nr_windows = 8,
 216                 .imap_addr_offset = 0x4,
 217                 .imap_window_offset = 0x8,
 218         },
 219         {
 220                 /* IARR4/IMAP4 */
 221                 .type = IPROC_PCIE_IB_MAP_MEM,
 222                 .size_unit = SZ_1G,
 223                 .region_sizes = { 32, 64, 128, 256, 512 },
 224                 .nr_sizes = 5,
 225                 .nr_windows = 8,
 226                 .imap_addr_offset = 0x4,
 227                 .imap_window_offset = 0x8,
 228         },
 229 };
 230 
 231 /*
 232  * iProc PCIe host registers
 233  */
 234 enum iproc_pcie_reg {
 235         /* clock/reset signal control */
 236         IPROC_PCIE_CLK_CTRL = 0,
 237 
 238         /*
 239          * To allow MSI to be steered to an external MSI controller (e.g., ARM
 240          * GICv3 ITS)
 241          */
 242         IPROC_PCIE_MSI_GIC_MODE,
 243 
 244         /*
 245          * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
 246          * window where the MSI posted writes are written, for the writes to be
 247          * interpreted as MSI writes.
 248          */
 249         IPROC_PCIE_MSI_BASE_ADDR,
 250         IPROC_PCIE_MSI_WINDOW_SIZE,
 251 
 252         /*
 253          * To hold the address of the register where the MSI writes are
 254          * programed.  When ARM GICv3 ITS is used, this should be programmed
 255          * with the address of the GITS_TRANSLATER register.
 256          */
 257         IPROC_PCIE_MSI_ADDR_LO,
 258         IPROC_PCIE_MSI_ADDR_HI,
 259 
 260         /* enable MSI */
 261         IPROC_PCIE_MSI_EN_CFG,
 262 
 263         /* allow access to root complex configuration space */
 264         IPROC_PCIE_CFG_IND_ADDR,
 265         IPROC_PCIE_CFG_IND_DATA,
 266 
 267         /* allow access to device configuration space */
 268         IPROC_PCIE_CFG_ADDR,
 269         IPROC_PCIE_CFG_DATA,
 270 
 271         /* enable INTx */
 272         IPROC_PCIE_INTX_EN,
 273 
 274         /* outbound address mapping */
 275         IPROC_PCIE_OARR0,
 276         IPROC_PCIE_OMAP0,
 277         IPROC_PCIE_OARR1,
 278         IPROC_PCIE_OMAP1,
 279         IPROC_PCIE_OARR2,
 280         IPROC_PCIE_OMAP2,
 281         IPROC_PCIE_OARR3,
 282         IPROC_PCIE_OMAP3,
 283 
 284         /* inbound address mapping */
 285         IPROC_PCIE_IARR0,
 286         IPROC_PCIE_IMAP0,
 287         IPROC_PCIE_IARR1,
 288         IPROC_PCIE_IMAP1,
 289         IPROC_PCIE_IARR2,
 290         IPROC_PCIE_IMAP2,
 291         IPROC_PCIE_IARR3,
 292         IPROC_PCIE_IMAP3,
 293         IPROC_PCIE_IARR4,
 294         IPROC_PCIE_IMAP4,
 295 
 296         /* config read status */
 297         IPROC_PCIE_CFG_RD_STATUS,
 298 
 299         /* link status */
 300         IPROC_PCIE_LINK_STATUS,
 301 
 302         /* enable APB error for unsupported requests */
 303         IPROC_PCIE_APB_ERR_EN,
 304 
 305         /* total number of core registers */
 306         IPROC_PCIE_MAX_NUM_REG,
 307 };
 308 
 309 /* iProc PCIe PAXB BCMA registers */
 310 static const u16 iproc_pcie_reg_paxb_bcma[] = {
 311         [IPROC_PCIE_CLK_CTRL]           = 0x000,
 312         [IPROC_PCIE_CFG_IND_ADDR]       = 0x120,
 313         [IPROC_PCIE_CFG_IND_DATA]       = 0x124,
 314         [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 315         [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 316         [IPROC_PCIE_INTX_EN]            = 0x330,
 317         [IPROC_PCIE_LINK_STATUS]        = 0xf0c,
 318 };
 319 
 320 /* iProc PCIe PAXB registers */
 321 static const u16 iproc_pcie_reg_paxb[] = {
 322         [IPROC_PCIE_CLK_CTRL]           = 0x000,
 323         [IPROC_PCIE_CFG_IND_ADDR]       = 0x120,
 324         [IPROC_PCIE_CFG_IND_DATA]       = 0x124,
 325         [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 326         [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 327         [IPROC_PCIE_INTX_EN]            = 0x330,
 328         [IPROC_PCIE_OARR0]              = 0xd20,
 329         [IPROC_PCIE_OMAP0]              = 0xd40,
 330         [IPROC_PCIE_OARR1]              = 0xd28,
 331         [IPROC_PCIE_OMAP1]              = 0xd48,
 332         [IPROC_PCIE_LINK_STATUS]        = 0xf0c,
 333         [IPROC_PCIE_APB_ERR_EN]         = 0xf40,
 334 };
 335 
 336 /* iProc PCIe PAXB v2 registers */
 337 static const u16 iproc_pcie_reg_paxb_v2[] = {
 338         [IPROC_PCIE_CLK_CTRL]           = 0x000,
 339         [IPROC_PCIE_CFG_IND_ADDR]       = 0x120,
 340         [IPROC_PCIE_CFG_IND_DATA]       = 0x124,
 341         [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 342         [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 343         [IPROC_PCIE_INTX_EN]            = 0x330,
 344         [IPROC_PCIE_OARR0]              = 0xd20,
 345         [IPROC_PCIE_OMAP0]              = 0xd40,
 346         [IPROC_PCIE_OARR1]              = 0xd28,
 347         [IPROC_PCIE_OMAP1]              = 0xd48,
 348         [IPROC_PCIE_OARR2]              = 0xd60,
 349         [IPROC_PCIE_OMAP2]              = 0xd68,
 350         [IPROC_PCIE_OARR3]              = 0xdf0,
 351         [IPROC_PCIE_OMAP3]              = 0xdf8,
 352         [IPROC_PCIE_IARR0]              = 0xd00,
 353         [IPROC_PCIE_IMAP0]              = 0xc00,
 354         [IPROC_PCIE_IARR2]              = 0xd10,
 355         [IPROC_PCIE_IMAP2]              = 0xcc0,
 356         [IPROC_PCIE_IARR3]              = 0xe00,
 357         [IPROC_PCIE_IMAP3]              = 0xe08,
 358         [IPROC_PCIE_IARR4]              = 0xe68,
 359         [IPROC_PCIE_IMAP4]              = 0xe70,
 360         [IPROC_PCIE_CFG_RD_STATUS]      = 0xee0,
 361         [IPROC_PCIE_LINK_STATUS]        = 0xf0c,
 362         [IPROC_PCIE_APB_ERR_EN]         = 0xf40,
 363 };
 364 
 365 /* iProc PCIe PAXC v1 registers */
 366 static const u16 iproc_pcie_reg_paxc[] = {
 367         [IPROC_PCIE_CLK_CTRL]           = 0x000,
 368         [IPROC_PCIE_CFG_IND_ADDR]       = 0x1f0,
 369         [IPROC_PCIE_CFG_IND_DATA]       = 0x1f4,
 370         [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 371         [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 372 };
 373 
 374 /* iProc PCIe PAXC v2 registers */
 375 static const u16 iproc_pcie_reg_paxc_v2[] = {
 376         [IPROC_PCIE_MSI_GIC_MODE]       = 0x050,
 377         [IPROC_PCIE_MSI_BASE_ADDR]      = 0x074,
 378         [IPROC_PCIE_MSI_WINDOW_SIZE]    = 0x078,
 379         [IPROC_PCIE_MSI_ADDR_LO]        = 0x07c,
 380         [IPROC_PCIE_MSI_ADDR_HI]        = 0x080,
 381         [IPROC_PCIE_MSI_EN_CFG]         = 0x09c,
 382         [IPROC_PCIE_CFG_IND_ADDR]       = 0x1f0,
 383         [IPROC_PCIE_CFG_IND_DATA]       = 0x1f4,
 384         [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 385         [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 386 };
 387 
 388 /*
  389  * List of device IDs of controllers that have a corrupted capability list
  390  * that requires a SW fixup
 391  */
 392 static const u16 iproc_pcie_corrupt_cap_did[] = {
 393         0x16cd,
 394         0x16f0,
 395         0xd802,
 396         0xd804
 397 };
 398 
 399 static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
 400 {
 401         struct iproc_pcie *pcie = bus->sysdata;
 402         return pcie;
 403 }
 404 
 405 static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
 406 {
 407         return !!(reg_offset == IPROC_PCIE_REG_INVALID);
 408 }
 409 
 410 static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
 411                                         enum iproc_pcie_reg reg)
 412 {
 413         return pcie->reg_offsets[reg];
 414 }
 415 
 416 static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
 417                                       enum iproc_pcie_reg reg)
 418 {
 419         u16 offset = iproc_pcie_reg_offset(pcie, reg);
 420 
 421         if (iproc_pcie_reg_is_invalid(offset))
 422                 return 0;
 423 
 424         return readl(pcie->base + offset);
 425 }
 426 
 427 static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
 428                                         enum iproc_pcie_reg reg, u32 val)
 429 {
 430         u16 offset = iproc_pcie_reg_offset(pcie, reg);
 431 
 432         if (iproc_pcie_reg_is_invalid(offset))
 433                 return;
 434 
 435         writel(val, pcie->base + offset);
 436 }
 437 
  438 /*
 439  * APB error forwarding can be disabled during access of configuration
 440  * registers of the endpoint device, to prevent unsupported requests
 441  * (typically seen during enumeration with multi-function devices) from
 442  * triggering a system exception.
 443  */
 444 static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
 445                                               bool disable)
 446 {
 447         struct iproc_pcie *pcie = iproc_data(bus);
 448         u32 val;
 449 
 450         if (bus->number && pcie->has_apb_err_disable) {
 451                 val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
 452                 if (disable)
 453                         val &= ~APB_ERR_EN;
 454                 else
 455                         val |= APB_ERR_EN;
 456                 iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
 457         }
 458 }
 459 
 460 static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
 461                                                unsigned int busno,
 462                                                unsigned int slot,
 463                                                unsigned int fn,
 464                                                int where)
 465 {
 466         u16 offset;
 467         u32 val;
 468 
 469         /* EP device access */
 470         val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
 471                 (slot << CFG_ADDR_DEV_NUM_SHIFT) |
 472                 (fn << CFG_ADDR_FUNC_NUM_SHIFT) |
 473                 (where & CFG_ADDR_REG_NUM_MASK) |
 474                 (1 & CFG_ADDR_CFG_TYPE_MASK);
 475 
 476         iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
 477         offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
 478 
 479         if (iproc_pcie_reg_is_invalid(offset))
 480                 return NULL;
 481 
 482         return (pcie->base + offset);
 483 }
 484 
 485 static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
 486                                          void __iomem *cfg_data_p)
 487 {
 488         int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
 489         unsigned int data;
 490         u32 status;
 491 
 492         /*
 493          * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
 494          * affects config reads of the Vendor ID.  For config writes or any
 495          * other config reads, the Root may automatically reissue the
 496          * configuration request again as a new request.
 497          *
 498          * For config reads, this hardware returns CFG_RETRY_STATUS data
 499          * when it receives a CRS completion, regardless of the address of
 500          * the read or the CRS Software Visibility Enable bit.  As a
 501          * partial workaround for this, we retry in software any read that
 502          * returns CFG_RETRY_STATUS.
 503          *
 504          * Note that a non-Vendor ID config register may have a value of
 505          * CFG_RETRY_STATUS.  If we read that, we can't distinguish it from
 506          * a CRS completion, so we will incorrectly retry the read and
 507          * eventually return the wrong data (0xffffffff).
 508          */
 509         data = readl(cfg_data_p);
 510         while (data == CFG_RETRY_STATUS && timeout--) {
 511                 /*
 512                  * CRS state is set in CFG_RD status register
 513                  * This will handle the case where CFG_RETRY_STATUS is
 514                  * valid config data.
 515                  */
 516                 status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
 517                 if (status != CFG_RD_CRS)
 518                         return data;
 519 
 520                 udelay(1);
 521                 data = readl(cfg_data_p);
 522         }
 523 
 524         if (data == CFG_RETRY_STATUS)
 525                 data = 0xffffffff;
 526 
 527         return data;
 528 }
 529 
 530 static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
 531 {
 532         u32 i, dev_id;
 533 
 534         switch (where & ~0x3) {
 535         case PCI_VENDOR_ID:
 536                 dev_id = *val >> 16;
 537 
 538                 /*
 539                  * Activate fixup for those controllers that have corrupted
 540                  * capability list registers
 541                  */
 542                 for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
 543                         if (dev_id == iproc_pcie_corrupt_cap_did[i])
 544                                 pcie->fix_paxc_cap = true;
 545                 break;
 546 
 547         case IPROC_PCI_PM_CAP:
 548                 if (pcie->fix_paxc_cap) {
 549                         /* advertise PM, force next capability to PCIe */
 550                         *val &= ~IPROC_PCI_PM_CAP_MASK;
 551                         *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
 552                 }
 553                 break;
 554 
 555         case IPROC_PCI_EXP_CAP:
 556                 if (pcie->fix_paxc_cap) {
 557                         /* advertise root port, version 2, terminate here */
 558                         *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
 559                                 PCI_CAP_ID_EXP;
 560                 }
 561                 break;
 562 
 563         case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
 564                 /* Don't advertise CRS SV support */
 565                 *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
 566                 break;
 567 
 568         default:
 569                 break;
 570         }
 571 }
 572 
 573 static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 574                                   int where, int size, u32 *val)
 575 {
 576         struct iproc_pcie *pcie = iproc_data(bus);
 577         unsigned int slot = PCI_SLOT(devfn);
 578         unsigned int fn = PCI_FUNC(devfn);
 579         unsigned int busno = bus->number;
 580         void __iomem *cfg_data_p;
 581         unsigned int data;
 582         int ret;
 583 
 584         /* root complex access */
 585         if (busno == 0) {
 586                 ret = pci_generic_config_read32(bus, devfn, where, size, val);
 587                 if (ret == PCIBIOS_SUCCESSFUL)
 588                         iproc_pcie_fix_cap(pcie, where, val);
 589 
 590                 return ret;
 591         }
 592 
 593         cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
 594 
 595         if (!cfg_data_p)
 596                 return PCIBIOS_DEVICE_NOT_FOUND;
 597 
 598         data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
 599 
 600         *val = data;
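              /*
               * Illustrative example: a 2-byte read at where = 0x2 of raw
               * 32-bit data 0xaabbccdd extracts
               * (0xaabbccdd >> 16) & 0xffff = 0xaabb.
               */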
 601         if (size <= 2)
 602                 *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
 603 
 604         /*
 605          * For PAXC and PAXCv2, the total number of PFs that one can enumerate
 606          * depends on the firmware configuration. Unfortunately, due to an ASIC
 607          * bug, unconfigured PFs cannot be properly hidden from the root
  608          * complex. As a result, write access to these PFs will cause a
  609          * bus lockup on the embedded processor.
  610          *
  611          * Since all unconfigured PFs are left with an incorrect, stale
  612          * device ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch
  613          * those accesses early here and reject them all.
 614          */
 615 #define DEVICE_ID_MASK     0xffff0000
 616 #define DEVICE_ID_SHIFT    16
 617         if (pcie->rej_unconfig_pf &&
 618             (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
 619                 if ((*val & DEVICE_ID_MASK) ==
 620                     (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
 621                         return PCIBIOS_FUNC_NOT_SUPPORTED;
 622 
 623         return PCIBIOS_SUCCESSFUL;
 624 }
 625 
  626 /*
  627  * Note that access to the configuration registers is protected at a higher
  628  * layer by 'pci_lock' in drivers/pci/access.c.
 629  */
 630 static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
 631                                             int busno, unsigned int devfn,
 632                                             int where)
 633 {
  634         unsigned int slot = PCI_SLOT(devfn);
  635         unsigned int fn = PCI_FUNC(devfn);
 636         u16 offset;
 637 
 638         /* root complex access */
 639         if (busno == 0) {
 640                 if (slot > 0 || fn > 0)
 641                         return NULL;
 642 
 643                 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
 644                                      where & CFG_IND_ADDR_MASK);
 645                 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
 646                 if (iproc_pcie_reg_is_invalid(offset))
 647                         return NULL;
 648                 else
 649                         return (pcie->base + offset);
 650         }
 651 
 652         return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
 653 }
 654 
 655 static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
 656                                                 unsigned int devfn,
 657                                                 int where)
 658 {
 659         return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
 660                                       where);
 661 }
 662 
 663 static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
 664                                        unsigned int devfn, int where,
 665                                        int size, u32 *val)
 666 {
 667         void __iomem *addr;
 668 
 669         addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
 670         if (!addr) {
 671                 *val = ~0;
 672                 return PCIBIOS_DEVICE_NOT_FOUND;
 673         }
 674 
 675         *val = readl(addr);
 676 
 677         if (size <= 2)
 678                 *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
 679 
 680         return PCIBIOS_SUCCESSFUL;
 681 }
 682 
 683 static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
 684                                         unsigned int devfn, int where,
 685                                         int size, u32 val)
 686 {
 687         void __iomem *addr;
 688         u32 mask, tmp;
 689 
 690         addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
 691         if (!addr)
 692                 return PCIBIOS_DEVICE_NOT_FOUND;
 693 
 694         if (size == 4) {
 695                 writel(val, addr);
 696                 return PCIBIOS_SUCCESSFUL;
 697         }
 698 
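              /*
               * Illustrative example: a 2-byte write at where = 0x2 computes
               * mask = ~(0xffff << 16) = 0x0000ffff, preserving the low
               * half-word and replacing bits [31:16] with val << 16.
               */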
 699         mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
 700         tmp = readl(addr) & mask;
 701         tmp |= val << ((where & 0x3) * 8);
 702         writel(tmp, addr);
 703 
 704         return PCIBIOS_SUCCESSFUL;
 705 }
 706 
 707 static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
 708                                     int where, int size, u32 *val)
 709 {
 710         int ret;
 711         struct iproc_pcie *pcie = iproc_data(bus);
 712 
 713         iproc_pcie_apb_err_disable(bus, true);
 714         if (pcie->iproc_cfg_read)
 715                 ret = iproc_pcie_config_read(bus, devfn, where, size, val);
 716         else
 717                 ret = pci_generic_config_read32(bus, devfn, where, size, val);
 718         iproc_pcie_apb_err_disable(bus, false);
 719 
 720         return ret;
 721 }
 722 
 723 static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
 724                                      int where, int size, u32 val)
 725 {
 726         int ret;
 727 
 728         iproc_pcie_apb_err_disable(bus, true);
 729         ret = pci_generic_config_write32(bus, devfn, where, size, val);
 730         iproc_pcie_apb_err_disable(bus, false);
 731 
 732         return ret;
 733 }
 734 
 735 static struct pci_ops iproc_pcie_ops = {
 736         .map_bus = iproc_pcie_bus_map_cfg_bus,
 737         .read = iproc_pcie_config_read32,
 738         .write = iproc_pcie_config_write32,
 739 };
 740 
 741 static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
 742 {
 743         u32 val;
 744 
 745         /*
 746          * PAXC and the internal emulated endpoint device downstream should not
 747          * be reset.  If firmware has been loaded on the endpoint device at an
 748          * earlier boot stage, reset here causes issues.
 749          */
 750         if (pcie->ep_is_internal)
 751                 return;
 752 
 753         if (assert) {
 754                 val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
 755                 val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
 756                         ~RC_PCIE_RST_OUTPUT;
 757                 iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
 758                 udelay(250);
 759         } else {
 760                 val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
 761                 val |= RC_PCIE_RST_OUTPUT;
 762                 iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
 763                 msleep(100);
 764         }
 765 }
 766 
 767 int iproc_pcie_shutdown(struct iproc_pcie *pcie)
 768 {
 769         iproc_pcie_perst_ctrl(pcie, true);
 770         msleep(500);
 771 
 772         return 0;
 773 }
 774 EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);
 775 
 776 static int iproc_pcie_check_link(struct iproc_pcie *pcie)
 777 {
 778         struct device *dev = pcie->dev;
 779         u32 hdr_type, link_ctrl, link_status, class, val;
 780         bool link_is_active = false;
 781 
 782         /*
 783          * PAXC connects to emulated endpoint devices directly and does not
 784          * have a Serdes.  Therefore skip the link detection logic here.
 785          */
 786         if (pcie->ep_is_internal)
 787                 return 0;
 788 
 789         val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
 790         if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
 791                 dev_err(dev, "PHY or data link is INACTIVE!\n");
 792                 return -ENODEV;
 793         }
 794 
 795         /* make sure we are not in EP mode */
 796         iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
 797         if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
 798                 dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
 799                 return -EFAULT;
 800         }
 801 
 802         /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
 803 #define PCI_BRIDGE_CTRL_REG_OFFSET      0x43c
 804 #define PCI_CLASS_BRIDGE_MASK           0xffff00
 805 #define PCI_CLASS_BRIDGE_SHIFT          8
 806         iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
 807                                     4, &class);
 808         class &= ~PCI_CLASS_BRIDGE_MASK;
 809         class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
 810         iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
 811                                      4, class);
 812 
 813         /* check link status to see if link is active */
 814         iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
 815                                     2, &link_status);
 816         if (link_status & PCI_EXP_LNKSTA_NLW)
 817                 link_is_active = true;
 818 
 819         if (!link_is_active) {
 820                 /* try GEN 1 link speed */
 821 #define PCI_TARGET_LINK_SPEED_MASK      0xf
 822 #define PCI_TARGET_LINK_SPEED_GEN2      0x2
 823 #define PCI_TARGET_LINK_SPEED_GEN1      0x1
 824                 iproc_pci_raw_config_read32(pcie, 0,
 825                                             IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
 826                                             4, &link_ctrl);
 827                 if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
 828                     PCI_TARGET_LINK_SPEED_GEN2) {
 829                         link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
 830                         link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
 831                         iproc_pci_raw_config_write32(pcie, 0,
 832                                         IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
 833                                         4, link_ctrl);
 834                         msleep(100);
 835 
 836                         iproc_pci_raw_config_read32(pcie, 0,
 837                                         IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
 838                                         2, &link_status);
 839                         if (link_status & PCI_EXP_LNKSTA_NLW)
 840                                 link_is_active = true;
 841                 }
 842         }
 843 
 844         dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
 845 
 846         return link_is_active ? 0 : -ENODEV;
 847 }
 848 
 849 static void iproc_pcie_enable(struct iproc_pcie *pcie)
 850 {
 851         iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
 852 }
 853 
 854 static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
 855                                           int window_idx)
 856 {
 857         u32 val;
 858 
 859         val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
 860 
 861         return !!(val & OARR_VALID);
 862 }
 863 
 864 static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
 865                                       int size_idx, u64 axi_addr, u64 pci_addr)
 866 {
 867         struct device *dev = pcie->dev;
 868         u16 oarr_offset, omap_offset;
 869 
 870         /*
 871          * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
 872          * on window index.
 873          */
 874         oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
 875                                                           window_idx));
 876         omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
 877                                                           window_idx));
 878         if (iproc_pcie_reg_is_invalid(oarr_offset) ||
 879             iproc_pcie_reg_is_invalid(omap_offset))
 880                 return -EINVAL;
 881 
 882         /*
 883          * Program the OARR registers.  The upper 32-bit OARR register is
 884          * always right after the lower 32-bit OARR register.
 885          */
 886         writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
 887                OARR_VALID, pcie->base + oarr_offset);
 888         writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
 889 
 890         /* now program the OMAP registers */
 891         writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
 892         writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
 893 
 894         dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
 895                 window_idx, oarr_offset, &axi_addr, &pci_addr);
 896         dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
 897                 readl(pcie->base + oarr_offset),
 898                 readl(pcie->base + oarr_offset + 4));
 899         dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
 900                 readl(pcie->base + omap_offset),
 901                 readl(pcie->base + omap_offset + 4));
 902 
 903         return 0;
 904 }
 905 
  906 /*
 907  * Some iProc SoCs require the SW to configure the outbound address mapping
 908  *
 909  * Outbound address translation:
 910  *
 911  * iproc_pcie_address = axi_address - axi_offset
 912  * OARR = iproc_pcie_address
 913  * OMAP = pci_addr
 914  *
 915  * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 916  */
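      /*
       * Illustrative example with hypothetical values: if ob->axi_offset is
       * 0x40000000, a 128 MB window at axi_addr 0x48000000 targeting pci_addr
       * 0x08000000 programs OARR = 0x48000000 - 0x40000000 = 0x08000000 and
       * OMAP = 0x08000000.
       */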
 917 static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 918                                u64 pci_addr, resource_size_t size)
 919 {
 920         struct iproc_pcie_ob *ob = &pcie->ob;
 921         struct device *dev = pcie->dev;
 922         int ret = -EINVAL, window_idx, size_idx;
 923 
 924         if (axi_addr < ob->axi_offset) {
 925                 dev_err(dev, "axi address %pap less than offset %pap\n",
 926                         &axi_addr, &ob->axi_offset);
 927                 return -EINVAL;
 928         }
 929 
 930         /*
 931          * Translate the AXI address to the internal address used by the iProc
 932          * PCIe core before programming the OARR
 933          */
 934         axi_addr -= ob->axi_offset;
 935 
 936         /* iterate through all OARR/OMAP mapping windows */
 937         for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
 938                 const struct iproc_pcie_ob_map *ob_map =
 939                         &pcie->ob_map[window_idx];
 940 
 941                 /*
 942                  * If current outbound window is already in use, move on to the
 943                  * next one.
 944                  */
 945                 if (iproc_pcie_ob_is_valid(pcie, window_idx))
 946                         continue;
 947 
 948                 /*
 949                  * Iterate through all supported window sizes within the
 950                  * OARR/OMAP pair to find a match.  Go through the window sizes
 951                  * in a descending order.
 952                  */
 953                 for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
 954                      size_idx--) {
 955                         resource_size_t window_size =
 956                                 ob_map->window_sizes[size_idx] * SZ_1M;
 957 
 958                         /*
  959                          * Keep iterating unless we have reached the last
  960                          * window with the minimal window size at index
  961                          * zero.  In that case, compromise and map the
  962                          * region using the minimum supported window size.
 963                          */
 964                         if (size < window_size) {
 965                                 if (size_idx > 0 || window_idx > 0)
 966                                         continue;
 967 
 968                                 /*
 969                                  * For the corner case of reaching the minimal
 970                                  * window size that can be supported on the
 971                                  * last window
 972                                  */
 973                                 axi_addr = ALIGN_DOWN(axi_addr, window_size);
 974                                 pci_addr = ALIGN_DOWN(pci_addr, window_size);
 975                                 size = window_size;
 976                         }
 977 
 978                         if (!IS_ALIGNED(axi_addr, window_size) ||
 979                             !IS_ALIGNED(pci_addr, window_size)) {
 980                                 dev_err(dev,
 981                                         "axi %pap or pci %pap not aligned\n",
 982                                         &axi_addr, &pci_addr);
 983                                 return -EINVAL;
 984                         }
 985 
 986                         /*
 987                          * Match found!  Program both OARR and OMAP and mark
 988                          * them as a valid entry.
 989                          */
 990                         ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
 991                                                   axi_addr, pci_addr);
 992                         if (ret)
 993                                 goto err_ob;
 994 
 995                         size -= window_size;
 996                         if (size == 0)
 997                                 return 0;
 998 
 999                         /*
1000                          * If we are here, we are done with the current window,
1001                          * but not yet finished all mappings.  Need to move on
1002                          * to the next window.
1003                          */
1004                         axi_addr += window_size;
1005                         pci_addr += window_size;
1006                         break;
1007                 }
1008         }
1009 
1010 err_ob:
1011         dev_err(dev, "unable to configure outbound mapping\n");
1012         dev_err(dev,
1013                 "axi %pap, axi offset %pap, pci %pap, res size %pap\n",
1014                 &axi_addr, &ob->axi_offset, &pci_addr, &size);
1015 
1016         return ret;
1017 }
1018 
1019 static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
1020                                  struct list_head *resources)
1021 {
1022         struct device *dev = pcie->dev;
1023         struct resource_entry *window;
1024         int ret;
1025 
1026         resource_list_for_each_entry(window, resources) {
1027                 struct resource *res = window->res;
1028                 u64 res_type = resource_type(res);
1029 
1030                 switch (res_type) {
1031                 case IORESOURCE_IO:
1032                 case IORESOURCE_BUS:
1033                         break;
1034                 case IORESOURCE_MEM:
1035                         ret = iproc_pcie_setup_ob(pcie, res->start,
1036                                                   res->start - window->offset,
1037                                                   resource_size(res));
1038                         if (ret)
1039                                 return ret;
1040                         break;
1041                 default:
1042                         dev_err(dev, "invalid resource %pR\n", res);
1043                         return -EINVAL;
1044                 }
1045         }
1046 
1047         return 0;
1048 }
1049 
1050 static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
1051                                            int region_idx)
1052 {
1053         const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
1054         u32 val;
1055 
1056         val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
1057 
1058         return !!(val & (BIT(ib_map->nr_sizes) - 1));
1059 }
1060 
1061 static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
1062                                             enum iproc_pcie_ib_map_type type)
1063 {
1064         return !!(ib_map->type == type);
1065 }
1066 
1067 static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
1068                                int size_idx, int nr_windows, u64 axi_addr,
1069                                u64 pci_addr, resource_size_t size)
1070 {
1071         struct device *dev = pcie->dev;
1072         const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
1073         u16 iarr_offset, imap_offset;
1074         u32 val;
1075         int window_idx;
1076 
1077         iarr_offset = iproc_pcie_reg_offset(pcie,
1078                                 MAP_REG(IPROC_PCIE_IARR0, region_idx));
1079         imap_offset = iproc_pcie_reg_offset(pcie,
1080                                 MAP_REG(IPROC_PCIE_IMAP0, region_idx));
1081         if (iproc_pcie_reg_is_invalid(iarr_offset) ||
1082             iproc_pcie_reg_is_invalid(imap_offset))
1083                 return -EINVAL;
1084 
1085         dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
1086                 region_idx, iarr_offset, &axi_addr, &pci_addr);
1087 
1088         /*
1089          * Program the IARR registers.  The upper 32-bit IARR register is
1090          * always right after the lower 32-bit IARR register.
1091          */
1092         writel(lower_32_bits(pci_addr) | BIT(size_idx),
1093                pcie->base + iarr_offset);
1094         writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
1095 
1096         dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
1097                 readl(pcie->base + iarr_offset),
1098                 readl(pcie->base + iarr_offset + 4));
1099 
1100         /*
1101          * Now program the IMAP registers.  Each IARR region may have one or
1102          * more IMAP windows.
1103          */
1104         size >>= ilog2(nr_windows);
1105         for (window_idx = 0; window_idx < nr_windows; window_idx++) {
1106                 val = readl(pcie->base + imap_offset);
1107                 val |= lower_32_bits(axi_addr) | IMAP_VALID;
1108                 writel(val, pcie->base + imap_offset);
1109                 writel(upper_32_bits(axi_addr),
1110                        pcie->base + imap_offset + ib_map->imap_addr_offset);
1111 
1112                 dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
1113                         window_idx, readl(pcie->base + imap_offset),
1114                         readl(pcie->base + imap_offset +
1115                               ib_map->imap_addr_offset));
1116 
1117                 imap_offset += ib_map->imap_window_offset;
1118                 axi_addr += size;
1119         }
1120 
1121         return 0;
1122 }
1123 
1124 static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
1125                                struct of_pci_range *range,
1126                                enum iproc_pcie_ib_map_type type)
1127 {
1128         struct device *dev = pcie->dev;
1129         struct iproc_pcie_ib *ib = &pcie->ib;
1130         int ret;
1131         unsigned int region_idx, size_idx;
1132         u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
1133         resource_size_t size = range->size;
1134 
1135         /* iterate through all IARR mapping regions */
1136         for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
1137                 const struct iproc_pcie_ib_map *ib_map =
1138                         &pcie->ib_map[region_idx];
1139 
1140                 /*
1141                  * If current inbound region is already in use or not a
1142                  * compatible type, move on to the next.
1143                  */
1144                 if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
1145                     !iproc_pcie_ib_check_type(ib_map, type))
1146                         continue;
1147 
1148                 /* iterate through all supported region sizes to find a match */
1149                 for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
1150                         resource_size_t region_size =
1151                         ib_map->region_sizes[size_idx] * ib_map->size_unit;
1152 
1153                         if (size != region_size)
1154                                 continue;
1155 
1156                         if (!IS_ALIGNED(axi_addr, region_size) ||
1157                             !IS_ALIGNED(pci_addr, region_size)) {
1158                                 dev_err(dev,
1159                                         "axi %pap or pci %pap not aligned\n",
1160                                         &axi_addr, &pci_addr);
1161                                 return -EINVAL;
1162                         }
1163 
1164                         /* Match found!  Program IARR and all IMAP windows. */
1165                         ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
1166                                                   ib_map->nr_windows, axi_addr,
1167                                                   pci_addr, size);
1168                         if (ret)
1169                                 goto err_ib;
1170                         else
1171                                 return 0;
1172 
1173                 }
1174         }
1175         ret = -EINVAL;
1176 
1177 err_ib:
1178         dev_err(dev, "unable to configure inbound mapping\n");
1179         dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
1180                 &axi_addr, &pci_addr, &size);
1181 
1182         return ret;
1183 }
1184 
1185 static int iproc_pcie_add_dma_range(struct device *dev,
1186                                     struct list_head *resources,
1187                                     struct of_pci_range *range)
1188 {
1189         struct resource *res;
1190         struct resource_entry *entry, *tmp;
1191         struct list_head *head = resources;
1192 
1193         res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
1194         if (!res)
1195                 return -ENOMEM;
1196 
1197         resource_list_for_each_entry(tmp, resources) {
1198                 if (tmp->res->start < range->cpu_addr)
1199                         head = &tmp->node;
1200         }
1201 
1202         res->start = range->cpu_addr;
1203         res->end = res->start + range->size - 1;
1204 
1205         entry = resource_list_create_entry(res, 0);
1206         if (!entry)
1207                 return -ENOMEM;
1208 
1209         entry->offset = res->start - range->cpu_addr;
1210         resource_list_add(entry, head);
1211 
1212         return 0;
1213 }
1214 
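      /*
       * Hypothetical device tree example: a dma-ranges entry such as
       *
       *   dma-ranges = <0x43000000 0x0 0x80000000  0x0 0x80000000  0x0 0x80000000>;
       *
       * describes one 2 GB inbound region (PCI address 0x80000000 mapped to
       * CPU address 0x80000000).  iproc_pcie_map_dma_ranges() below feeds
       * each such entry to iproc_pcie_setup_ib() to program an IARR/IMAP
       * region.
       */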
1215 static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
1216 {
1217         struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1218         struct of_pci_range range;
1219         struct of_pci_range_parser parser;
1220         int ret;
1221         LIST_HEAD(resources);
1222 
1223         /* Get the dma-ranges from DT */
1224         ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
1225         if (ret)
1226                 return ret;
1227 
1228         for_each_of_pci_range(&parser, &range) {
1229                 ret = iproc_pcie_add_dma_range(pcie->dev,
1230                                                &resources,
1231                                                &range);
1232                 if (ret)
1233                         goto out;
1234                 /* Each range entry corresponds to an inbound mapping region */
1235                 ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
1236                 if (ret)
1237                         goto out;
1238         }
1239 
1240         list_splice_init(&resources, &host->dma_ranges);
1241 
1242         return 0;
1243 out:
1244         pci_free_resource_list(&resources);
1245         return ret;
1246 }
1247 
1248 static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
1249                                struct device_node *msi_node,
1250                                u64 *msi_addr)
1251 {
1252         struct device *dev = pcie->dev;
1253         int ret;
1254         struct resource res;
1255 
1256         /*
1257          * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
1258          * supported external MSI controller that requires steering.
1259          */
1260         if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
1261                 dev_err(dev, "unable to find compatible MSI controller\n");
1262                 return -ENODEV;
1263         }
1264 
1265         /* derive GITS_TRANSLATER address from GICv3 */
1266         ret = of_address_to_resource(msi_node, 0, &res);
1267         if (ret < 0) {
1268                 dev_err(dev, "unable to obtain MSI controller resources\n");
1269                 return ret;
1270         }
1271 
1272         *msi_addr = res.start + GITS_TRANSLATER;
1273         return 0;
1274 }
1275 
1276 static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
1277 {
1278         int ret;
1279         struct of_pci_range range;
1280 
1281         memset(&range, 0, sizeof(range));
1282         range.size = SZ_32K;
1283         range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
1284 
1285         ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
1286         return ret;
1287 }
1288 
1289 static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
1290                                          bool enable)
1291 {
1292         u32 val;
1293 
1294         if (!enable) {
1295                 /*
1296                  * Disable PAXC MSI steering. All write transfers will be
1297                  * treated as non-MSI transfers
1298                  */
1299                 val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
1300                 val &= ~MSI_ENABLE_CFG;
1301                 iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
1302                 return;
1303         }
1304 
1305         /*
1306          * Program bits [43:13] of address of GITS_TRANSLATER register into
1307          * bits [30:0] of the MSI base address register.  In fact, in all iProc
1308          * based SoCs, all I/O register bases are well below the 32-bit
1309          * boundary, so we can safely assume bits [43:32] are always zeros.
1310          */
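              /*
               * Illustrative example with a hypothetical GITS_TRANSLATER
               * address of 0x63c30040: the base address register is written
               * with 0x63c30040 >> 13 = 0x31e18, and the MSI address
               * registers below with 0x63c30040 >> 2 = 0x18f0c010.
               */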
1311         iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
1312                              (u32)(msi_addr >> 13));
1313 
1314         /* use a default 8K window size */
1315         iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
1316 
1317         /* steering MSI to GICv3 ITS */
1318         val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
1319         val |= GIC_V3_CFG;
1320         iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
1321 
1322         /*
1323          * Program bits [43:2] of address of GITS_TRANSLATER register into the
1324          * iProc MSI address registers.
1325          */
1326         msi_addr >>= 2;
1327         iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
1328                              upper_32_bits(msi_addr));
1329         iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
1330                              lower_32_bits(msi_addr));
1331 
1332         /* enable MSI */
1333         val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
1334         val |= MSI_ENABLE_CFG;
1335         iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
1336 }
1337 
1338 static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
1339                                 struct device_node *msi_node)
1340 {
1341         struct device *dev = pcie->dev;
1342         int ret;
1343         u64 msi_addr;
1344 
1345         ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr);
1346         if (ret < 0) {
1347                 dev_err(dev, "msi steering failed\n");
1348                 return ret;
1349         }
1350 
1351         switch (pcie->type) {
1352         case IPROC_PCIE_PAXB_V2:
1353                 ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
1354                 if (ret)
1355                         return ret;
1356                 break;
1357         case IPROC_PCIE_PAXC_V2:
1358                 iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
1359                 break;
1360         default:
1361                 return -EINVAL;
1362         }
1363 
1364         return 0;
1365 }
1366 
1367 static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
1368 {
1369         struct device_node *msi_node;
1370         int ret;
1371 
1372         /*
1373          * Either the "msi-parent" or the "msi-map" phandle needs to exist
1374          * for us to obtain the MSI node.
1375          */
1376 
1377         msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
1378         if (!msi_node) {
1379                 const __be32 *msi_map = NULL;
1380                 int len;
1381                 u32 phandle;
1382 
1383                 msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
1384                 if (!msi_map)
1385                         return -ENODEV;
1386 
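                      /*
                       * Each "msi-map" entry is <rid-base phandle msi-base length>;
                       * the second cell is the phandle of the MSI controller node.
                       */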
1387                 phandle = be32_to_cpup(msi_map + 1);
1388                 msi_node = of_find_node_by_phandle(phandle);
1389                 if (!msi_node)
1390                         return -ENODEV;
1391         }
1392 
1393         /*
1394          * Certain revisions of the iProc PCIe controller require additional
1395          * configuration to steer the MSI writes towards an external MSI
1396          * controller.
1397          */
1398         if (pcie->need_msi_steer) {
1399                 ret = iproc_pcie_msi_steer(pcie, msi_node);
1400                 if (ret)
1401                         goto out_put_node;
1402         }
1403 
1404         /*
1405          * If another MSI controller is being used, the call below should fail,
1406          * but that is okay.
1407          */
1408         ret = iproc_msi_init(pcie, msi_node);
1409 
1410 out_put_node:
1411         of_node_put(msi_node);
1412         return ret;
1413 }
1414 
1415 static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
1416 {
1417         iproc_msi_exit(pcie);
1418 }
1419 
1420 static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
1421 {
1422         struct device *dev = pcie->dev;
1423         unsigned int reg_idx;
1424         const u16 *regs;
1425 
1426         switch (pcie->type) {
1427         case IPROC_PCIE_PAXB_BCMA:
1428                 regs = iproc_pcie_reg_paxb_bcma;
1429                 break;
1430         case IPROC_PCIE_PAXB:
1431                 regs = iproc_pcie_reg_paxb;
1432                 pcie->has_apb_err_disable = true;
1433                 if (pcie->need_ob_cfg) {
1434                         pcie->ob_map = paxb_ob_map;
1435                         pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
1436                 }
1437                 break;
1438         case IPROC_PCIE_PAXB_V2:
1439                 regs = iproc_pcie_reg_paxb_v2;
1440                 pcie->iproc_cfg_read = true;
1441                 pcie->has_apb_err_disable = true;
1442                 if (pcie->need_ob_cfg) {
1443                         pcie->ob_map = paxb_v2_ob_map;
1444                         pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
1445                 }
1446                 pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
1447                 pcie->ib_map = paxb_v2_ib_map;
1448                 pcie->need_msi_steer = true;
1449                 dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
1450                          CFG_RETRY_STATUS);
1451                 break;
1452         case IPROC_PCIE_PAXC:
1453                 regs = iproc_pcie_reg_paxc;
1454                 pcie->ep_is_internal = true;
1455                 pcie->iproc_cfg_read = true;
1456                 pcie->rej_unconfig_pf = true;
1457                 break;
1458         case IPROC_PCIE_PAXC_V2:
1459                 regs = iproc_pcie_reg_paxc_v2;
1460                 pcie->ep_is_internal = true;
1461                 pcie->iproc_cfg_read = true;
1462                 pcie->rej_unconfig_pf = true;
1463                 pcie->need_msi_steer = true;
1464                 break;
1465         default:
1466                 dev_err(dev, "incompatible iProc PCIe interface\n");
1467                 return -EINVAL;
1468         }
1469 
1470         pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
1471                                          sizeof(*pcie->reg_offsets),
1472                                          GFP_KERNEL);
1473         if (!pcie->reg_offsets)
1474                 return -ENOMEM;
1475 
1476         /*
              * Go through the register table and populate all valid registers.
              * Index 0 is special-cased: an offset of 0 is legitimate for the
              * first register, whereas a zero entry elsewhere in the table
              * means the register is not supported.  On PAXC v2 the first
              * register is not supported, so it is marked invalid explicitly.
              */
1477         pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
1478                 IPROC_PCIE_REG_INVALID : regs[0];
1479         for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
1480                 pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
1481                         regs[reg_idx] : IPROC_PCIE_REG_INVALID;
1482 
1483         return 0;
1484 }
1485 
1486 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
1487 {
1488         struct device *dev;
1489         int ret;
1490         struct pci_bus *child;
1491         struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1492 
1493         dev = pcie->dev;
1494 
1495         ret = iproc_pcie_rev_init(pcie);
1496         if (ret) {
1497                 dev_err(dev, "unable to initialize controller parameters\n");
1498                 return ret;
1499         }
1500 
1501         ret = devm_request_pci_bus_resources(dev, res);
1502         if (ret)
1503                 return ret;
1504 
1505         ret = phy_init(pcie->phy);
1506         if (ret) {
1507                 dev_err(dev, "unable to initialize PCIe PHY\n");
1508                 return ret;
1509         }
1510 
1511         ret = phy_power_on(pcie->phy);
1512         if (ret) {
1513                 dev_err(dev, "unable to power on PCIe PHY\n");
1514                 goto err_exit_phy;
1515         }
1516 
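              /* assert and then deassert PERST to reset the downstream endpoint */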
1517         iproc_pcie_perst_ctrl(pcie, true);
1518         iproc_pcie_perst_ctrl(pcie, false);
1519 
1520         if (pcie->need_ob_cfg) {
1521                 ret = iproc_pcie_map_ranges(pcie, res);
1522                 if (ret) {
1523                         dev_err(dev, "map failed\n");
1524                         goto err_power_off_phy;
1525                 }
1526         }
1527 
1528         if (pcie->need_ib_cfg) {
1529                 ret = iproc_pcie_map_dma_ranges(pcie);
1530                 if (ret && ret != -ENOENT)
1531                         goto err_power_off_phy;
1532         }
1533 
1534         ret = iproc_pcie_check_link(pcie);
1535         if (ret) {
1536                 dev_err(dev, "no PCIe EP device detected\n");
1537                 goto err_power_off_phy;
1538         }
1539 
1540         iproc_pcie_enable(pcie);
1541 
1542         if (IS_ENABLED(CONFIG_PCI_MSI))
1543                 if (iproc_pcie_msi_enable(pcie))
1544                         dev_info(dev, "not using iProc MSI\n");
1545 
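              /* populate the host bridge structure before scanning the root bus */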
1546         list_splice_init(res, &host->windows);
1547         host->busnr = 0;
1548         host->dev.parent = dev;
1549         host->ops = &iproc_pcie_ops;
1550         host->sysdata = pcie;
1551         host->map_irq = pcie->map_irq;
1552         host->swizzle_irq = pci_common_swizzle;
1553 
1554         ret = pci_scan_root_bus_bridge(host);
1555         if (ret < 0) {
1556                 dev_err(dev, "failed to scan host: %d\n", ret);
1557                 goto err_power_off_phy;
1558         }
1559 
1560         pci_assign_unassigned_bus_resources(host->bus);
1561 
1562         pcie->root_bus = host->bus;
1563 
1564         list_for_each_entry(child, &host->bus->children, node)
1565                 pcie_bus_configure_settings(child);
1566 
1567         pci_bus_add_devices(host->bus);
1568 
1569         return 0;
1570 
1571 err_power_off_phy:
1572         phy_power_off(pcie->phy);
1573 err_exit_phy:
1574         phy_exit(pcie->phy);
1575         return ret;
1576 }
1577 EXPORT_SYMBOL(iproc_pcie_setup);
1578 
1579 int iproc_pcie_remove(struct iproc_pcie *pcie)
1580 {
1581         pci_stop_root_bus(pcie->root_bus);
1582         pci_remove_root_bus(pcie->root_bus);
1583 
1584         iproc_pcie_msi_disable(pcie);
1585 
1586         phy_power_off(pcie->phy);
1587         phy_exit(pcie->phy);
1588 
1589         return 0;
1590 }
1591 EXPORT_SYMBOL(iproc_pcie_remove);
1592 
1593 /*
1594  * The MSI parsing logic in certain revisions of the Broadcom PAXC-based root
1595  * complex does not work and needs to be disabled.
1596  */
1597 static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
1598 {
1599         struct iproc_pcie *pcie = iproc_data(pdev->bus);
1600 
1601         if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
1602                 iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
1603 }
1604 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
1605                         quirk_paxc_disable_msi_parsing);
1606 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
1607                         quirk_paxc_disable_msi_parsing);
1608 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
1609                         quirk_paxc_disable_msi_parsing);
1610 
1611 static void quirk_paxc_bridge(struct pci_dev *pdev)
1612 {
1613         /*
1614          * The PCI config space is shared between the PAXC root port and the
1615          * first Ethernet device, so we need to work around this by telling
1616          * the PCI code that the bridge is not an Ethernet device.
1617          */
1618         if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
1619                 pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
1620 
1621         /*
1622          * MPSS is not being set properly (it is currently 0) because that
1623          * area of the PCI config space is hardcoded to zero and is not
1624          * modifiable by firmware.  Set it to 2 (i.e. a 512-byte MPS, since
1625          * MPS = 128 << MPSS) so that the MPS can be set to the real maximum value.
1626          */
1627         pdev->pcie_mpss = 2;
1628 }
1629 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
1630 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
1631 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
1632 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
1633 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
1634 
1635 MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
1636 MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
1637 MODULE_LICENSE("GPL v2");
