root/drivers/iommu/rockchip-iommu.c

DEFINITIONS

This source file includes the following definitions.
  1. rk_table_flush
  2. to_rk_domain
  3. rk_dte_pt_address
  4. rk_dte_is_pt_valid
  5. rk_mk_dte
  6. rk_pte_page_address
  7. rk_pte_is_page_valid
  8. rk_mk_pte
  9. rk_mk_pte_invalid
  10. rk_iova_dte_index
  11. rk_iova_pte_index
  12. rk_iova_page_offset
  13. rk_iommu_read
  14. rk_iommu_write
  15. rk_iommu_command
  16. rk_iommu_base_command
  17. rk_iommu_zap_lines
  18. rk_iommu_is_stall_active
  19. rk_iommu_is_paging_enabled
  20. rk_iommu_is_reset_done
  21. rk_iommu_enable_stall
  22. rk_iommu_disable_stall
  23. rk_iommu_enable_paging
  24. rk_iommu_disable_paging
  25. rk_iommu_force_reset
  26. log_iova
  27. rk_iommu_irq
  28. rk_iommu_iova_to_phys
  29. rk_iommu_zap_iova
  30. rk_iommu_zap_iova_first_last
  31. rk_dte_get_page_table
  32. rk_iommu_unmap_iova
  33. rk_iommu_map_iova
  34. rk_iommu_map
  35. rk_iommu_unmap
  36. rk_iommu_from_dev
  37. rk_iommu_disable
  38. rk_iommu_enable
  39. rk_iommu_detach_device
  40. rk_iommu_attach_device
  41. rk_iommu_domain_alloc
  42. rk_iommu_domain_free
  43. rk_iommu_add_device
  44. rk_iommu_remove_device
  45. rk_iommu_device_group
  46. rk_iommu_of_xlate
  47. rk_iommu_probe
  48. rk_iommu_shutdown
  49. rk_iommu_suspend
  50. rk_iommu_resume
  51. rk_iommu_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:      Simon Xue <xxm@rock-chips.com>
 *                      Daniel Kurtz <djkurtz@chromium.org>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR         0x00    /* Directory table address */
#define RK_MMU_STATUS           0x04
#define RK_MMU_COMMAND          0x08
#define RK_MMU_PAGE_FAULT_ADDR  0x0C    /* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE     0x10    /* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT      0x14    /* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR        0x18    /* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK         0x1C    /* IRQ enable */
#define RK_MMU_INT_STATUS       0x20    /* IRQ status after masking */
#define RK_MMU_AUTO_GATING      0x24

#define DTE_ADDR_DUMMY          0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US           100
#define RK_MMU_FORCE_RESET_TIMEOUT_US   100000
#define RK_MMU_POLL_TIMEOUT_US          1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 *   4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
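
/*
 * Each bit k set in the bitmap advertises support for 2^k-byte mappings:
 * 0x007ff000 has bits 12 through 22 set, i.e. every power-of-two size
 * from 4 KiB (one PTE) up to 4 MiB (one full page table).
 */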

struct rk_iommu_domain {
        struct list_head iommus;
        u32 *dt; /* page directory table */
        dma_addr_t dt_dma;
        spinlock_t iommus_lock; /* lock for iommus list */
        spinlock_t dt_lock; /* lock for modifying page directory table */

        struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
        "aclk", "iface",
};

struct rk_iommu {
        struct device *dev;
        void __iomem **bases;
        int num_mmu;
        int num_irq;
        struct clk_bulk_data *clocks;
        int num_clocks;
        bool reset_disabled;
        struct iommu_device iommu;
        struct list_head node; /* entry in rk_iommu_domain.iommus */
        struct iommu_domain *domain; /* domain to which iommu is attached */
        struct iommu_group *group;
};

struct rk_iommudata {
        struct device_link *link; /* runtime PM link from IOMMU to master */
        struct rk_iommu *iommu;
};

static struct device *dma_dev;

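/*
 * Flush CPU writes to a range of table entries out to the point where the
 * IOMMU's table walker can observe them: the DT/PT pages are mapped
 * DMA_TO_DEVICE, so every DTE/PTE update must be followed by a sync of the
 * touched u32 entries.
 */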
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
                                  unsigned int count)
{
        size_t size = count * sizeof(u32); /* count of u32 entries */

        dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level consists of up to 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
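
/*
 * With 1024 DTEs, 1024 PTEs per PT and 4 KiB pages, a single DT covers
 * 1024 * 1024 * 4 KiB = 4 GiB, i.e. the entire 32-bit iova space.
 */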

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
        return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
        return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
        return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
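
/*
 * For example, a page table at the (hypothetical) physical address
 * 0x12345000 gives rk_mk_dte(0x12345000) == 0x12345001: the 4 KiB aligned
 * PT address with the valid bit set.
 */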

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
        return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
        return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
        u32 flags = 0;

        flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
        flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
        page &= RK_PTE_PAGE_ADDRESS_MASK;
        return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
        return pte & ~RK_PTE_PAGE_VALID;
}
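
/*
 * For example, for a (hypothetical) page at 0x12345000,
 * rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) == 0x12345007: the page
 * address | readable (bit 1) | writable (bit 2) | valid (bit 0).
 * rk_mk_pte_invalid() then clears only bit 0, giving 0x12345006.
 */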

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
        return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
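
/*
 * For example, the (hypothetical) iova 0x12345678 decomposes as:
 *   dte_index   = (0x12345678 & 0xffc00000) >> 22 = 0x048
 *   pte_index   = (0x12345678 & 0x003ff000) >> 12 = 0x345
 *   page_offset =  0x12345678 & 0x00000fff        = 0x678
 */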

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
        return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
        writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
        writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
                               size_t size)
{
        int i;
        dma_addr_t iova_end = iova_start + size;
        /*
         * TODO(djkurtz): Figure out when it is more efficient to shoot down
         * the entire iotlb rather than iterate over individual iovas.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                dma_addr_t iova;

                for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
                        rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
        }
}
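
/*
 * Each RK_MMU_ZAP_ONE_LINE write invalidates the IOTLB entry of a single
 * 4 KiB page, so zapping a range costs size / SPAGE_SIZE register writes
 * per MMU instance.
 */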

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
        bool active = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                                           RK_MMU_STATUS_STALL_ACTIVE);

        return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
        bool enable = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                                           RK_MMU_STATUS_PAGING_ENABLED);

        return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
        bool done = true;
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

        return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (rk_iommu_is_stall_active(iommu))
                return 0;

        /* Stall can only be enabled if paging is enabled */
        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (!rk_iommu_is_stall_active(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
                                 !val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
        int ret, i;
        bool val;

        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;

        rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
                                 !val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_POLL_TIMEOUT_US);
        if (ret)
                for (i = 0; i < iommu->num_mmu; i++)
                        dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
                                rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

        return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
        int ret, i;
        u32 dte_addr;
        bool val;

        if (iommu->reset_disabled)
                return 0;

        /*
         * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
         * and verifying that the upper 5 nybbles are read back.
         */
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

                dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
                if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
                        dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
                        return -EFAULT;
                }
        }

        rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

        ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
                                 val, RK_MMU_POLL_PERIOD_US,
                                 RK_MMU_FORCE_RESET_TIMEOUT_US);
        if (ret) {
                dev_err(iommu->dev, "FORCE_RESET command timed out\n");
                return ret;
        }

        return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
        void __iomem *base = iommu->bases[index];
        u32 dte_index, pte_index, page_offset;
        u32 mmu_dte_addr;
        phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
        u32 *dte_addr;
        u32 dte;
        phys_addr_t pte_addr_phys = 0;
        u32 *pte_addr = NULL;
        u32 pte = 0;
        phys_addr_t page_addr_phys = 0;
        u32 page_flags = 0;

        dte_index = rk_iova_dte_index(iova);
        pte_index = rk_iova_pte_index(iova);
        page_offset = rk_iova_page_offset(iova);

        mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
        mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

        dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
        dte_addr = phys_to_virt(dte_addr_phys);
        dte = *dte_addr;

        if (!rk_dte_is_pt_valid(dte))
                goto print_it;

        pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
        pte_addr = phys_to_virt(pte_addr_phys);
        pte = *pte_addr;

        if (!rk_pte_is_page_valid(pte))
                goto print_it;

        page_addr_phys = rk_pte_page_address(pte) + page_offset;
        page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
        dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
                &iova, dte_index, pte_index, page_offset);
        dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
                &mmu_dte_addr_phys, &dte_addr_phys, dte,
                rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
                rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        u32 status;
        u32 int_status;
        dma_addr_t iova;
        irqreturn_t ret = IRQ_NONE;
        int i, err;

        err = pm_runtime_get_if_in_use(iommu->dev);
        if (WARN_ON_ONCE(err <= 0))
                return ret;

        if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
                goto out;

        for (i = 0; i < iommu->num_mmu; i++) {
                int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
                if (int_status == 0)
                        continue;

                ret = IRQ_HANDLED;
                iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

                if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
                        int flags;

                        status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
                        flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
                                        IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

                        dev_err(iommu->dev, "Page fault at %pad of type %s\n",
                                &iova,
                                (flags == IOMMU_FAULT_WRITE) ? "write" : "read");

                        log_iova(iommu, i, iova);

                        /*
                         * Report page fault to any installed handlers.
                         * Ignore the return code, though, since we always zap cache
                         * and clear the page fault anyway.
                         */
                        if (iommu->domain)
                                report_iommu_fault(iommu->domain, iommu->dev, iova,
                                                   flags);
                        else
                                dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
                }

                if (int_status & RK_MMU_IRQ_BUS_ERROR)
                        dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

                if (int_status & ~RK_MMU_IRQ_MASK)
                        dev_err(iommu->dev, "unexpected int_status: %#08x\n",
                                int_status);

                rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
        }

        clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
        pm_runtime_put(iommu->dev);
        return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        phys_addr_t pt_phys, phys = 0;
        u32 dte, pte;
        u32 *page_table;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        if (!rk_dte_is_pt_valid(dte))
                goto out;

        pt_phys = rk_dte_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[rk_iova_pte_index(iova)];
        if (!rk_pte_is_page_valid(pte))
                goto out;

        phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
                              dma_addr_t iova, size_t size)
{
        struct list_head *pos;
        unsigned long flags;

        /* shoot down this iova range on all iommus using this domain */
        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_for_each(pos, &rk_domain->iommus) {
                struct rk_iommu *iommu;
                int ret;

                iommu = list_entry(pos, struct rk_iommu, node);

                /* Only zap TLBs of IOMMUs that are powered on. */
                ret = pm_runtime_get_if_in_use(iommu->dev);
                if (WARN_ON_ONCE(ret < 0))
                        continue;
                if (ret) {
                        WARN_ON(clk_bulk_enable(iommu->num_clocks,
                                                iommu->clocks));
                        rk_iommu_zap_lines(iommu, iova, size);
                        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
                        pm_runtime_put(iommu->dev);
                }
        }
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
                                         dma_addr_t iova, size_t size)
{
        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
        if (size > SPAGE_SIZE)
                rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
                                        SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                  dma_addr_t iova)
{
        u32 *page_table, *dte_addr;
        u32 dte_index, dte;
        phys_addr_t pt_phys;
        dma_addr_t pt_dma;

        assert_spin_locked(&rk_domain->dt_lock);

        dte_index = rk_iova_dte_index(iova);
        dte_addr = &rk_domain->dt[dte_index];
        dte = *dte_addr;
        if (rk_dte_is_pt_valid(dte))
                goto done;

        page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, pt_dma)) {
                dev_err(dma_dev, "DMA mapping error while allocating page table\n");
                free_page((unsigned long)page_table);
                return ERR_PTR(-ENOMEM);
        }

        dte = rk_mk_dte(pt_dma);
        *dte_addr = dte;

        rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
        rk_table_flush(rk_domain,
                       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
        pt_phys = rk_dte_pt_address(dte);
        return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
                                  u32 *pte_addr, dma_addr_t pte_dma,
                                  size_t size)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;

        assert_spin_locked(&rk_domain->dt_lock);

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (!rk_pte_is_page_valid(pte))
                        break;

                pte_addr[pte_count] = rk_mk_pte_invalid(pte);
        }

        rk_table_flush(rk_domain, pte_dma, pte_count);

        return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
                             dma_addr_t pte_dma, dma_addr_t iova,
                             phys_addr_t paddr, size_t size, int prot)
{
        unsigned int pte_count;
        unsigned int pte_total = size / SPAGE_SIZE;
        phys_addr_t page_phys;

        assert_spin_locked(&rk_domain->dt_lock);

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                u32 pte = pte_addr[pte_count];

                if (rk_pte_is_page_valid(pte))
                        goto unwind;

                pte_addr[pte_count] = rk_mk_pte(paddr, prot);

                paddr += SPAGE_SIZE;
        }

        rk_table_flush(rk_domain, pte_dma, pte_total);

        /*
         * Zap the first and last iova to evict from iotlb any previously
         * mapped cachelines holding stale values for its dte and pte.
         * We only zap the first and last iova, since only they could have
         * dte or pte shared with an existing mapping.
         */
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

        return 0;
unwind:
        /* Unmap the range of iovas that we just mapped */
        rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
                            pte_count * SPAGE_SIZE);

        iova += pte_count * SPAGE_SIZE;
        page_phys = rk_pte_page_address(pte_addr[pte_count]);
        pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
               &iova, &page_phys, &paddr, prot);

        return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        u32 *page_table, *pte_addr;
        u32 dte, pte_index;
        int ret;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_map() guarantees that both iova and size will be
         * aligned, we will always only be mapping from a single dte here.
         */
        page_table = rk_dte_get_page_table(rk_domain, iova);
        if (IS_ERR(page_table)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return PTR_ERR(page_table);
        }

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        pte_index = rk_iova_pte_index(iova);
        pte_addr = &page_table[pte_index];
        pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
        ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
                                paddr, size, prot);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
                             size_t size, struct iommu_iotlb_gather *gather)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        phys_addr_t pt_phys;
        u32 dte;
        u32 *pte_addr;
        size_t unmap_size;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap specifies iova sizes that fit in one page table
         * (1024 4-KiB pages = 4 MiB).
         * So, size will always be 4096 <= size <= 4194304.
         * Since iommu_unmap() guarantees that both iova and size will be
         * aligned, we will always only be unmapping from a single dte here.
         */
        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        /* Just return 0 if iova is unmapped */
        if (!rk_dte_is_pt_valid(dte)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return 0;
        }

        pt_phys = rk_dte_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
        pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        /* Shoot down iotlb entries for the iova range that was just unmapped */
        rk_iommu_zap_iova(rk_domain, iova, unmap_size);

        return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
        struct rk_iommudata *data = dev->archdata.iommu;

        return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
        int i;

        /* Ignore error while disabling, just keep going */
        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
        rk_iommu_enable_stall(iommu);
        rk_iommu_disable_paging(iommu);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
        }
        rk_iommu_disable_stall(iommu);
        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
        struct iommu_domain *domain = iommu->domain;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int ret, i;

        ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
        if (ret)
                return ret;

        ret = rk_iommu_enable_stall(iommu);
        if (ret)
                goto out_disable_clocks;

        ret = rk_iommu_force_reset(iommu);
        if (ret)
                goto out_disable_stall;

        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
                               rk_domain->dt_dma);
                rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }

        ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
        rk_iommu_disable_stall(iommu);
out_disable_clocks:
        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
        return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int ret;

        /* Allow 'virtual devices' (e.g., drm) to detach from the domain */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return;

        dev_dbg(dev, "Detaching from iommu domain\n");

        /* iommu already detached */
        if (iommu->domain != domain)
                return;

        iommu->domain = NULL;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_del_init(&iommu->node);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        ret = pm_runtime_get_if_in_use(iommu->dev);
        WARN_ON_ONCE(ret < 0);
        if (ret > 0) {
                rk_iommu_disable(iommu);
                pm_runtime_put(iommu->dev);
        }
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
                struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
        int ret;

        /*
         * Allow 'virtual devices' (e.g., drm) to attach to domain.
         * Such a device does not belong to an iommu group.
         */
        iommu = rk_iommu_from_dev(dev);
        if (!iommu)
                return 0;

        dev_dbg(dev, "Attaching to iommu domain\n");

        /* iommu already attached */
        if (iommu->domain == domain)
                return 0;

        if (iommu->domain)
                rk_iommu_detach_device(iommu->domain, dev);

        iommu->domain = domain;

        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_add_tail(&iommu->node, &rk_domain->iommus);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

        ret = pm_runtime_get_if_in_use(iommu->dev);
        if (!ret || WARN_ON_ONCE(ret < 0))
                return 0;

        ret = rk_iommu_enable(iommu);
        if (ret)
                rk_iommu_detach_device(iommu->domain, dev);

        pm_runtime_put(iommu->dev);

        return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
        struct rk_iommu_domain *rk_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;

        if (!dma_dev)
                return NULL;

        rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
        if (!rk_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&rk_domain->domain))
                goto err_free_domain;

        /*
         * rk32xx iommus use a 2-level page table.
         * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
         * Allocate one 4 KiB page for each table.
         */
        rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
        if (!rk_domain->dt)
                goto err_put_cookie;

        rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
                                           SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
                dev_err(dma_dev, "DMA map error for DT\n");
                goto err_free_dt;
        }

        rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

        spin_lock_init(&rk_domain->iommus_lock);
        spin_lock_init(&rk_domain->dt_lock);
        INIT_LIST_HEAD(&rk_domain->iommus);

        rk_domain->domain.geometry.aperture_start = 0;
        rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
        rk_domain->domain.geometry.force_aperture = true;

        return &rk_domain->domain;

err_free_dt:
        free_page((unsigned long)rk_domain->dt);
err_put_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);
err_free_domain:
        kfree(rk_domain);

        return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        int i;

        WARN_ON(!list_empty(&rk_domain->iommus));

        for (i = 0; i < NUM_DT_ENTRIES; i++) {
                u32 dte = rk_domain->dt[i];

                if (rk_dte_is_pt_valid(dte)) {
                        phys_addr_t pt_phys = rk_dte_pt_address(dte);
                        u32 *page_table = phys_to_virt(pt_phys);

                        dma_unmap_single(dma_dev, pt_phys,
                                         SPAGE_SIZE, DMA_TO_DEVICE);
                        free_page((unsigned long)page_table);
                }
        }

        dma_unmap_single(dma_dev, rk_domain->dt_dma,
                         SPAGE_SIZE, DMA_TO_DEVICE);
        free_page((unsigned long)rk_domain->dt);

        if (domain->type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&rk_domain->domain);
        kfree(rk_domain);
}

static int rk_iommu_add_device(struct device *dev)
{
        struct iommu_group *group;
        struct rk_iommu *iommu;
        struct rk_iommudata *data;

        data = dev->archdata.iommu;
        if (!data)
                return -ENODEV;

        iommu = rk_iommu_from_dev(dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);
        iommu_group_put(group);

        iommu_device_link(&iommu->iommu, dev);
        data->link = device_link_add(dev, iommu->dev,
                                     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

        return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
        struct rk_iommu *iommu;
        struct rk_iommudata *data = dev->archdata.iommu;

        iommu = rk_iommu_from_dev(dev);

        device_link_del(data->link);
        iommu_device_unlink(&iommu->iommu, dev);
        iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
        struct rk_iommu *iommu;

        iommu = rk_iommu_from_dev(dev);

        return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
                             struct of_phandle_args *args)
{
        struct platform_device *iommu_dev;
        struct rk_iommudata *data;

        data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        iommu_dev = of_find_device_by_node(args->np);

        data->iommu = platform_get_drvdata(iommu_dev);
        dev->archdata.iommu = data;

        platform_device_put(iommu_dev);

        return 0;
}

static const struct iommu_ops rk_iommu_ops = {
        .domain_alloc = rk_iommu_domain_alloc,
        .domain_free = rk_iommu_domain_free,
        .attach_dev = rk_iommu_attach_device,
        .detach_dev = rk_iommu_detach_device,
        .map = rk_iommu_map,
        .unmap = rk_iommu_unmap,
        .add_device = rk_iommu_add_device,
        .remove_device = rk_iommu_remove_device,
        .iova_to_phys = rk_iommu_iova_to_phys,
        .device_group = rk_iommu_device_group,
        .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
        .of_xlate = rk_iommu_of_xlate,
};

static int rk_iommu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
        int err, i;

        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        platform_set_drvdata(pdev, iommu);
        iommu->dev = dev;
        iommu->num_mmu = 0;

        iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
                                    GFP_KERNEL);
        if (!iommu->bases)
                return -ENOMEM;

        for (i = 0; i < num_res; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        continue;
                iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(iommu->bases[i]))
                        continue;
                iommu->num_mmu++;
        }
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);

        iommu->num_irq = platform_irq_count(pdev);
        if (iommu->num_irq < 0)
                return iommu->num_irq;

        iommu->reset_disabled = device_property_read_bool(dev,
                                        "rockchip,disable-mmu-reset");

        iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
        iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
                                     sizeof(*iommu->clocks), GFP_KERNEL);
        if (!iommu->clocks)
                return -ENOMEM;

        for (i = 0; i < iommu->num_clocks; ++i)
                iommu->clocks[i].id = rk_iommu_clocks[i];

        /*
         * iommu clocks should be present for all new devices and devicetrees
         * but there are older devicetrees without clocks out in the wild.
         * So treat clocks as optional for the time being.
         */
        err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
        if (err == -ENOENT)
                iommu->num_clocks = 0;
        else if (err)
                return err;

        err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
        if (err)
                return err;

        iommu->group = iommu_group_alloc();
        if (IS_ERR(iommu->group)) {
                err = PTR_ERR(iommu->group);
                goto err_unprepare_clocks;
        }

        err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
        if (err)
                goto err_put_group;

        iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

        err = iommu_device_register(&iommu->iommu);
        if (err)
                goto err_remove_sysfs;

        /*
         * Use the first registered IOMMU device for the domain to use with
         * the DMA API, since a domain might not physically correspond to a
         * single IOMMU device.
         */
        if (!dma_dev)
                dma_dev = &pdev->dev;

        bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

        pm_runtime_enable(dev);

        for (i = 0; i < iommu->num_irq; i++) {
                int irq = platform_get_irq(pdev, i);

                if (irq < 0)
                        return irq;

                err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
                                       IRQF_SHARED, dev_name(dev), iommu);
                if (err) {
                        pm_runtime_disable(dev);
                        goto err_remove_sysfs;
                }
        }

        return 0;
err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
        iommu_group_put(iommu->group);
err_unprepare_clocks:
        clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
        return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
        struct rk_iommu *iommu = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < iommu->num_irq; i++) {
                int irq = platform_get_irq(pdev, i);

                devm_free_irq(iommu->dev, irq, iommu);
        }

        pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
        struct rk_iommu *iommu = dev_get_drvdata(dev);

        if (!iommu->domain)
                return 0;

        rk_iommu_disable(iommu);
        return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
        struct rk_iommu *iommu = dev_get_drvdata(dev);

        if (!iommu->domain)
                return 0;

        return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
        SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
        { .compatible = "rockchip,iommu" },
        { /* sentinel */ }
};

static struct platform_driver rk_iommu_driver = {
        .probe = rk_iommu_probe,
        .shutdown = rk_iommu_shutdown,
        .driver = {
                   .name = "rk_iommu",
                   .of_match_table = rk_iommu_dt_ids,
                   .pm = &rk_iommu_pm_ops,
                   .suppress_bind_attrs = true,
        },
};

static int __init rk_iommu_init(void)
{
        return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);
