root/drivers/iommu/dma-iommu.c

DEFINITIONS

This source file includes the following definitions:
  1. cookie_msi_granule
  2. cookie_alloc
  3. iommu_get_dma_cookie
  4. iommu_get_msi_cookie
  5. iommu_put_dma_cookie
  6. iommu_dma_get_resv_regions
  7. cookie_init_hw_msi_region
  8. iova_reserve_pci_windows
  9. iova_reserve_iommu_regions
  10. iommu_dma_flush_iotlb_all
  11. iommu_dma_init_domain
  12. dma_info_to_prot
  13. iommu_dma_alloc_iova
  14. iommu_dma_free_iova
  15. __iommu_dma_unmap
  16. __iommu_dma_map
  17. __iommu_dma_free_pages
  18. __iommu_dma_alloc_pages
  19. iommu_dma_alloc_remap
  20. __iommu_dma_mmap
  21. iommu_dma_sync_single_for_cpu
  22. iommu_dma_sync_single_for_device
  23. iommu_dma_sync_sg_for_cpu
  24. iommu_dma_sync_sg_for_device
  25. iommu_dma_map_page
  26. iommu_dma_unmap_page
  27. __finalise_sg
  28. __invalidate_sg
  29. iommu_dma_map_sg
  30. iommu_dma_unmap_sg
  31. iommu_dma_map_resource
  32. iommu_dma_unmap_resource
  33. __iommu_dma_free
  34. iommu_dma_free
  35. iommu_dma_alloc_pages
  36. iommu_dma_alloc
  37. iommu_dma_mmap
  38. iommu_dma_get_sgtable
  39. iommu_dma_get_merge_boundary
  40. iommu_setup_dma_ops
  41. iommu_dma_get_msi_page
  42. iommu_dma_prepare_msi
  43. iommu_dma_compose_msi_msg
  44. iommu_dma_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * A fairly generic DMA-API to IOMMU-API glue layer.
   4  *
   5  * Copyright (C) 2014-2015 ARM Ltd.
   6  *
   7  * based in part on arch/arm/mm/dma-mapping.c:
   8  * Copyright (C) 2000-2004 Russell King
   9  */
  10 
  11 #include <linux/acpi_iort.h>
  12 #include <linux/device.h>
  13 #include <linux/dma-contiguous.h>
  14 #include <linux/dma-iommu.h>
  15 #include <linux/dma-noncoherent.h>
  16 #include <linux/gfp.h>
  17 #include <linux/huge_mm.h>
  18 #include <linux/iommu.h>
  19 #include <linux/iova.h>
  20 #include <linux/irq.h>
  21 #include <linux/mm.h>
  22 #include <linux/mutex.h>
  23 #include <linux/pci.h>
  24 #include <linux/scatterlist.h>
  25 #include <linux/vmalloc.h>
  26 
  27 struct iommu_dma_msi_page {
  28         struct list_head        list;
  29         dma_addr_t              iova;
  30         phys_addr_t             phys;
  31 };
  32 
  33 enum iommu_dma_cookie_type {
  34         IOMMU_DMA_IOVA_COOKIE,
  35         IOMMU_DMA_MSI_COOKIE,
  36 };
  37 
  38 struct iommu_dma_cookie {
  39         enum iommu_dma_cookie_type      type;
  40         union {
  41                 /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
  42                 struct iova_domain      iovad;
  43                 /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
  44                 dma_addr_t              msi_iova;
  45         };
  46         struct list_head                msi_page_list;
  47 
  48         /* Domain for flush queue callback; NULL if flush queue not in use */
  49         struct iommu_domain             *fq_domain;
  50 };
  51 
  52 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
  53 {
  54         if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
  55                 return cookie->iovad.granule;
  56         return PAGE_SIZE;
  57 }
  58 
  59 static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
  60 {
  61         struct iommu_dma_cookie *cookie;
  62 
  63         cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
  64         if (cookie) {
  65                 INIT_LIST_HEAD(&cookie->msi_page_list);
  66                 cookie->type = type;
  67         }
  68         return cookie;
  69 }
  70 
  71 /**
  72  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
  73  * @domain: IOMMU domain to prepare for DMA-API usage
  74  *
  75  * IOMMU drivers should normally call this from their domain_alloc
  76  * callback when domain->type == IOMMU_DOMAIN_DMA.
  77  */
  78 int iommu_get_dma_cookie(struct iommu_domain *domain)
  79 {
  80         if (domain->iova_cookie)
  81                 return -EEXIST;
  82 
  83         domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
  84         if (!domain->iova_cookie)
  85                 return -ENOMEM;
  86 
  87         return 0;
  88 }
  89 EXPORT_SYMBOL(iommu_get_dma_cookie);
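
/*
 * Editor's illustrative sketch (not part of this file): roughly how an
 * IOMMU driver's domain_alloc callback might attach the cookie. The
 * "my_iommu_domain_alloc" name is hypothetical, <linux/slab.h> is assumed,
 * and a real driver would embed the iommu_domain in its own domain struct.
 */
static struct iommu_domain *my_iommu_domain_alloc(unsigned type)
{
        struct iommu_domain *domain = kzalloc(sizeof(*domain), GFP_KERNEL);

        if (!domain)
                return NULL;

        /* DMA domains need a cookie before dma-iommu can manage IOVAs */
        if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(domain)) {
                kfree(domain);
                return NULL;
        }
        return domain;
}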
  90 
  91 /**
  92  * iommu_get_msi_cookie - Acquire just MSI remapping resources
  93  * @domain: IOMMU domain to prepare
  94  * @base: Start address of IOVA region for MSI mappings
  95  *
  96  * Users who manage their own IOVA allocation and do not want DMA API support,
  97  * but would still like to take advantage of automatic MSI remapping, can use
  98  * this to initialise their own domain appropriately. Users should reserve a
  99  * contiguous IOVA region, starting at @base, large enough to accommodate the
 100  * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 101  * used by the devices attached to @domain.
 102  */
 103 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 104 {
 105         struct iommu_dma_cookie *cookie;
 106 
 107         if (domain->type != IOMMU_DOMAIN_UNMANAGED)
 108                 return -EINVAL;
 109 
 110         if (domain->iova_cookie)
 111                 return -EEXIST;
 112 
 113         cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 114         if (!cookie)
 115                 return -ENOMEM;
 116 
 117         cookie->msi_iova = base;
 118         domain->iova_cookie = cookie;
 119         return 0;
 120 }
 121 EXPORT_SYMBOL(iommu_get_msi_cookie);
 122 
 123 /**
 124  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 125  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 126  *          iommu_get_msi_cookie()
 127  *
 128  * IOMMU drivers should normally call this from their domain_free callback.
 129  */
 130 void iommu_put_dma_cookie(struct iommu_domain *domain)
 131 {
 132         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 133         struct iommu_dma_msi_page *msi, *tmp;
 134 
 135         if (!cookie)
 136                 return;
 137 
 138         if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 139                 put_iova_domain(&cookie->iovad);
 140 
 141         list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
 142                 list_del(&msi->list);
 143                 kfree(msi);
 144         }
 145         kfree(cookie);
 146         domain->iova_cookie = NULL;
 147 }
 148 EXPORT_SYMBOL(iommu_put_dma_cookie);
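
/*
 * Editor's illustrative sketch (not part of this file): the matching
 * hypothetical domain_free callback. iommu_put_dma_cookie() bails out
 * early if no cookie was ever attached, so it is safe to call
 * unconditionally.
 */
static void my_iommu_domain_free(struct iommu_domain *domain)
{
        iommu_put_dma_cookie(domain);
        kfree(domain);
}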
 149 
 150 /**
 151  * iommu_dma_get_resv_regions - Reserved region driver helper
 152  * @dev: Device from iommu_get_resv_regions()
 153  * @list: Reserved region list from iommu_get_resv_regions()
 154  *
 155  * IOMMU drivers can use this to implement their .get_resv_regions callback
 156  * for general non-IOMMU-specific reservations. Currently, this covers GICv3
  157  * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 158  * reservation.
 159  */
 160 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 161 {
 162 
 163         if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
 164                 iort_iommu_msi_get_resv_regions(dev, list);
 165 
 166 }
 167 EXPORT_SYMBOL(iommu_dma_get_resv_regions);
 168 
 169 static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
 170                 phys_addr_t start, phys_addr_t end)
 171 {
 172         struct iova_domain *iovad = &cookie->iovad;
 173         struct iommu_dma_msi_page *msi_page;
 174         int i, num_pages;
 175 
 176         start -= iova_offset(iovad, start);
 177         num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
 178 
 179         for (i = 0; i < num_pages; i++) {
 180                 msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
 181                 if (!msi_page)
 182                         return -ENOMEM;
 183 
 184                 msi_page->phys = start;
 185                 msi_page->iova = start;
 186                 INIT_LIST_HEAD(&msi_page->list);
 187                 list_add(&msi_page->list, &cookie->msi_page_list);
 188                 start += iovad->granule;
 189         }
 190 
 191         return 0;
 192 }
 193 
 194 static int iova_reserve_pci_windows(struct pci_dev *dev,
 195                 struct iova_domain *iovad)
 196 {
 197         struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
 198         struct resource_entry *window;
 199         unsigned long lo, hi;
 200         phys_addr_t start = 0, end;
 201 
 202         resource_list_for_each_entry(window, &bridge->windows) {
 203                 if (resource_type(window->res) != IORESOURCE_MEM)
 204                         continue;
 205 
 206                 lo = iova_pfn(iovad, window->res->start - window->offset);
 207                 hi = iova_pfn(iovad, window->res->end - window->offset);
 208                 reserve_iova(iovad, lo, hi);
 209         }
 210 
 211         /* Get reserved DMA windows from host bridge */
 212         resource_list_for_each_entry(window, &bridge->dma_ranges) {
 213                 end = window->res->start - window->offset;
 214 resv_iova:
 215                 if (end > start) {
 216                         lo = iova_pfn(iovad, start);
 217                         hi = iova_pfn(iovad, end);
 218                         reserve_iova(iovad, lo, hi);
 219                 } else {
 220                         /* dma_ranges list should be sorted */
 221                         dev_err(&dev->dev, "Failed to reserve IOVA\n");
 222                         return -EINVAL;
 223                 }
 224 
 225                 start = window->res->end - window->offset + 1;
 226                 /* If window is last entry */
 227                 if (window->node.next == &bridge->dma_ranges &&
 228                     end != ~(phys_addr_t)0) {
 229                         end = ~(phys_addr_t)0;
 230                         goto resv_iova;
 231                 }
 232         }
 233 
 234         return 0;
 235 }
 236 
 237 static int iova_reserve_iommu_regions(struct device *dev,
 238                 struct iommu_domain *domain)
 239 {
 240         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 241         struct iova_domain *iovad = &cookie->iovad;
 242         struct iommu_resv_region *region;
 243         LIST_HEAD(resv_regions);
 244         int ret = 0;
 245 
 246         if (dev_is_pci(dev)) {
 247                 ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 248                 if (ret)
 249                         return ret;
 250         }
 251 
 252         iommu_get_resv_regions(dev, &resv_regions);
 253         list_for_each_entry(region, &resv_regions, list) {
 254                 unsigned long lo, hi;
 255 
 256                 /* We ARE the software that manages these! */
 257                 if (region->type == IOMMU_RESV_SW_MSI)
 258                         continue;
 259 
 260                 lo = iova_pfn(iovad, region->start);
 261                 hi = iova_pfn(iovad, region->start + region->length - 1);
 262                 reserve_iova(iovad, lo, hi);
 263 
 264                 if (region->type == IOMMU_RESV_MSI)
 265                         ret = cookie_init_hw_msi_region(cookie, region->start,
 266                                         region->start + region->length);
 267                 if (ret)
 268                         break;
 269         }
 270         iommu_put_resv_regions(dev, &resv_regions);
 271 
 272         return ret;
 273 }
 274 
 275 static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
 276 {
 277         struct iommu_dma_cookie *cookie;
 278         struct iommu_domain *domain;
 279 
 280         cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
 281         domain = cookie->fq_domain;
 282         /*
  283          * An IOMMU driver that supports DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
  284          * must provide a non-NULL ops->flush_iotlb_all.
 285          */
 286         domain->ops->flush_iotlb_all(domain);
 287 }
 288 
 289 /**
 290  * iommu_dma_init_domain - Initialise a DMA mapping domain
 291  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 292  * @base: IOVA at which the mappable address space starts
 293  * @size: Size of IOVA space
 294  * @dev: Device the domain is being initialised for
 295  *
 296  * @base and @size should be exact multiples of IOMMU page granularity to
 297  * avoid rounding surprises. If necessary, we reserve the page at address 0
 298  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 299  * any change which could make prior IOVAs invalid will fail.
 300  */
 301 static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 302                 u64 size, struct device *dev)
 303 {
 304         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 305         unsigned long order, base_pfn;
 306         struct iova_domain *iovad;
 307         int attr;
 308 
 309         if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 310                 return -EINVAL;
 311 
 312         iovad = &cookie->iovad;
 313 
 314         /* Use the smallest supported page size for IOVA granularity */
 315         order = __ffs(domain->pgsize_bitmap);
 316         base_pfn = max_t(unsigned long, 1, base >> order);
 317 
 318         /* Check the domain allows at least some access to the device... */
 319         if (domain->geometry.force_aperture) {
 320                 if (base > domain->geometry.aperture_end ||
 321                     base + size <= domain->geometry.aperture_start) {
 322                         pr_warn("specified DMA range outside IOMMU capability\n");
 323                         return -EFAULT;
 324                 }
 325                 /* ...then finally give it a kicking to make sure it fits */
 326                 base_pfn = max_t(unsigned long, base_pfn,
 327                                 domain->geometry.aperture_start >> order);
 328         }
 329 
 330         /* start_pfn is always nonzero for an already-initialised domain */
 331         if (iovad->start_pfn) {
 332                 if (1UL << order != iovad->granule ||
 333                     base_pfn != iovad->start_pfn) {
 334                         pr_warn("Incompatible range for DMA domain\n");
 335                         return -EFAULT;
 336                 }
 337 
 338                 return 0;
 339         }
 340 
 341         init_iova_domain(iovad, 1UL << order, base_pfn);
 342 
 343         if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
 344                         DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
 345                 cookie->fq_domain = domain;
 346                 init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
 347         }
 348 
 349         if (!dev)
 350                 return 0;
 351 
 352         return iova_reserve_iommu_regions(dev, domain);
 353 }
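
/*
 * Editor's note: with a typical pgsize_bitmap of 4K | 2M | 1G, __ffs()
 * above yields order 12, so the IOVA granule becomes 4KB and base_pfn is
 * @base >> 12, clamped to at least 1 so that IOVA 0 remains invalid.
 */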
 354 
 355 /**
 356  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 357  *                    page flags.
 358  * @dir: Direction of DMA transfer
 359  * @coherent: Is the DMA master cache-coherent?
 360  * @attrs: DMA attributes for the mapping
 361  *
 362  * Return: corresponding IOMMU API page protection flags
 363  */
 364 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 365                      unsigned long attrs)
 366 {
 367         int prot = coherent ? IOMMU_CACHE : 0;
 368 
 369         if (attrs & DMA_ATTR_PRIVILEGED)
 370                 prot |= IOMMU_PRIV;
 371 
 372         switch (dir) {
 373         case DMA_BIDIRECTIONAL:
 374                 return prot | IOMMU_READ | IOMMU_WRITE;
 375         case DMA_TO_DEVICE:
 376                 return prot | IOMMU_READ;
 377         case DMA_FROM_DEVICE:
 378                 return prot | IOMMU_WRITE;
 379         default:
 380                 return 0;
 381         }
 382 }
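
/*
 * Editor's note, as a worked example: a coherent device mapping a buffer
 * DMA_TO_DEVICE with no attributes gets IOMMU_CACHE | IOMMU_READ, while a
 * non-coherent DMA_FROM_DEVICE mapping with DMA_ATTR_PRIVILEGED gets
 * IOMMU_PRIV | IOMMU_WRITE.
 */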
 383 
 384 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 385                 size_t size, dma_addr_t dma_limit, struct device *dev)
 386 {
 387         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 388         struct iova_domain *iovad = &cookie->iovad;
 389         unsigned long shift, iova_len, iova = 0;
 390 
 391         if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
 392                 cookie->msi_iova += size;
 393                 return cookie->msi_iova - size;
 394         }
 395 
 396         shift = iova_shift(iovad);
 397         iova_len = size >> shift;
 398         /*
 399          * Freeing non-power-of-two-sized allocations back into the IOVA caches
 400          * will come back to bite us badly, so we have to waste a bit of space
 401          * rounding up anything cacheable to make sure that can't happen. The
 402          * order of the unadjusted size will still match upon freeing.
 403          */
 404         if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
 405                 iova_len = roundup_pow_of_two(iova_len);
 406 
 407         if (dev->bus_dma_mask)
 408                 dma_limit &= dev->bus_dma_mask;
 409 
 410         if (domain->geometry.force_aperture)
 411                 dma_limit = min(dma_limit, domain->geometry.aperture_end);
 412 
 413         /* Try to get PCI devices a SAC address */
 414         if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
 415                 iova = alloc_iova_fast(iovad, iova_len,
 416                                        DMA_BIT_MASK(32) >> shift, false);
 417 
 418         if (!iova)
 419                 iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
 420                                        true);
 421 
 422         return (dma_addr_t)iova << shift;
 423 }
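
/*
 * Editor's note: with a 4KB granule, a 5-page (20KB) request is padded to
 * 8 pages above so the power-of-two IOVA rcaches never see an odd size on
 * free; requests at or beyond the rcache size limit are left unrounded.
 */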
 424 
 425 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 426                 dma_addr_t iova, size_t size)
 427 {
 428         struct iova_domain *iovad = &cookie->iovad;
 429 
 430         /* The MSI case is only ever cleaning up its most recent allocation */
 431         if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 432                 cookie->msi_iova -= size;
 433         else if (cookie->fq_domain)     /* non-strict mode */
 434                 queue_iova(iovad, iova_pfn(iovad, iova),
 435                                 size >> iova_shift(iovad), 0);
 436         else
 437                 free_iova_fast(iovad, iova_pfn(iovad, iova),
 438                                 size >> iova_shift(iovad));
 439 }
 440 
 441 static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 442                 size_t size)
 443 {
 444         struct iommu_domain *domain = iommu_get_dma_domain(dev);
 445         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 446         struct iova_domain *iovad = &cookie->iovad;
 447         size_t iova_off = iova_offset(iovad, dma_addr);
 448         struct iommu_iotlb_gather iotlb_gather;
 449         size_t unmapped;
 450 
 451         dma_addr -= iova_off;
 452         size = iova_align(iovad, size + iova_off);
 453         iommu_iotlb_gather_init(&iotlb_gather);
 454 
 455         unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
 456         WARN_ON(unmapped != size);
 457 
 458         if (!cookie->fq_domain)
 459                 iommu_tlb_sync(domain, &iotlb_gather);
 460         iommu_dma_free_iova(cookie, dma_addr, size);
 461 }
 462 
 463 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 464                 size_t size, int prot)
 465 {
 466         struct iommu_domain *domain = iommu_get_dma_domain(dev);
 467         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 468         struct iova_domain *iovad = &cookie->iovad;
 469         size_t iova_off = iova_offset(iovad, phys);
 470         dma_addr_t iova;
 471 
 472         size = iova_align(iovad, size + iova_off);
 473 
 474         iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 475         if (!iova)
 476                 return DMA_MAPPING_ERROR;
 477 
 478         if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
 479                 iommu_dma_free_iova(cookie, iova, size);
 480                 return DMA_MAPPING_ERROR;
 481         }
 482         return iova + iova_off;
 483 }
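
/*
 * Editor's note, as a worked example: mapping phys 0x80001234 with size
 * 0x200 and a 4KB granule gives iova_off = 0x234 and an aligned size of
 * 0x1000, so the whole page at 0x80001000 is mapped and the returned
 * handle is iova + 0x234, preserving the caller's sub-page offset.
 */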
 484 
 485 static void __iommu_dma_free_pages(struct page **pages, int count)
 486 {
 487         while (count--)
 488                 __free_page(pages[count]);
 489         kvfree(pages);
 490 }
 491 
 492 static struct page **__iommu_dma_alloc_pages(struct device *dev,
 493                 unsigned int count, unsigned long order_mask, gfp_t gfp)
 494 {
 495         struct page **pages;
 496         unsigned int i = 0, nid = dev_to_node(dev);
 497 
 498         order_mask &= (2U << MAX_ORDER) - 1;
 499         if (!order_mask)
 500                 return NULL;
 501 
 502         pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
 503         if (!pages)
 504                 return NULL;
 505 
  506         /* IOMMU can map any pages, so highmem can also be used here */
 507         gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 508 
 509         while (count) {
 510                 struct page *page = NULL;
 511                 unsigned int order_size;
 512 
 513                 /*
 514                  * Higher-order allocations are a convenience rather
 515                  * than a necessity, hence using __GFP_NORETRY until
 516                  * falling back to minimum-order allocations.
 517                  */
 518                 for (order_mask &= (2U << __fls(count)) - 1;
 519                      order_mask; order_mask &= ~order_size) {
 520                         unsigned int order = __fls(order_mask);
 521                         gfp_t alloc_flags = gfp;
 522 
 523                         order_size = 1U << order;
 524                         if (order_mask > order_size)
 525                                 alloc_flags |= __GFP_NORETRY;
 526                         page = alloc_pages_node(nid, alloc_flags, order);
 527                         if (!page)
 528                                 continue;
 529                         if (!order)
 530                                 break;
 531                         if (!PageCompound(page)) {
 532                                 split_page(page, order);
 533                                 break;
 534                         } else if (!split_huge_page(page)) {
 535                                 break;
 536                         }
 537                         __free_pages(page, order);
 538                 }
 539                 if (!page) {
 540                         __iommu_dma_free_pages(pages, i);
 541                         return NULL;
 542                 }
 543                 count -= order_size;
 544                 while (order_size--)
 545                         pages[i++] = page++;
 546         }
 547         return pages;
 548 }
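
/*
 * Editor's note: for an IOMMU supporting 4K and 2M pages, order_mask has
 * bits 0 and 9 set, so the loop above opportunistically tries order-9
 * (2MB) chunks with __GFP_NORETRY and quietly falls back to single pages,
 * splitting any higher-order allocation so that the returned array always
 * holds order-0 pages.
 */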
 549 
 550 /**
 551  * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 552  * @dev: Device to allocate memory for. Must be a real device
 553  *       attached to an iommu_dma_domain
 554  * @size: Size of buffer in bytes
 555  * @dma_handle: Out argument for allocated DMA handle
 556  * @gfp: Allocation flags
 557  * @attrs: DMA attributes for this allocation
 558  *
 559  * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 560  * but an IOMMU which supports smaller pages might not map the whole thing.
 561  *
 562  * Return: Mapped virtual address, or NULL on failure.
 563  */
 564 static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 565                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 566 {
 567         struct iommu_domain *domain = iommu_get_dma_domain(dev);
 568         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 569         struct iova_domain *iovad = &cookie->iovad;
 570         bool coherent = dev_is_dma_coherent(dev);
 571         int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 572         pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 573         unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 574         struct page **pages;
 575         struct sg_table sgt;
 576         dma_addr_t iova;
 577         void *vaddr;
 578 
 579         *dma_handle = DMA_MAPPING_ERROR;
 580 
 581         min_size = alloc_sizes & -alloc_sizes;
 582         if (min_size < PAGE_SIZE) {
 583                 min_size = PAGE_SIZE;
 584                 alloc_sizes |= PAGE_SIZE;
 585         } else {
 586                 size = ALIGN(size, min_size);
 587         }
 588         if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
 589                 alloc_sizes = min_size;
 590 
 591         count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 592         pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
 593                                         gfp);
 594         if (!pages)
 595                 return NULL;
 596 
 597         size = iova_align(iovad, size);
 598         iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
 599         if (!iova)
 600                 goto out_free_pages;
 601 
 602         if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
 603                 goto out_free_iova;
 604 
 605         if (!(ioprot & IOMMU_CACHE)) {
 606                 struct scatterlist *sg;
 607                 int i;
 608 
 609                 for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
 610                         arch_dma_prep_coherent(sg_page(sg), sg->length);
 611         }
 612 
 613         if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
 614                         < size)
 615                 goto out_free_sg;
 616 
 617         vaddr = dma_common_pages_remap(pages, size, prot,
 618                         __builtin_return_address(0));
 619         if (!vaddr)
 620                 goto out_unmap;
 621 
 622         *dma_handle = iova;
 623         sg_free_table(&sgt);
 624         return vaddr;
 625 
 626 out_unmap:
 627         __iommu_dma_unmap(dev, iova, size);
 628 out_free_sg:
 629         sg_free_table(&sgt);
 630 out_free_iova:
 631         iommu_dma_free_iova(cookie, iova, size);
 632 out_free_pages:
 633         __iommu_dma_free_pages(pages, count);
 634         return NULL;
 635 }
 636 
 637 /**
 638  * __iommu_dma_mmap - Map a buffer into provided user VMA
 639  * @pages: Array representing buffer from __iommu_dma_alloc()
 640  * @size: Size of buffer in bytes
 641  * @vma: VMA describing requested userspace mapping
 642  *
 643  * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 644  * for verifying the correct size and protection of @vma beforehand.
 645  */
 646 static int __iommu_dma_mmap(struct page **pages, size_t size,
 647                 struct vm_area_struct *vma)
 648 {
 649         return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 650 }
 651 
 652 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 653                 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 654 {
 655         phys_addr_t phys;
 656 
 657         if (dev_is_dma_coherent(dev))
 658                 return;
 659 
 660         phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
 661         arch_sync_dma_for_cpu(dev, phys, size, dir);
 662 }
 663 
 664 static void iommu_dma_sync_single_for_device(struct device *dev,
 665                 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 666 {
 667         phys_addr_t phys;
 668 
 669         if (dev_is_dma_coherent(dev))
 670                 return;
 671 
 672         phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
 673         arch_sync_dma_for_device(dev, phys, size, dir);
 674 }
 675 
 676 static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 677                 struct scatterlist *sgl, int nelems,
 678                 enum dma_data_direction dir)
 679 {
 680         struct scatterlist *sg;
 681         int i;
 682 
 683         if (dev_is_dma_coherent(dev))
 684                 return;
 685 
 686         for_each_sg(sgl, sg, nelems, i)
 687                 arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
 688 }
 689 
 690 static void iommu_dma_sync_sg_for_device(struct device *dev,
 691                 struct scatterlist *sgl, int nelems,
 692                 enum dma_data_direction dir)
 693 {
 694         struct scatterlist *sg;
 695         int i;
 696 
 697         if (dev_is_dma_coherent(dev))
 698                 return;
 699 
 700         for_each_sg(sgl, sg, nelems, i)
 701                 arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 702 }
 703 
 704 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 705                 unsigned long offset, size_t size, enum dma_data_direction dir,
 706                 unsigned long attrs)
 707 {
 708         phys_addr_t phys = page_to_phys(page) + offset;
 709         bool coherent = dev_is_dma_coherent(dev);
 710         int prot = dma_info_to_prot(dir, coherent, attrs);
 711         dma_addr_t dma_handle;
 712 
  713         dma_handle = __iommu_dma_map(dev, phys, size, prot);
 714         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 715             dma_handle != DMA_MAPPING_ERROR)
 716                 arch_sync_dma_for_device(dev, phys, size, dir);
 717         return dma_handle;
 718 }
 719 
 720 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 721                 size_t size, enum dma_data_direction dir, unsigned long attrs)
 722 {
 723         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 724                 iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
 725         __iommu_dma_unmap(dev, dma_handle, size);
 726 }
 727 
 728 /*
 729  * Prepare a successfully-mapped scatterlist to give back to the caller.
 730  *
 731  * At this point the segments are already laid out by iommu_dma_map_sg() to
 732  * avoid individually crossing any boundaries, so we merely need to check a
 733  * segment's start address to avoid concatenating across one.
 734  */
 735 static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 736                 dma_addr_t dma_addr)
 737 {
 738         struct scatterlist *s, *cur = sg;
 739         unsigned long seg_mask = dma_get_seg_boundary(dev);
 740         unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
 741         int i, count = 0;
 742 
 743         for_each_sg(sg, s, nents, i) {
 744                 /* Restore this segment's original unaligned fields first */
 745                 unsigned int s_iova_off = sg_dma_address(s);
 746                 unsigned int s_length = sg_dma_len(s);
 747                 unsigned int s_iova_len = s->length;
 748 
 749                 s->offset += s_iova_off;
 750                 s->length = s_length;
 751                 sg_dma_address(s) = DMA_MAPPING_ERROR;
 752                 sg_dma_len(s) = 0;
 753 
 754                 /*
 755                  * Now fill in the real DMA data. If...
 756                  * - there is a valid output segment to append to
 757                  * - and this segment starts on an IOVA page boundary
 758                  * - but doesn't fall at a segment boundary
 759                  * - and wouldn't make the resulting output segment too long
 760                  */
 761                 if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
 762                     (max_len - cur_len >= s_length)) {
 763                         /* ...then concatenate it with the previous one */
 764                         cur_len += s_length;
 765                 } else {
 766                         /* Otherwise start the next output segment */
 767                         if (i > 0)
 768                                 cur = sg_next(cur);
 769                         cur_len = s_length;
 770                         count++;
 771 
 772                         sg_dma_address(cur) = dma_addr + s_iova_off;
 773                 }
 774 
 775                 sg_dma_len(cur) = cur_len;
 776                 dma_addr += s_iova_len;
 777 
 778                 if (s_length + s_iova_off < s_iova_len)
 779                         cur_len = 0;
 780         }
 781         return count;
 782 }
 783 
 784 /*
 785  * If mapping failed, then just restore the original list,
 786  * but making sure the DMA fields are invalidated.
 787  */
 788 static void __invalidate_sg(struct scatterlist *sg, int nents)
 789 {
 790         struct scatterlist *s;
 791         int i;
 792 
 793         for_each_sg(sg, s, nents, i) {
 794                 if (sg_dma_address(s) != DMA_MAPPING_ERROR)
 795                         s->offset += sg_dma_address(s);
 796                 if (sg_dma_len(s))
 797                         s->length = sg_dma_len(s);
 798                 sg_dma_address(s) = DMA_MAPPING_ERROR;
 799                 sg_dma_len(s) = 0;
 800         }
 801 }
 802 
 803 /*
 804  * The DMA API client is passing in a scatterlist which could describe
 805  * any old buffer layout, but the IOMMU API requires everything to be
 806  * aligned to IOMMU pages. Hence the need for this complicated bit of
 807  * impedance-matching, to be able to hand off a suitably-aligned list,
 808  * but still preserve the original offsets and sizes for the caller.
 809  */
 810 static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 811                 int nents, enum dma_data_direction dir, unsigned long attrs)
 812 {
 813         struct iommu_domain *domain = iommu_get_dma_domain(dev);
 814         struct iommu_dma_cookie *cookie = domain->iova_cookie;
 815         struct iova_domain *iovad = &cookie->iovad;
 816         struct scatterlist *s, *prev = NULL;
 817         int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
 818         dma_addr_t iova;
 819         size_t iova_len = 0;
 820         unsigned long mask = dma_get_seg_boundary(dev);
 821         int i;
 822 
 823         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 824                 iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 825 
 826         /*
 827          * Work out how much IOVA space we need, and align the segments to
 828          * IOVA granules for the IOMMU driver to handle. With some clever
 829          * trickery we can modify the list in-place, but reversibly, by
 830          * stashing the unaligned parts in the as-yet-unused DMA fields.
 831          */
 832         for_each_sg(sg, s, nents, i) {
 833                 size_t s_iova_off = iova_offset(iovad, s->offset);
 834                 size_t s_length = s->length;
 835                 size_t pad_len = (mask - iova_len + 1) & mask;
 836 
 837                 sg_dma_address(s) = s_iova_off;
 838                 sg_dma_len(s) = s_length;
 839                 s->offset -= s_iova_off;
 840                 s_length = iova_align(iovad, s_length + s_iova_off);
 841                 s->length = s_length;
 842 
 843                 /*
 844                  * Due to the alignment of our single IOVA allocation, we can
 845                  * depend on these assumptions about the segment boundary mask:
 846                  * - If mask size >= IOVA size, then the IOVA range cannot
 847                  *   possibly fall across a boundary, so we don't care.
 848                  * - If mask size < IOVA size, then the IOVA range must start
 849                  *   exactly on a boundary, therefore we can lay things out
 850                  *   based purely on segment lengths without needing to know
 851                  *   the actual addresses beforehand.
 852                  * - The mask must be a power of 2, so pad_len == 0 if
 853                  *   iova_len == 0, thus we cannot dereference prev the first
 854                  *   time through here (i.e. before it has a meaningful value).
 855                  */
 856                 if (pad_len && pad_len < s_length - 1) {
 857                         prev->length += pad_len;
 858                         iova_len += pad_len;
 859                 }
 860 
 861                 iova_len += s_length;
 862                 prev = s;
 863         }
 864 
 865         iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 866         if (!iova)
 867                 goto out_restore_sg;
 868 
 869         /*
 870          * We'll leave any physical concatenation to the IOMMU driver's
 871          * implementation - it knows better than we do.
 872          */
 873         if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
 874                 goto out_free_iova;
 875 
 876         return __finalise_sg(dev, sg, nents, iova);
 877 
 878 out_free_iova:
 879         iommu_dma_free_iova(cookie, iova, iova_len);
 880 out_restore_sg:
 881         __invalidate_sg(sg, nents);
 882         return 0;
 883 }
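
/*
 * Editor's note, tracing one segment through the trick above: a segment
 * at page offset 0x100 with length 0x1e00 (4KB granule) temporarily
 * stashes 0x100 in sg_dma_address() and 0x1e00 in sg_dma_len(), is padded
 * to a 0x2000-byte IOVA-aligned chunk for iommu_map_sg(), and then
 * __finalise_sg() restores the original fields and reports the real DMA
 * address as the allocated IOVA plus the stashed 0x100 offset.
 */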
 884 
 885 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 886                 int nents, enum dma_data_direction dir, unsigned long attrs)
 887 {
 888         dma_addr_t start, end;
 889         struct scatterlist *tmp;
 890         int i;
 891 
 892         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 893                 iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 894 
 895         /*
 896          * The scatterlist segments are mapped into a single
 897          * contiguous IOVA allocation, so this is incredibly easy.
 898          */
 899         start = sg_dma_address(sg);
 900         for_each_sg(sg_next(sg), tmp, nents - 1, i) {
 901                 if (sg_dma_len(tmp) == 0)
 902                         break;
 903                 sg = tmp;
 904         }
 905         end = sg_dma_address(sg) + sg_dma_len(sg);
 906         __iommu_dma_unmap(dev, start, end - start);
 907 }
 908 
 909 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 910                 size_t size, enum dma_data_direction dir, unsigned long attrs)
 911 {
 912         return __iommu_dma_map(dev, phys, size,
 913                         dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
 914 }
 915 
 916 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 917                 size_t size, enum dma_data_direction dir, unsigned long attrs)
 918 {
 919         __iommu_dma_unmap(dev, handle, size);
 920 }
 921 
 922 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 923 {
 924         size_t alloc_size = PAGE_ALIGN(size);
 925         int count = alloc_size >> PAGE_SHIFT;
 926         struct page *page = NULL, **pages = NULL;
 927 
 928         /* Non-coherent atomic allocation? Easy */
 929         if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 930             dma_free_from_pool(cpu_addr, alloc_size))
 931                 return;
 932 
 933         if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 934                 /*
  935          * If the address is remapped, then it's either non-coherent
 936                  * or highmem CMA, or an iommu_dma_alloc_remap() construction.
 937                  */
 938                 pages = dma_common_find_pages(cpu_addr);
 939                 if (!pages)
 940                         page = vmalloc_to_page(cpu_addr);
 941                 dma_common_free_remap(cpu_addr, alloc_size);
 942         } else {
 943                 /* Lowmem means a coherent atomic or CMA allocation */
 944                 page = virt_to_page(cpu_addr);
 945         }
 946 
 947         if (pages)
 948                 __iommu_dma_free_pages(pages, count);
 949         if (page)
 950                 dma_free_contiguous(dev, page, alloc_size);
 951 }
 952 
 953 static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 954                 dma_addr_t handle, unsigned long attrs)
 955 {
 956         __iommu_dma_unmap(dev, handle, size);
 957         __iommu_dma_free(dev, size, cpu_addr);
 958 }
 959 
 960 static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 961                 struct page **pagep, gfp_t gfp, unsigned long attrs)
 962 {
 963         bool coherent = dev_is_dma_coherent(dev);
 964         size_t alloc_size = PAGE_ALIGN(size);
 965         int node = dev_to_node(dev);
 966         struct page *page = NULL;
 967         void *cpu_addr;
 968 
 969         page = dma_alloc_contiguous(dev, alloc_size, gfp);
 970         if (!page)
 971                 page = alloc_pages_node(node, gfp, get_order(alloc_size));
 972         if (!page)
 973                 return NULL;
 974 
 975         if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
 976                 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 977 
 978                 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 979                                 prot, __builtin_return_address(0));
 980                 if (!cpu_addr)
 981                         goto out_free_pages;
 982 
 983                 if (!coherent)
 984                         arch_dma_prep_coherent(page, size);
 985         } else {
 986                 cpu_addr = page_address(page);
 987         }
 988 
 989         *pagep = page;
 990         memset(cpu_addr, 0, alloc_size);
 991         return cpu_addr;
 992 out_free_pages:
 993         dma_free_contiguous(dev, page, alloc_size);
 994         return NULL;
 995 }
 996 
 997 static void *iommu_dma_alloc(struct device *dev, size_t size,
 998                 dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 999 {
1000         bool coherent = dev_is_dma_coherent(dev);
1001         int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1002         struct page *page = NULL;
1003         void *cpu_addr;
1004 
1005         gfp |= __GFP_ZERO;
1006 
1007         if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
1008             !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
1009                 return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
1010 
1011         if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1012             !gfpflags_allow_blocking(gfp) && !coherent)
1013                 cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
1014         else
1015                 cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1016         if (!cpu_addr)
1017                 return NULL;
1018 
1019         *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
1020         if (*handle == DMA_MAPPING_ERROR) {
1021                 __iommu_dma_free(dev, size, cpu_addr);
1022                 return NULL;
1023         }
1024 
1025         return cpu_addr;
1026 }
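
/*
 * Editor's note on the choice above: a blocking, non-FORCE_CONTIGUOUS
 * allocation with CONFIG_DMA_REMAP takes the iommu_dma_alloc_remap()
 * path; an atomic allocation for a non-coherent device comes from the
 * atomic pool (CONFIG_DMA_DIRECT_REMAP); everything else gets physically
 * contiguous pages from iommu_dma_alloc_pages(). The latter two results
 * are then mapped here with __iommu_dma_map().
 */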
1027 
1028 static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1029                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1030                 unsigned long attrs)
1031 {
1032         unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1033         unsigned long pfn, off = vma->vm_pgoff;
1034         int ret;
1035 
1036         vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
1037 
1038         if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
1039                 return ret;
1040 
1041         if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
1042                 return -ENXIO;
1043 
1044         if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
1045                 struct page **pages = dma_common_find_pages(cpu_addr);
1046 
1047                 if (pages)
1048                         return __iommu_dma_mmap(pages, size, vma);
1049                 pfn = vmalloc_to_pfn(cpu_addr);
1050         } else {
1051                 pfn = page_to_pfn(virt_to_page(cpu_addr));
1052         }
1053 
1054         return remap_pfn_range(vma, vma->vm_start, pfn + off,
1055                                vma->vm_end - vma->vm_start,
1056                                vma->vm_page_prot);
1057 }
1058 
1059 static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
1060                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1061                 unsigned long attrs)
1062 {
1063         struct page *page;
1064         int ret;
1065 
1066         if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
1067                 struct page **pages = dma_common_find_pages(cpu_addr);
1068 
1069                 if (pages) {
1070                         return sg_alloc_table_from_pages(sgt, pages,
1071                                         PAGE_ALIGN(size) >> PAGE_SHIFT,
1072                                         0, size, GFP_KERNEL);
1073                 }
1074 
1075                 page = vmalloc_to_page(cpu_addr);
1076         } else {
1077                 page = virt_to_page(cpu_addr);
1078         }
1079 
1080         ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
1081         if (!ret)
1082                 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
1083         return ret;
1084 }
1085 
1086 static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1087 {
1088         struct iommu_domain *domain = iommu_get_dma_domain(dev);
1089 
1090         return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1091 }
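
/*
 * Editor's note: for a domain whose smallest supported page is 4KB this
 * returns 0xfff, i.e. callers such as the block layer may only merge
 * segments whose boundaries line up with the IOVA granule.
 */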
1092 
1093 static const struct dma_map_ops iommu_dma_ops = {
1094         .alloc                  = iommu_dma_alloc,
1095         .free                   = iommu_dma_free,
1096         .mmap                   = iommu_dma_mmap,
1097         .get_sgtable            = iommu_dma_get_sgtable,
1098         .map_page               = iommu_dma_map_page,
1099         .unmap_page             = iommu_dma_unmap_page,
1100         .map_sg                 = iommu_dma_map_sg,
1101         .unmap_sg               = iommu_dma_unmap_sg,
1102         .sync_single_for_cpu    = iommu_dma_sync_single_for_cpu,
1103         .sync_single_for_device = iommu_dma_sync_single_for_device,
1104         .sync_sg_for_cpu        = iommu_dma_sync_sg_for_cpu,
1105         .sync_sg_for_device     = iommu_dma_sync_sg_for_device,
1106         .map_resource           = iommu_dma_map_resource,
1107         .unmap_resource         = iommu_dma_unmap_resource,
1108         .get_merge_boundary     = iommu_dma_get_merge_boundary,
1109 };
1110 
1111 /*
1112  * The IOMMU core code allocates the default DMA domain, which the underlying
1113  * IOMMU driver needs to support via the dma-iommu layer.
1114  */
1115 void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
1116 {
1117         struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1118 
1119         if (!domain)
1120                 goto out_err;
1121 
1122         /*
1123          * The IOMMU core code allocates the default DMA domain, which the
1124          * underlying IOMMU driver needs to support via the dma-iommu layer.
1125          */
1126         if (domain->type == IOMMU_DOMAIN_DMA) {
1127                 if (iommu_dma_init_domain(domain, dma_base, size, dev))
1128                         goto out_err;
1129                 dev->dma_ops = &iommu_dma_ops;
1130         }
1131 
1132         return;
1133 out_err:
1134          pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
1135                  dev_name(dev));
1136 }
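
/*
 * Editor's note: at the time of writing this is called from the arch
 * code's arch_setup_dma_ops() (e.g. on arm64) once firmware has described
 * the device's IOMMU topology and usable DMA range.
 */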
1137 
1138 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
1139                 phys_addr_t msi_addr, struct iommu_domain *domain)
1140 {
1141         struct iommu_dma_cookie *cookie = domain->iova_cookie;
1142         struct iommu_dma_msi_page *msi_page;
1143         dma_addr_t iova;
1144         int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1145         size_t size = cookie_msi_granule(cookie);
1146 
1147         msi_addr &= ~(phys_addr_t)(size - 1);
1148         list_for_each_entry(msi_page, &cookie->msi_page_list, list)
1149                 if (msi_page->phys == msi_addr)
1150                         return msi_page;
1151 
1152         msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
1153         if (!msi_page)
1154                 return NULL;
1155 
1156         iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
1157         if (!iova)
1158                 goto out_free_page;
1159 
1160         if (iommu_map(domain, iova, msi_addr, size, prot))
1161                 goto out_free_iova;
1162 
1163         INIT_LIST_HEAD(&msi_page->list);
1164         msi_page->phys = msi_addr;
1165         msi_page->iova = iova;
1166         list_add(&msi_page->list, &cookie->msi_page_list);
1167         return msi_page;
1168 
1169 out_free_iova:
1170         iommu_dma_free_iova(cookie, iova, size);
1171 out_free_page:
1172         kfree(msi_page);
1173         return NULL;
1174 }
1175 
1176 int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
1177 {
1178         struct device *dev = msi_desc_to_dev(desc);
1179         struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1180         struct iommu_dma_msi_page *msi_page;
1181         static DEFINE_MUTEX(msi_prepare_lock); /* see below */
1182 
1183         if (!domain || !domain->iova_cookie) {
1184                 desc->iommu_cookie = NULL;
1185                 return 0;
1186         }
1187 
1188         /*
1189          * In fact the whole prepare operation should already be serialised by
1190          * irq_domain_mutex further up the callchain, but that's pretty subtle
1191          * on its own, so consider this locking as failsafe documentation...
1192          */
1193         mutex_lock(&msi_prepare_lock);
1194         msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1195         mutex_unlock(&msi_prepare_lock);
1196 
1197         msi_desc_set_iommu_cookie(desc, msi_page);
1198 
1199         if (!msi_page)
1200                 return -ENOMEM;
1201         return 0;
1202 }
1203 
1204 void iommu_dma_compose_msi_msg(struct msi_desc *desc,
1205                                struct msi_msg *msg)
1206 {
1207         struct device *dev = msi_desc_to_dev(desc);
1208         const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1209         const struct iommu_dma_msi_page *msi_page;
1210 
1211         msi_page = msi_desc_get_iommu_cookie(desc);
1212 
1213         if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1214                 return;
1215 
1216         msg->address_hi = upper_32_bits(msi_page->iova);
1217         msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1218         msg->address_lo += lower_32_bits(msi_page->iova);
1219 }
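
/*
 * Editor's note, as a worked example: if a doorbell page at physical
 * 0x08020000 was remapped to IOVA 0x0fff0000 with a 4KB granule, a
 * message aimed at 0x08020040 is rewritten to 0x0fff0040: the low 12 bits
 * (the offset within the granule) are preserved and the rest is replaced
 * by the remapped IOVA.
 */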
1220 
1221 static int iommu_dma_init(void)
1222 {
1223         return iova_cache_get();
1224 }
1225 arch_initcall(iommu_dma_init);
