root/drivers/dma/dmaengine.c


DEFINITIONS

This source file includes the following definitions.
  1. dev_to_dma_chan
  2. memcpy_count_show
  3. bytes_transferred_show
  4. in_use_show
  5. chan_dev_release
  6. __dma_device_satisfies_mask
  7. dma_chan_to_owner
  8. balance_ref_count
  9. dma_chan_get
  10. dma_chan_put
  11. dma_sync_wait
  12. dma_channel_table_init
  13. dma_find_channel
  14. dma_issue_pending_all
  15. dma_chan_is_local
  16. min_chan
  17. dma_channel_rebalance
  18. dma_get_slave_caps
  19. private_candidate
  20. find_candidate
  21. dma_get_slave_channel
  22. dma_get_any_slave_channel
  23. __dma_request_channel
  24. dma_filter_match
  25. dma_request_chan
  26. dma_request_slave_channel
  27. dma_request_chan_by_mask
  28. dma_release_channel
  29. dmaengine_get
  30. dmaengine_put
  31. device_has_all_tx_types
  32. get_dma_id
  33. dma_async_device_register
  34. dma_async_device_unregister
  35. dmam_device_release
  36. dmaenginem_async_device_register
  37. __get_unmap_pool
  38. dmaengine_unmap
  39. dmaengine_unmap_put
  40. dmaengine_destroy_unmap_pool
  41. dmaengine_init_unmap_pool
  42. dmaengine_get_unmap_data
  43. dma_async_tx_descriptor_init
  44. dma_wait_for_async_tx
  45. dma_run_dependencies
  46. dma_bus_init

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   4  */
   5 
   6 /*
   7  * This code implements the DMA subsystem. It provides a HW-neutral interface
   8  * for other kernel code to use asynchronous memory copy capabilities,
   9  * if present, and allows different HW DMA drivers to register as providing
  10  * this capability.
  11  *
   12  * Because we are accelerating what is already a relatively fast
  13  * operation, the code goes to great lengths to avoid additional overhead,
  14  * such as locking.
  15  *
  16  * LOCKING:
  17  *
   18  * The subsystem keeps a global list of dma_device structs, which is
   19  * protected by a mutex, dma_list_mutex.
  20  *
  21  * A subsystem can get access to a channel by calling dmaengine_get() followed
  22  * by dma_find_channel(), or if it has need for an exclusive channel it can call
  23  * dma_request_channel().  Once a channel is allocated a reference is taken
  24  * against its corresponding driver to disable removal.
  25  *
  26  * Each device has a channels list, which runs unlocked but is never modified
   27  * once the device is registered; it is simply set up by the driver.
  28  *
  29  * See Documentation/driver-api/dmaengine for more details
  30  */
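
/*
 * Example (illustrative sketch, not a normative part of the API): the two
 * access paths described above look roughly like this from a client:
 *
 *	// opportunistic, shared use of a public channel
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		...issue memcpy operations on chan...
 *	dmaengine_put();
 *
 *	// exclusive use of a channel matching a capability mask
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		dma_release_channel(chan);
 */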
  31 
  32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33 
  34 #include <linux/platform_device.h>
  35 #include <linux/dma-mapping.h>
  36 #include <linux/init.h>
  37 #include <linux/module.h>
  38 #include <linux/mm.h>
  39 #include <linux/device.h>
  40 #include <linux/dmaengine.h>
  41 #include <linux/hardirq.h>
  42 #include <linux/spinlock.h>
  43 #include <linux/percpu.h>
  44 #include <linux/rcupdate.h>
  45 #include <linux/mutex.h>
  46 #include <linux/jiffies.h>
  47 #include <linux/rculist.h>
  48 #include <linux/idr.h>
  49 #include <linux/slab.h>
  50 #include <linux/acpi.h>
  51 #include <linux/acpi_dma.h>
  52 #include <linux/of_dma.h>
  53 #include <linux/mempool.h>
  54 #include <linux/numa.h>
  55 
  56 static DEFINE_MUTEX(dma_list_mutex);
  57 static DEFINE_IDA(dma_ida);
  58 static LIST_HEAD(dma_device_list);
  59 static long dmaengine_ref_count;
  60 
  61 /* --- sysfs implementation --- */
  62 
  63 /**
  64  * dev_to_dma_chan - convert a device pointer to its sysfs container object
   65  * @dev: device node
  66  *
  67  * Must be called under dma_list_mutex
  68  */
  69 static struct dma_chan *dev_to_dma_chan(struct device *dev)
  70 {
  71         struct dma_chan_dev *chan_dev;
  72 
  73         chan_dev = container_of(dev, typeof(*chan_dev), device);
  74         return chan_dev->chan;
  75 }
  76 
  77 static ssize_t memcpy_count_show(struct device *dev,
  78                                  struct device_attribute *attr, char *buf)
  79 {
  80         struct dma_chan *chan;
  81         unsigned long count = 0;
  82         int i;
  83         int err;
  84 
  85         mutex_lock(&dma_list_mutex);
  86         chan = dev_to_dma_chan(dev);
  87         if (chan) {
  88                 for_each_possible_cpu(i)
  89                         count += per_cpu_ptr(chan->local, i)->memcpy_count;
  90                 err = sprintf(buf, "%lu\n", count);
  91         } else
  92                 err = -ENODEV;
  93         mutex_unlock(&dma_list_mutex);
  94 
  95         return err;
  96 }
  97 static DEVICE_ATTR_RO(memcpy_count);
  98 
  99 static ssize_t bytes_transferred_show(struct device *dev,
 100                                       struct device_attribute *attr, char *buf)
 101 {
 102         struct dma_chan *chan;
 103         unsigned long count = 0;
 104         int i;
 105         int err;
 106 
 107         mutex_lock(&dma_list_mutex);
 108         chan = dev_to_dma_chan(dev);
 109         if (chan) {
 110                 for_each_possible_cpu(i)
 111                         count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 112                 err = sprintf(buf, "%lu\n", count);
 113         } else
 114                 err = -ENODEV;
 115         mutex_unlock(&dma_list_mutex);
 116 
 117         return err;
 118 }
 119 static DEVICE_ATTR_RO(bytes_transferred);
 120 
 121 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 122                            char *buf)
 123 {
 124         struct dma_chan *chan;
 125         int err;
 126 
 127         mutex_lock(&dma_list_mutex);
 128         chan = dev_to_dma_chan(dev);
 129         if (chan)
 130                 err = sprintf(buf, "%d\n", chan->client_count);
 131         else
 132                 err = -ENODEV;
 133         mutex_unlock(&dma_list_mutex);
 134 
 135         return err;
 136 }
 137 static DEVICE_ATTR_RO(in_use);
 138 
 139 static struct attribute *dma_dev_attrs[] = {
 140         &dev_attr_memcpy_count.attr,
 141         &dev_attr_bytes_transferred.attr,
 142         &dev_attr_in_use.attr,
 143         NULL,
 144 };
 145 ATTRIBUTE_GROUPS(dma_dev);
 146 
 147 static void chan_dev_release(struct device *dev)
 148 {
 149         struct dma_chan_dev *chan_dev;
 150 
 151         chan_dev = container_of(dev, typeof(*chan_dev), device);
 152         if (atomic_dec_and_test(chan_dev->idr_ref)) {
 153                 ida_free(&dma_ida, chan_dev->dev_id);
 154                 kfree(chan_dev->idr_ref);
 155         }
 156         kfree(chan_dev);
 157 }
 158 
 159 static struct class dma_devclass = {
 160         .name           = "dma",
 161         .dev_groups     = dma_dev_groups,
 162         .dev_release    = chan_dev_release,
 163 };
 164 
 165 /* --- client and device registration --- */
 166 
 167 #define dma_device_satisfies_mask(device, mask) \
 168         __dma_device_satisfies_mask((device), &(mask))
 169 static int
 170 __dma_device_satisfies_mask(struct dma_device *device,
 171                             const dma_cap_mask_t *want)
 172 {
 173         dma_cap_mask_t has;
 174 
 175         bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 176                 DMA_TX_TYPE_END);
 177         return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 178 }
 179 
 180 static struct module *dma_chan_to_owner(struct dma_chan *chan)
 181 {
 182         return chan->device->owner;
 183 }
 184 
 185 /**
 186  * balance_ref_count - catch up the channel reference count
  187  * @chan: channel to balance ->client_count versus dmaengine_ref_count
 188  *
 189  * balance_ref_count must be called under dma_list_mutex
 190  */
 191 static void balance_ref_count(struct dma_chan *chan)
 192 {
 193         struct module *owner = dma_chan_to_owner(chan);
 194 
 195         while (chan->client_count < dmaengine_ref_count) {
 196                 __module_get(owner);
 197                 chan->client_count++;
 198         }
 199 }
 200 
 201 /**
 202  * dma_chan_get - try to grab a dma channel's parent driver module
  203  * @chan: channel to grab
 204  *
 205  * Must be called under dma_list_mutex
 206  */
 207 static int dma_chan_get(struct dma_chan *chan)
 208 {
 209         struct module *owner = dma_chan_to_owner(chan);
 210         int ret;
 211 
 212         /* The channel is already in use, update client count */
 213         if (chan->client_count) {
 214                 __module_get(owner);
 215                 goto out;
 216         }
 217 
 218         if (!try_module_get(owner))
 219                 return -ENODEV;
 220 
 221         /* allocate upon first client reference */
 222         if (chan->device->device_alloc_chan_resources) {
 223                 ret = chan->device->device_alloc_chan_resources(chan);
 224                 if (ret < 0)
 225                         goto err_out;
 226         }
 227 
 228         if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 229                 balance_ref_count(chan);
 230 
 231 out:
 232         chan->client_count++;
 233         return 0;
 234 
 235 err_out:
 236         module_put(owner);
 237         return ret;
 238 }
 239 
 240 /**
 241  * dma_chan_put - drop a reference to a dma channel's parent driver module
  242  * @chan: channel to release
 243  *
 244  * Must be called under dma_list_mutex
 245  */
 246 static void dma_chan_put(struct dma_chan *chan)
 247 {
 248         /* This channel is not in use, bail out */
 249         if (!chan->client_count)
 250                 return;
 251 
 252         chan->client_count--;
 253         module_put(dma_chan_to_owner(chan));
 254 
 255         /* This channel is not in use anymore, free it */
 256         if (!chan->client_count && chan->device->device_free_chan_resources) {
 257                 /* Make sure all operations have completed */
 258                 dmaengine_synchronize(chan);
 259                 chan->device->device_free_chan_resources(chan);
 260         }
 261 
 262         /* If the channel is used via a DMA request router, free the mapping */
 263         if (chan->router && chan->router->route_free) {
 264                 chan->router->route_free(chan->router->dev, chan->route_data);
 265                 chan->router = NULL;
 266                 chan->route_data = NULL;
 267         }
 268 }
 269 
 270 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 271 {
 272         enum dma_status status;
 273         unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 274 
 275         dma_async_issue_pending(chan);
 276         do {
 277                 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 278                 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 279                         dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 280                         return DMA_ERROR;
 281                 }
 282                 if (status != DMA_IN_PROGRESS)
 283                         break;
 284                 cpu_relax();
 285         } while (1);
 286 
 287         return status;
 288 }
 289 EXPORT_SYMBOL(dma_sync_wait);
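
/*
 * Example (illustrative sketch): dma_sync_wait() blocks, so a client that has
 * just submitted a descriptor and holds its cookie can poll for completion
 * like this ("desc", "chan" and "dev" are placeholders):
 *
 *	dma_cookie_t cookie = dmaengine_submit(desc);
 *
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		dev_err(dev, "DMA transfer failed or timed out\n");
 */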
 290 
 291 /**
 292  * dma_cap_mask_all - enable iteration over all operation types
 293  */
 294 static dma_cap_mask_t dma_cap_mask_all;
 295 
 296 /**
 297  * dma_chan_tbl_ent - tracks channel allocations per core/operation
  298  * @chan: associated channel for this entry
 299  */
 300 struct dma_chan_tbl_ent {
 301         struct dma_chan *chan;
 302 };
 303 
 304 /**
 305  * channel_table - percpu lookup table for memory-to-memory offload providers
 306  */
 307 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 308 
 309 static int __init dma_channel_table_init(void)
 310 {
 311         enum dma_transaction_type cap;
 312         int err = 0;
 313 
 314         bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 315 
 316         /* 'interrupt', 'private', and 'slave' are channel capabilities,
 317          * but are not associated with an operation so they do not need
 318          * an entry in the channel_table
 319          */
 320         clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 321         clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 322         clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 323 
 324         for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 325                 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 326                 if (!channel_table[cap]) {
 327                         err = -ENOMEM;
 328                         break;
 329                 }
 330         }
 331 
 332         if (err) {
 333                 pr_err("initialization failure\n");
 334                 for_each_dma_cap_mask(cap, dma_cap_mask_all)
 335                         free_percpu(channel_table[cap]);
 336         }
 337 
 338         return err;
 339 }
 340 arch_initcall(dma_channel_table_init);
 341 
 342 /**
 343  * dma_find_channel - find a channel to carry out the operation
 344  * @tx_type: transaction type
 345  */
 346 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 347 {
 348         return this_cpu_read(channel_table[tx_type]->chan);
 349 }
 350 EXPORT_SYMBOL(dma_find_channel);
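
/*
 * Example (illustrative sketch): dma_find_channel() only returns a channel
 * between dmaengine_get() and dmaengine_put(), and may return NULL, so
 * opportunistic offload users keep a CPU fallback:
 *
 *	struct dma_chan *chan;
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		...offload the copy to chan...
 *	else
 *		memcpy(dst, src, len);	// plain CPU copy as fallback
 *	dmaengine_put();
 */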
 351 
 352 /**
 353  * dma_issue_pending_all - flush all pending operations across all channels
 354  */
 355 void dma_issue_pending_all(void)
 356 {
 357         struct dma_device *device;
 358         struct dma_chan *chan;
 359 
 360         rcu_read_lock();
 361         list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 362                 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 363                         continue;
 364                 list_for_each_entry(chan, &device->channels, device_node)
 365                         if (chan->client_count)
 366                                 device->device_issue_pending(chan);
 367         }
 368         rcu_read_unlock();
 369 }
 370 EXPORT_SYMBOL(dma_issue_pending_all);
 371 
 372 /**
 373  * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 374  */
 375 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 376 {
 377         int node = dev_to_node(chan->device->dev);
 378         return node == NUMA_NO_NODE ||
 379                 cpumask_test_cpu(cpu, cpumask_of_node(node));
 380 }
 381 
 382 /**
 383  * min_chan - returns the channel with min count and in the same numa-node as the cpu
 384  * @cap: capability to match
 385  * @cpu: cpu index which the channel should be close to
 386  *
 387  * If some channels are close to the given cpu, the one with the lowest
 388  * reference count is returned. Otherwise, cpu is ignored and only the
 389  * reference count is taken into account.
 390  * Must be called under dma_list_mutex.
 391  */
 392 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 393 {
 394         struct dma_device *device;
 395         struct dma_chan *chan;
 396         struct dma_chan *min = NULL;
 397         struct dma_chan *localmin = NULL;
 398 
 399         list_for_each_entry(device, &dma_device_list, global_node) {
 400                 if (!dma_has_cap(cap, device->cap_mask) ||
 401                     dma_has_cap(DMA_PRIVATE, device->cap_mask))
 402                         continue;
 403                 list_for_each_entry(chan, &device->channels, device_node) {
 404                         if (!chan->client_count)
 405                                 continue;
 406                         if (!min || chan->table_count < min->table_count)
 407                                 min = chan;
 408 
 409                         if (dma_chan_is_local(chan, cpu))
 410                                 if (!localmin ||
 411                                     chan->table_count < localmin->table_count)
 412                                         localmin = chan;
 413                 }
 414         }
 415 
 416         chan = localmin ? localmin : min;
 417 
 418         if (chan)
 419                 chan->table_count++;
 420 
 421         return chan;
 422 }
 423 
 424 /**
 425  * dma_channel_rebalance - redistribute the available channels
 426  *
 427  * Optimize for cpu isolation (each cpu gets a dedicated channel for an
  428  * operation type) in the SMP case, and operation isolation (avoid
 429  * multi-tasking channels) in the non-SMP case.  Must be called under
 430  * dma_list_mutex.
 431  */
 432 static void dma_channel_rebalance(void)
 433 {
 434         struct dma_chan *chan;
 435         struct dma_device *device;
 436         int cpu;
 437         int cap;
 438 
 439         /* undo the last distribution */
 440         for_each_dma_cap_mask(cap, dma_cap_mask_all)
 441                 for_each_possible_cpu(cpu)
 442                         per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 443 
 444         list_for_each_entry(device, &dma_device_list, global_node) {
 445                 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 446                         continue;
 447                 list_for_each_entry(chan, &device->channels, device_node)
 448                         chan->table_count = 0;
 449         }
 450 
 451         /* don't populate the channel_table if no clients are available */
 452         if (!dmaengine_ref_count)
 453                 return;
 454 
 455         /* redistribute available channels */
 456         for_each_dma_cap_mask(cap, dma_cap_mask_all)
 457                 for_each_online_cpu(cpu) {
 458                         chan = min_chan(cap, cpu);
 459                         per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 460                 }
 461 }
 462 
 463 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 464 {
 465         struct dma_device *device;
 466 
 467         if (!chan || !caps)
 468                 return -EINVAL;
 469 
 470         device = chan->device;
 471 
 472         /* check if the channel supports slave transactions */
 473         if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
 474               test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 475                 return -ENXIO;
 476 
 477         /*
 478          * Check whether it reports it uses the generic slave
 479          * capabilities, if not, that means it doesn't support any
 480          * kind of slave capabilities reporting.
 481          */
 482         if (!device->directions)
 483                 return -ENXIO;
 484 
 485         caps->src_addr_widths = device->src_addr_widths;
 486         caps->dst_addr_widths = device->dst_addr_widths;
 487         caps->directions = device->directions;
 488         caps->max_burst = device->max_burst;
 489         caps->residue_granularity = device->residue_granularity;
 490         caps->descriptor_reuse = device->descriptor_reuse;
 491         caps->cmd_pause = !!device->device_pause;
 492         caps->cmd_resume = !!device->device_resume;
 493         caps->cmd_terminate = !!device->device_terminate_all;
 494 
 495         return 0;
 496 }
 497 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
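
/*
 * Example (illustrative sketch): a slave client can use the reported
 * capabilities to validate its setup before configuring transfers:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps)) {
 *		if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
 *			return -EINVAL;		// controller cannot do TX
 *		if (!caps.cmd_pause)
 *			dev_warn(dev, "channel cannot be paused\n");
 *	}
 */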
 498 
 499 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 500                                           struct dma_device *dev,
 501                                           dma_filter_fn fn, void *fn_param)
 502 {
 503         struct dma_chan *chan;
 504 
 505         if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 506                 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 507                 return NULL;
 508         }
 509         /* devices with multiple channels need special handling as we need to
 510          * ensure that all channels are either private or public.
 511          */
 512         if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 513                 list_for_each_entry(chan, &dev->channels, device_node) {
 514                         /* some channels are already publicly allocated */
 515                         if (chan->client_count)
 516                                 return NULL;
 517                 }
 518 
 519         list_for_each_entry(chan, &dev->channels, device_node) {
 520                 if (chan->client_count) {
 521                         dev_dbg(dev->dev, "%s: %s busy\n",
 522                                  __func__, dma_chan_name(chan));
 523                         continue;
 524                 }
 525                 if (fn && !fn(chan, fn_param)) {
 526                         dev_dbg(dev->dev, "%s: %s filter said false\n",
 527                                  __func__, dma_chan_name(chan));
 528                         continue;
 529                 }
 530                 return chan;
 531         }
 532 
 533         return NULL;
 534 }
 535 
 536 static struct dma_chan *find_candidate(struct dma_device *device,
 537                                        const dma_cap_mask_t *mask,
 538                                        dma_filter_fn fn, void *fn_param)
 539 {
 540         struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
 541         int err;
 542 
 543         if (chan) {
 544                 /* Found a suitable channel, try to grab, prep, and return it.
 545                  * We first set DMA_PRIVATE to disable balance_ref_count as this
 546                  * channel will not be published in the general-purpose
 547                  * allocator
 548                  */
 549                 dma_cap_set(DMA_PRIVATE, device->cap_mask);
 550                 device->privatecnt++;
 551                 err = dma_chan_get(chan);
 552 
 553                 if (err) {
 554                         if (err == -ENODEV) {
 555                                 dev_dbg(device->dev, "%s: %s module removed\n",
 556                                         __func__, dma_chan_name(chan));
 557                                 list_del_rcu(&device->global_node);
 558                         } else
 559                                 dev_dbg(device->dev,
 560                                         "%s: failed to get %s: (%d)\n",
 561                                          __func__, dma_chan_name(chan), err);
 562 
 563                         if (--device->privatecnt == 0)
 564                                 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 565 
 566                         chan = ERR_PTR(err);
 567                 }
 568         }
 569 
 570         return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 571 }
 572 
 573 /**
 574  * dma_get_slave_channel - try to get specific channel exclusively
 575  * @chan: target channel
 576  */
 577 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 578 {
 579         int err = -EBUSY;
 580 
 581         /* lock against __dma_request_channel */
 582         mutex_lock(&dma_list_mutex);
 583 
 584         if (chan->client_count == 0) {
 585                 struct dma_device *device = chan->device;
 586 
 587                 dma_cap_set(DMA_PRIVATE, device->cap_mask);
 588                 device->privatecnt++;
 589                 err = dma_chan_get(chan);
 590                 if (err) {
 591                         dev_dbg(chan->device->dev,
 592                                 "%s: failed to get %s: (%d)\n",
 593                                 __func__, dma_chan_name(chan), err);
 594                         chan = NULL;
 595                         if (--device->privatecnt == 0)
 596                                 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 597                 }
 598         } else
 599                 chan = NULL;
 600 
 601         mutex_unlock(&dma_list_mutex);
 602 
 603 
 604         return chan;
 605 }
 606 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
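
/*
 * Example (illustrative sketch): providers typically call
 * dma_get_slave_channel() from their device-tree translation callback; the
 * "xxx_dma" structure and its fields below are hypothetical:
 *
 *	static struct dma_chan *xxx_of_xlate(struct of_phandle_args *spec,
 *					     struct of_dma *ofdma)
 *	{
 *		struct xxx_dma *xdev = ofdma->of_dma_data;
 *		unsigned int id = spec->args[0];
 *
 *		if (id >= xdev->nr_channels)
 *			return NULL;
 *		return dma_get_slave_channel(&xdev->chan[id].common);
 *	}
 */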
 607 
 608 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 609 {
 610         dma_cap_mask_t mask;
 611         struct dma_chan *chan;
 612 
 613         dma_cap_zero(mask);
 614         dma_cap_set(DMA_SLAVE, mask);
 615 
 616         /* lock against __dma_request_channel */
 617         mutex_lock(&dma_list_mutex);
 618 
 619         chan = find_candidate(device, &mask, NULL, NULL);
 620 
 621         mutex_unlock(&dma_list_mutex);
 622 
 623         return IS_ERR(chan) ? NULL : chan;
 624 }
 625 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 626 
 627 /**
 628  * __dma_request_channel - try to allocate an exclusive channel
 629  * @mask: capabilities that the channel must satisfy
  630  * @fn: optional callback to filter the available channels
 631  * @fn_param: opaque parameter to pass to dma_filter_fn
 632  * @np: device node to look for DMA channels
 633  *
 634  * Returns pointer to appropriate DMA channel on success or NULL.
 635  */
 636 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 637                                        dma_filter_fn fn, void *fn_param,
 638                                        struct device_node *np)
 639 {
 640         struct dma_device *device, *_d;
 641         struct dma_chan *chan = NULL;
 642 
 643         /* Find a channel */
 644         mutex_lock(&dma_list_mutex);
 645         list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 646                 /* Finds a DMA controller with matching device node */
 647                 if (np && device->dev->of_node && np != device->dev->of_node)
 648                         continue;
 649 
 650                 chan = find_candidate(device, mask, fn, fn_param);
 651                 if (!IS_ERR(chan))
 652                         break;
 653 
 654                 chan = NULL;
 655         }
 656         mutex_unlock(&dma_list_mutex);
 657 
 658         pr_debug("%s: %s (%s)\n",
 659                  __func__,
 660                  chan ? "success" : "fail",
 661                  chan ? dma_chan_name(chan) : NULL);
 662 
 663         return chan;
 664 }
 665 EXPORT_SYMBOL_GPL(__dma_request_channel);
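
/*
 * Example (illustrative sketch): __dma_request_channel() is normally reached
 * through the dma_request_channel() macro, optionally with a filter function
 * that applies driver-specific matching (the criterion below is made up):
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->chan_id == *(int *)param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	int wanted = 0;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &wanted);
 */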
 666 
 667 static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
 668                                                     const char *name,
 669                                                     struct device *dev)
 670 {
 671         int i;
 672 
 673         if (!device->filter.mapcnt)
 674                 return NULL;
 675 
 676         for (i = 0; i < device->filter.mapcnt; i++) {
 677                 const struct dma_slave_map *map = &device->filter.map[i];
 678 
 679                 if (!strcmp(map->devname, dev_name(dev)) &&
 680                     !strcmp(map->slave, name))
 681                         return map;
 682         }
 683 
 684         return NULL;
 685 }
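
/*
 * Example (illustrative sketch): the filter map consulted above is supplied
 * by the DMA controller driver, typically from platform data; "ddev" points
 * at the provider's struct dma_device, and the "xxx" names and request-line
 * constants are placeholders:
 *
 *	static const struct dma_slave_map xxx_slave_map[] = {
 *		{ "xxx-uart.0", "rx", (void *)XXX_DMA_REQ_UART0_RX },
 *		{ "xxx-uart.0", "tx", (void *)XXX_DMA_REQ_UART0_TX },
 *	};
 *
 *	ddev->filter.map = xxx_slave_map;
 *	ddev->filter.mapcnt = ARRAY_SIZE(xxx_slave_map);
 *	ddev->filter.fn = xxx_dma_filter_fn;
 */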
 686 
 687 /**
 688  * dma_request_chan - try to allocate an exclusive slave channel
 689  * @dev:        pointer to client device structure
 690  * @name:       slave channel name
 691  *
 692  * Returns pointer to appropriate DMA channel on success or an error pointer.
 693  */
 694 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 695 {
 696         struct dma_device *d, *_d;
 697         struct dma_chan *chan = NULL;
 698 
 699         /* If device-tree is present get slave info from here */
 700         if (dev->of_node)
 701                 chan = of_dma_request_slave_channel(dev->of_node, name);
 702 
 703         /* If device was enumerated by ACPI get slave info from here */
 704         if (has_acpi_companion(dev) && !chan)
 705                 chan = acpi_dma_request_slave_chan_by_name(dev, name);
 706 
 707         if (chan) {
 708                 /* Valid channel found or requester needs to be deferred */
 709                 if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 710                         return chan;
 711         }
 712 
 713         /* Try to find the channel via the DMA filter map(s) */
 714         mutex_lock(&dma_list_mutex);
 715         list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 716                 dma_cap_mask_t mask;
 717                 const struct dma_slave_map *map = dma_filter_match(d, name, dev);
 718 
 719                 if (!map)
 720                         continue;
 721 
 722                 dma_cap_zero(mask);
 723                 dma_cap_set(DMA_SLAVE, mask);
 724 
 725                 chan = find_candidate(d, &mask, d->filter.fn, map->param);
 726                 if (!IS_ERR(chan))
 727                         break;
 728         }
 729         mutex_unlock(&dma_list_mutex);
 730 
 731         return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 732 }
 733 EXPORT_SYMBOL_GPL(dma_request_chan);
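
/*
 * Example (illustrative sketch): a slave client usually requests its channel
 * by name in probe(), propagating -EPROBE_DEFER, and releases it in remove();
 * the "rx" name and the prepared struct dma_slave_config cfg are assumed:
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret)
 *		goto err_release;
 *	...
 * err_release:
 *	dma_release_channel(chan);
 */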
 734 
 735 /**
 736  * dma_request_slave_channel - try to allocate an exclusive slave channel
 737  * @dev:        pointer to client device structure
 738  * @name:       slave channel name
 739  *
 740  * Returns pointer to appropriate DMA channel on success or NULL.
 741  */
 742 struct dma_chan *dma_request_slave_channel(struct device *dev,
 743                                            const char *name)
 744 {
 745         struct dma_chan *ch = dma_request_chan(dev, name);
 746         if (IS_ERR(ch))
 747                 return NULL;
 748 
 749         return ch;
 750 }
 751 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 752 
 753 /**
 754  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 755  * @mask: capabilities that the channel must satisfy
 756  *
 757  * Returns pointer to appropriate DMA channel on success or an error pointer.
 758  */
 759 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 760 {
 761         struct dma_chan *chan;
 762 
 763         if (!mask)
 764                 return ERR_PTR(-ENODEV);
 765 
 766         chan = __dma_request_channel(mask, NULL, NULL, NULL);
 767         if (!chan) {
 768                 mutex_lock(&dma_list_mutex);
 769                 if (list_empty(&dma_device_list))
 770                         chan = ERR_PTR(-EPROBE_DEFER);
 771                 else
 772                         chan = ERR_PTR(-ENODEV);
 773                 mutex_unlock(&dma_list_mutex);
 774         }
 775 
 776         return chan;
 777 }
 778 EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
 779 
 780 void dma_release_channel(struct dma_chan *chan)
 781 {
 782         mutex_lock(&dma_list_mutex);
 783         WARN_ONCE(chan->client_count != 1,
 784                   "chan reference count %d != 1\n", chan->client_count);
 785         dma_chan_put(chan);
 786         /* drop PRIVATE cap enabled by __dma_request_channel() */
 787         if (--chan->device->privatecnt == 0)
 788                 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 789         mutex_unlock(&dma_list_mutex);
 790 }
 791 EXPORT_SYMBOL_GPL(dma_release_channel);
 792 
 793 /**
 794  * dmaengine_get - register interest in dma_channels
 795  */
 796 void dmaengine_get(void)
 797 {
 798         struct dma_device *device, *_d;
 799         struct dma_chan *chan;
 800         int err;
 801 
 802         mutex_lock(&dma_list_mutex);
 803         dmaengine_ref_count++;
 804 
 805         /* try to grab channels */
 806         list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 807                 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 808                         continue;
 809                 list_for_each_entry(chan, &device->channels, device_node) {
 810                         err = dma_chan_get(chan);
 811                         if (err == -ENODEV) {
 812                                 /* module removed before we could use it */
 813                                 list_del_rcu(&device->global_node);
 814                                 break;
 815                         } else if (err)
 816                                 dev_dbg(chan->device->dev,
 817                                         "%s: failed to get %s: (%d)\n",
 818                                         __func__, dma_chan_name(chan), err);
 819                 }
 820         }
 821 
 822         /* if this is the first reference and there were channels
 823          * waiting we need to rebalance to get those channels
 824          * incorporated into the channel table
 825          */
 826         if (dmaengine_ref_count == 1)
 827                 dma_channel_rebalance();
 828         mutex_unlock(&dma_list_mutex);
 829 }
 830 EXPORT_SYMBOL(dmaengine_get);
 831 
 832 /**
 833  * dmaengine_put - let dma drivers be removed when ref_count == 0
 834  */
 835 void dmaengine_put(void)
 836 {
 837         struct dma_device *device;
 838         struct dma_chan *chan;
 839 
 840         mutex_lock(&dma_list_mutex);
 841         dmaengine_ref_count--;
 842         BUG_ON(dmaengine_ref_count < 0);
 843         /* drop channel references */
 844         list_for_each_entry(device, &dma_device_list, global_node) {
 845                 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 846                         continue;
 847                 list_for_each_entry(chan, &device->channels, device_node)
 848                         dma_chan_put(chan);
 849         }
 850         mutex_unlock(&dma_list_mutex);
 851 }
 852 EXPORT_SYMBOL(dmaengine_put);
 853 
 854 static bool device_has_all_tx_types(struct dma_device *device)
 855 {
 856         /* A device that satisfies this test has channels that will never cause
 857          * an async_tx channel switch event as all possible operation types can
 858          * be handled.
 859          */
 860         #ifdef CONFIG_ASYNC_TX_DMA
 861         if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 862                 return false;
 863         #endif
 864 
 865         #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
 866         if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 867                 return false;
 868         #endif
 869 
 870         #if IS_ENABLED(CONFIG_ASYNC_XOR)
 871         if (!dma_has_cap(DMA_XOR, device->cap_mask))
 872                 return false;
 873 
 874         #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 875         if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 876                 return false;
 877         #endif
 878         #endif
 879 
 880         #if IS_ENABLED(CONFIG_ASYNC_PQ)
 881         if (!dma_has_cap(DMA_PQ, device->cap_mask))
 882                 return false;
 883 
 884         #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 885         if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 886                 return false;
 887         #endif
 888         #endif
 889 
 890         return true;
 891 }
 892 
 893 static int get_dma_id(struct dma_device *device)
 894 {
 895         int rc = ida_alloc(&dma_ida, GFP_KERNEL);
 896 
 897         if (rc < 0)
 898                 return rc;
 899         device->dev_id = rc;
 900         return 0;
 901 }
 902 
 903 /**
  904  * dma_async_device_register - register a DMA device with the framework
 905  * @device: &dma_device
 906  */
 907 int dma_async_device_register(struct dma_device *device)
 908 {
 909         int chancnt = 0, rc;
  910         struct dma_chan *chan;
 911         atomic_t *idr_ref;
 912 
 913         if (!device)
 914                 return -ENODEV;
 915 
 916         /* validate device routines */
 917         if (!device->dev) {
  918                 pr_err("DMA device must have dev\n");
 919                 return -EIO;
 920         }
 921 
 922         device->owner = device->dev->driver->owner;
 923 
 924         if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
 925                 dev_err(device->dev,
 926                         "Device claims capability %s, but op is not defined\n",
 927                         "DMA_MEMCPY");
 928                 return -EIO;
 929         }
 930 
 931         if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
 932                 dev_err(device->dev,
 933                         "Device claims capability %s, but op is not defined\n",
 934                         "DMA_XOR");
 935                 return -EIO;
 936         }
 937 
 938         if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
 939                 dev_err(device->dev,
 940                         "Device claims capability %s, but op is not defined\n",
 941                         "DMA_XOR_VAL");
 942                 return -EIO;
 943         }
 944 
 945         if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
 946                 dev_err(device->dev,
 947                         "Device claims capability %s, but op is not defined\n",
 948                         "DMA_PQ");
 949                 return -EIO;
 950         }
 951 
 952         if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
 953                 dev_err(device->dev,
 954                         "Device claims capability %s, but op is not defined\n",
 955                         "DMA_PQ_VAL");
 956                 return -EIO;
 957         }
 958 
 959         if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
 960                 dev_err(device->dev,
 961                         "Device claims capability %s, but op is not defined\n",
 962                         "DMA_MEMSET");
 963                 return -EIO;
 964         }
 965 
 966         if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
 967                 dev_err(device->dev,
 968                         "Device claims capability %s, but op is not defined\n",
 969                         "DMA_INTERRUPT");
 970                 return -EIO;
 971         }
 972 
 973         if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
 974                 dev_err(device->dev,
 975                         "Device claims capability %s, but op is not defined\n",
 976                         "DMA_CYCLIC");
 977                 return -EIO;
 978         }
 979 
 980         if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
 981                 dev_err(device->dev,
 982                         "Device claims capability %s, but op is not defined\n",
 983                         "DMA_INTERLEAVE");
 984                 return -EIO;
 985         }
 986 
 987 
 988         if (!device->device_tx_status) {
 989                 dev_err(device->dev, "Device tx_status is not defined\n");
 990                 return -EIO;
 991         }
 992 
 993 
 994         if (!device->device_issue_pending) {
 995                 dev_err(device->dev, "Device issue_pending is not defined\n");
 996                 return -EIO;
 997         }
 998 
 999         /* note: this only matters in the
1000          * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1001          */
1002         if (device_has_all_tx_types(device))
1003                 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1004 
1005         idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
1006         if (!idr_ref)
1007                 return -ENOMEM;
1008         rc = get_dma_id(device);
1009         if (rc != 0) {
1010                 kfree(idr_ref);
1011                 return rc;
1012         }
1013 
1014         atomic_set(idr_ref, 0);
1015 
1016         /* represent channels in sysfs. Probably want devs too */
1017         list_for_each_entry(chan, &device->channels, device_node) {
1018                 rc = -ENOMEM;
1019                 chan->local = alloc_percpu(typeof(*chan->local));
1020                 if (chan->local == NULL)
1021                         goto err_out;
1022                 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1023                 if (chan->dev == NULL) {
1024                         free_percpu(chan->local);
1025                         chan->local = NULL;
1026                         goto err_out;
1027                 }
1028 
1029                 chan->chan_id = chancnt++;
1030                 chan->dev->device.class = &dma_devclass;
1031                 chan->dev->device.parent = device->dev;
1032                 chan->dev->chan = chan;
1033                 chan->dev->idr_ref = idr_ref;
1034                 chan->dev->dev_id = device->dev_id;
1035                 atomic_inc(idr_ref);
1036                 dev_set_name(&chan->dev->device, "dma%dchan%d",
1037                              device->dev_id, chan->chan_id);
1038 
1039                 rc = device_register(&chan->dev->device);
1040                 if (rc) {
1041                         free_percpu(chan->local);
1042                         chan->local = NULL;
1043                         kfree(chan->dev);
1044                         atomic_dec(idr_ref);
1045                         goto err_out;
1046                 }
1047                 chan->client_count = 0;
1048         }
1049 
1050         if (!chancnt) {
1051                 dev_err(device->dev, "%s: device has no channels!\n", __func__);
1052                 rc = -ENODEV;
1053                 goto err_out;
1054         }
1055 
1056         device->chancnt = chancnt;
1057 
1058         mutex_lock(&dma_list_mutex);
1059         /* take references on public channels */
1060         if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1061                 list_for_each_entry(chan, &device->channels, device_node) {
1062                         /* if clients are already waiting for channels we need
1063                          * to take references on their behalf
1064                          */
1065                         if (dma_chan_get(chan) == -ENODEV) {
1066                                 /* note we can only get here for the first
1067                                  * channel as the remaining channels are
1068                                  * guaranteed to get a reference
1069                                  */
1070                                 rc = -ENODEV;
1071                                 mutex_unlock(&dma_list_mutex);
1072                                 goto err_out;
1073                         }
1074                 }
1075         list_add_tail_rcu(&device->global_node, &dma_device_list);
1076         if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1077                 device->privatecnt++;   /* Always private */
1078         dma_channel_rebalance();
1079         mutex_unlock(&dma_list_mutex);
1080 
1081         return 0;
1082 
1083 err_out:
1084         /* if we never registered a channel just release the idr */
1085         if (atomic_read(idr_ref) == 0) {
1086                 ida_free(&dma_ida, device->dev_id);
1087                 kfree(idr_ref);
1088                 return rc;
1089         }
1090 
1091         list_for_each_entry(chan, &device->channels, device_node) {
1092                 if (chan->local == NULL)
1093                         continue;
1094                 mutex_lock(&dma_list_mutex);
1095                 chan->dev->chan = NULL;
1096                 mutex_unlock(&dma_list_mutex);
1097                 device_unregister(&chan->dev->device);
1098                 free_percpu(chan->local);
1099         }
1100         return rc;
1101 }
1102 EXPORT_SYMBOL(dma_async_device_register);
1103 
1104 /**
1105  * dma_async_device_unregister - unregister a DMA device
1106  * @device: &dma_device
1107  *
 1108  * This routine is called by dma driver exit routines; dmaengine holds module
 1109  * references to prevent it from being called while channels are in use.
1110  */
1111 void dma_async_device_unregister(struct dma_device *device)
1112 {
1113         struct dma_chan *chan;
1114 
1115         mutex_lock(&dma_list_mutex);
1116         list_del_rcu(&device->global_node);
1117         dma_channel_rebalance();
1118         mutex_unlock(&dma_list_mutex);
1119 
1120         list_for_each_entry(chan, &device->channels, device_node) {
1121                 WARN_ONCE(chan->client_count,
1122                           "%s called while %d clients hold a reference\n",
1123                           __func__, chan->client_count);
1124                 mutex_lock(&dma_list_mutex);
1125                 chan->dev->chan = NULL;
1126                 mutex_unlock(&dma_list_mutex);
1127                 device_unregister(&chan->dev->device);
1128                 free_percpu(chan->local);
1129         }
1130 }
1131 EXPORT_SYMBOL(dma_async_device_unregister);
1132 
1133 static void dmam_device_release(struct device *dev, void *res)
1134 {
1135         struct dma_device *device;
1136 
1137         device = *(struct dma_device **)res;
1138         dma_async_device_unregister(device);
1139 }
1140 
1141 /**
 1142  * dmaenginem_async_device_register - resource-managed dma_async_device_register()
1143  * @device: &dma_device
1144  *
1145  * The operation is managed and will be undone on driver detach.
1146  */
1147 int dmaenginem_async_device_register(struct dma_device *device)
1148 {
1149         void *p;
1150         int ret;
1151 
1152         p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
1153         if (!p)
1154                 return -ENOMEM;
1155 
1156         ret = dma_async_device_register(device);
1157         if (!ret) {
1158                 *(struct dma_device **)p = device;
1159                 devres_add(device->dev, p);
1160         } else {
1161                 devres_free(p);
1162         }
1163 
1164         return ret;
1165 }
1166 EXPORT_SYMBOL(dmaenginem_async_device_register);
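
/*
 * Example (illustrative sketch): a provider fills in its dma_device and lets
 * devres undo the registration on driver detach; the fields and "xxx_"
 * callbacks shown are a minimal, hypothetical subset:
 *
 *	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
 *	ddev->dev = &pdev->dev;
 *	ddev->device_prep_slave_sg = xxx_prep_slave_sg;
 *	ddev->device_issue_pending = xxx_issue_pending;
 *	ddev->device_tx_status = xxx_tx_status;
 *	INIT_LIST_HEAD(&ddev->channels);
 *	...add each channel's device_node to ddev->channels...
 *
 *	ret = dmaenginem_async_device_register(ddev);
 *	if (ret)
 *		return ret;
 */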
1167 
1168 struct dmaengine_unmap_pool {
1169         struct kmem_cache *cache;
1170         const char *name;
1171         mempool_t *pool;
1172         size_t size;
1173 };
1174 
1175 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1176 static struct dmaengine_unmap_pool unmap_pool[] = {
1177         __UNMAP_POOL(2),
1178         #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1179         __UNMAP_POOL(16),
1180         __UNMAP_POOL(128),
1181         __UNMAP_POOL(256),
1182         #endif
1183 };
1184 
1185 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1186 {
1187         int order = get_count_order(nr);
1188 
1189         switch (order) {
1190         case 0 ... 1:
1191                 return &unmap_pool[0];
1192 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1193         case 2 ... 4:
1194                 return &unmap_pool[1];
1195         case 5 ... 7:
1196                 return &unmap_pool[2];
1197         case 8:
1198                 return &unmap_pool[3];
1199 #endif
1200         default:
1201                 BUG();
1202                 return NULL;
1203         }
1204 }
1205 
1206 static void dmaengine_unmap(struct kref *kref)
1207 {
1208         struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1209         struct device *dev = unmap->dev;
1210         int cnt, i;
1211 
1212         cnt = unmap->to_cnt;
1213         for (i = 0; i < cnt; i++)
1214                 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1215                                DMA_TO_DEVICE);
1216         cnt += unmap->from_cnt;
1217         for (; i < cnt; i++)
1218                 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1219                                DMA_FROM_DEVICE);
1220         cnt += unmap->bidi_cnt;
1221         for (; i < cnt; i++) {
1222                 if (unmap->addr[i] == 0)
1223                         continue;
1224                 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1225                                DMA_BIDIRECTIONAL);
1226         }
1227         cnt = unmap->map_cnt;
1228         mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1229 }
1230 
1231 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1232 {
1233         if (unmap)
1234                 kref_put(&unmap->kref, dmaengine_unmap);
1235 }
1236 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1237 
1238 static void dmaengine_destroy_unmap_pool(void)
1239 {
1240         int i;
1241 
1242         for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1243                 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1244 
1245                 mempool_destroy(p->pool);
1246                 p->pool = NULL;
1247                 kmem_cache_destroy(p->cache);
1248                 p->cache = NULL;
1249         }
1250 }
1251 
1252 static int __init dmaengine_init_unmap_pool(void)
1253 {
1254         int i;
1255 
1256         for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1257                 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1258                 size_t size;
1259 
1260                 size = sizeof(struct dmaengine_unmap_data) +
1261                        sizeof(dma_addr_t) * p->size;
1262 
1263                 p->cache = kmem_cache_create(p->name, size, 0,
1264                                              SLAB_HWCACHE_ALIGN, NULL);
1265                 if (!p->cache)
1266                         break;
1267                 p->pool = mempool_create_slab_pool(1, p->cache);
1268                 if (!p->pool)
1269                         break;
1270         }
1271 
1272         if (i == ARRAY_SIZE(unmap_pool))
1273                 return 0;
1274 
1275         dmaengine_destroy_unmap_pool();
1276         return -ENOMEM;
1277 }
1278 
1279 struct dmaengine_unmap_data *
1280 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1281 {
1282         struct dmaengine_unmap_data *unmap;
1283 
1284         unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1285         if (!unmap)
1286                 return NULL;
1287 
1288         memset(unmap, 0, sizeof(*unmap));
1289         kref_init(&unmap->kref);
1290         unmap->dev = dev;
1291         unmap->map_cnt = nr;
1292 
1293         return unmap;
1294 }
1295 EXPORT_SYMBOL(dmaengine_get_unmap_data);
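
/*
 * Example (illustrative sketch, error handling elided): offload users map
 * their buffers, attach the unmap data to the descriptor and then drop their
 * own reference; the final dmaengine_unmap_put() unmaps everything:
 *
 *	struct device *dev = chan->device->dev;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_pg, src_off, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_pg, dst_off, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
 *				unmap->addr[0], len, DMA_PREP_INTERRUPT);
 *	if (tx)
 *		dma_set_unmap(tx, unmap);
 *	dmaengine_unmap_put(unmap);
 */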
1296 
1297 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1298         struct dma_chan *chan)
1299 {
1300         tx->chan = chan;
1301         #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1302         spin_lock_init(&tx->lock);
1303         #endif
1304 }
1305 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1306 
1307 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1308  * @tx: in-flight transaction to wait on
1309  */
1310 enum dma_status
1311 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1312 {
1313         unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1314 
1315         if (!tx)
1316                 return DMA_COMPLETE;
1317 
1318         while (tx->cookie == -EBUSY) {
1319                 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1320                         dev_err(tx->chan->device->dev,
1321                                 "%s timeout waiting for descriptor submission\n",
1322                                 __func__);
1323                         return DMA_ERROR;
1324                 }
1325                 cpu_relax();
1326         }
1327         return dma_sync_wait(tx->chan, tx->cookie);
1328 }
1329 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1330 
1331 /* dma_run_dependencies - helper routine for dma drivers to process
1332  *      (start) dependent operations on their target channel
1333  * @tx: transaction with dependencies
1334  */
1335 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1336 {
1337         struct dma_async_tx_descriptor *dep = txd_next(tx);
1338         struct dma_async_tx_descriptor *dep_next;
1339         struct dma_chan *chan;
1340 
1341         if (!dep)
1342                 return;
1343 
1344         /* we'll submit tx->next now, so clear the link */
1345         txd_clear_next(tx);
1346         chan = dep->chan;
1347 
1348         /* keep submitting up until a channel switch is detected
1349          * in that case we will be called again as a result of
1350          * processing the interrupt from async_tx_channel_switch
1351          */
1352         for (; dep; dep = dep_next) {
1353                 txd_lock(dep);
1354                 txd_clear_parent(dep);
1355                 dep_next = txd_next(dep);
1356                 if (dep_next && dep_next->chan == chan)
1357                         txd_clear_next(dep); /* ->next will be submitted */
1358                 else
1359                         dep_next = NULL; /* submit current dep and terminate */
1360                 txd_unlock(dep);
1361 
1362                 dep->tx_submit(dep);
1363         }
1364 
1365         chan->device->device_issue_pending(chan);
1366 }
1367 EXPORT_SYMBOL_GPL(dma_run_dependencies);
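
/*
 * Example (illustrative sketch): a DMA driver's completion path (often a
 * tasklet) calls this after a descriptor finishes so that chained async_tx
 * operations get submitted; "desc->txd" is a hypothetical field name and the
 * cookie/callback helpers come from the internal drivers/dma/dmaengine.h:
 *
 *	dma_cookie_complete(&desc->txd);
 *	dmaengine_desc_get_callback_invoke(&desc->txd, NULL);
 *	dma_run_dependencies(&desc->txd);
 */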
1368 
1369 static int __init dma_bus_init(void)
1370 {
1371         int err = dmaengine_init_unmap_pool();
1372 
1373         if (err)
1374                 return err;
1375         return class_register(&dma_devclass);
1376 }
1377 arch_initcall(dma_bus_init);
1378 
1379 
