drivers/dma/sun4i-dma.c

DEFINITIONS

This source file includes the following definitions:
  1. to_sun4i_dma_dev
  2. to_sun4i_dma_vchan
  3. to_sun4i_dma_contract
  4. chan2dev
  5. convert_burst
  6. convert_buswidth
  7. sun4i_dma_free_chan_resources
  8. find_and_use_pchan
  9. release_pchan
  10. configure_pchan
  11. set_pchan_interrupt
  12. __execute_vchan_pending
  13. sanitize_config
  14. generate_ndma_promise
  15. generate_ddma_promise
  16. generate_dma_contract
  17. get_next_cyclic_promise
  18. sun4i_dma_free_contract
  19. sun4i_dma_prep_dma_memcpy
  20. sun4i_dma_prep_dma_cyclic
  21. sun4i_dma_prep_slave_sg
  22. sun4i_dma_terminate_all
  23. sun4i_dma_config
  24. sun4i_dma_of_xlate
  25. sun4i_dma_tx_status
  26. sun4i_dma_issue_pending
  27. sun4i_dma_interrupt
  28. sun4i_dma_probe
  29. sun4i_dma_remove

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <emilio@elopez.com.ar>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING                   BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)     ((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)     ((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)       ((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)        ((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)     ((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)     ((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)       ((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)        (type)

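/*
 * Editor's illustration (not part of the driver; encoded values follow
 * convert_burst() and convert_buswidth() below): a 4-byte-wide,
 * burst-of-8 copy between two SDRAM endpoints on a dedicated channel
 * would compose a config word along the lines of
 *
 *      cfg = SUN4I_DMA_CFG_LOADING |
 *            SUN4I_DMA_CFG_DST_DATA_WIDTH(2) |
 *            SUN4I_DMA_CFG_DST_BURST_LENGTH(2) |
 *            SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
 *            SUN4I_DMA_CFG_SRC_DATA_WIDTH(2) |
 *            SUN4I_DMA_CFG_SRC_BURST_LENGTH(2) |
 *            SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
 *
 * which is essentially what sun4i_dma_prep_dma_memcpy() builds for a
 * dedicated vchan.
 */
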
/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM               0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT               (0x1F + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR             0
#define SUN4I_NDMA_ADDR_MODE_IO                 1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE                BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)            ((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE           BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN   BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE           BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR             0
#define SUN4I_DDMA_ADDR_MODE_IO                 1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE    2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE      3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM               0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT               (0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY                     BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE                BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE           BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN   BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE           BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)    (((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)      (((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)    (((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)      (((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG                0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG        0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)          (0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG                      0x0
#define SUN4I_NDMA_SRC_ADDR_REG                 0x4
#define SUN4I_NDMA_DST_ADDR_REG                 0x8
#define SUN4I_NDMA_BYTE_COUNT_REG               0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)          (0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG                      0x0
#define SUN4I_DDMA_SRC_ADDR_REG                 0x4
#define SUN4I_DDMA_DST_ADDR_REG                 0x8
#define SUN4I_DDMA_BYTE_COUNT_REG               0xC
#define SUN4I_DDMA_PARA_REG                     0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there are 29 and 21,
 * respectively. Given that each Normal DMA endpoint (other than
 * SDRAM) can be used as either tx or rx, we need 29 * 2 - 1 = 57
 * normal vchans plus 21 dedicated ones, or 78 vchans in total.
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS      8
#define SUN4I_DDMA_NR_MAX_CHANNELS      8
#define SUN4I_DMA_NR_MAX_CHANNELS                                       \
        (SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS        (29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS        21
#define SUN4I_DMA_NR_MAX_VCHANS                                         \
        (SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)

/*
 * This set of SUN4I_DDMA timing parameters was found experimentally
 * while working with the SPI driver and seems to make it behave
 * correctly.
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS                                 \
        (SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |                         \
         SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |                         \
         SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |                           \
         SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))

struct sun4i_dma_pchan {
        /* Register base of channel */
        void __iomem                    *base;
        /* vchan currently being serviced */
        struct sun4i_dma_vchan          *vchan;
        /* Is this a dedicated pchan? */
        int                             is_dedicated;
};

struct sun4i_dma_vchan {
        struct virt_dma_chan            vc;
        struct dma_slave_config         cfg;
        struct sun4i_dma_pchan          *pchan;
        struct sun4i_dma_promise        *processing;
        struct sun4i_dma_contract       *contract;
        u8                              endpoint;
        int                             is_dedicated;
};

struct sun4i_dma_promise {
        u32                             cfg;
        u32                             para;
        dma_addr_t                      src;
        dma_addr_t                      dst;
        size_t                          len;
        struct list_head                list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
        struct virt_dma_desc            vd;
        struct list_head                demands;
        struct list_head                completed_demands;
        int                             is_cyclic;
};

struct sun4i_dma_dev {
        DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
        struct dma_device               slave;
        struct sun4i_dma_pchan          *pchans;
        struct sun4i_dma_vchan          *vchans;
        void __iomem                    *base;
        struct clk                      *clk;
        int                             irq;
        spinlock_t                      lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
        return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
        return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
        return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static int convert_burst(u32 maxburst)
{
        if (maxburst > 8)
                return -EINVAL;

        /* 1 -> 0, 4 -> 1, 8 -> 2 */
        return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
        if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
                return -EINVAL;

        /* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
        return (addr_width >> 1);
}

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

        vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
                                                  struct sun4i_dma_vchan *vchan)
{
        struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
        unsigned long flags;
        int i, max;

        /*
         * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
         * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
         */
        if (vchan->is_dedicated) {
                i = SUN4I_NDMA_NR_MAX_CHANNELS;
                max = SUN4I_DMA_NR_MAX_CHANNELS;
        } else {
                i = 0;
                max = SUN4I_NDMA_NR_MAX_CHANNELS;
        }

        spin_lock_irqsave(&priv->lock, flags);
        for_each_clear_bit_from(i, priv->pchans_used, max) {
                pchan = &pchans[i];
                pchan->vchan = vchan;
                set_bit(i, priv->pchans_used);
                break;
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
                          struct sun4i_dma_pchan *pchan)
{
        unsigned long flags;
        int nr = pchan - priv->pchans;

        spin_lock_irqsave(&priv->lock, flags);

        pchan->vchan = NULL;
        clear_bit(nr, priv->pchans_used);

        spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
                            struct sun4i_dma_promise *d)
{
        /*
         * Configure addresses and misc parameters depending on type
         * SUN4I_DDMA has an extra field with timing parameters
         */
        if (pchan->is_dedicated) {
                writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
                writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
                writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
                writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
                writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
        } else {
                writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
                writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
                writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
                writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
        }
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
                                struct sun4i_dma_pchan *pchan,
                                int half, int end)
{
        u32 reg;
        int pchan_number = pchan - priv->pchans;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

        if (half)
                reg |= BIT(pchan_number * 2);
        else
                reg &= ~BIT(pchan_number * 2);

        if (end)
                reg |= BIT(pchan_number * 2 + 1);
        else
                reg &= ~BIT(pchan_number * 2 + 1);

        writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

        spin_unlock_irqrestore(&priv->lock, flags);
}
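
/*
 * Editor's note: each pchan owns two consecutive bits in the IRQ
 * enable and pending registers, bit (2 * n) for the half-done
 * interrupt and bit (2 * n + 1) for the end-of-transfer interrupt.
 * For example, the first dedicated pchan (global index 8) uses bits
 * 16 and 17.
 */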

/**
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
                                   struct sun4i_dma_vchan *vchan)
{
        struct sun4i_dma_promise *promise = NULL;
        struct sun4i_dma_contract *contract = NULL;
        struct sun4i_dma_pchan *pchan;
        struct virt_dma_desc *vd;
        int ret;

        lockdep_assert_held(&vchan->vc.lock);

        /* We need a pchan to do anything, so secure one if available */
        pchan = find_and_use_pchan(priv, vchan);
        if (!pchan)
                return -EBUSY;

        /*
         * Channel endpoints must not be repeated, so if this vchan
         * has already submitted some work, we can't do anything else
         */
        if (vchan->processing) {
                dev_dbg(chan2dev(&vchan->vc.chan),
                        "processing something to this endpoint already\n");
                ret = -EBUSY;
                goto release_pchan;
        }

        do {
                /* Figure out which contract we're working with today */
                vd = vchan_next_desc(&vchan->vc);
                if (!vd) {
                        dev_dbg(chan2dev(&vchan->vc.chan),
                                "No pending contract found");
                        ret = 0;
                        goto release_pchan;
                }

                contract = to_sun4i_dma_contract(vd);
                if (list_empty(&contract->demands)) {
                        /* The contract has been completed so mark it as such */
                        list_del(&contract->vd.node);
                        vchan_cookie_complete(&contract->vd);
                        dev_dbg(chan2dev(&vchan->vc.chan),
                                "Empty contract found and marked complete");
                }
        } while (list_empty(&contract->demands));

        /* Now find out what we need to do */
        promise = list_first_entry(&contract->demands,
                                   struct sun4i_dma_promise, list);
        vchan->processing = promise;

        /* ... and make it reality */
        if (promise) {
                vchan->contract = contract;
                vchan->pchan = pchan;
                set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
                configure_pchan(pchan, promise);
        }

        return 0;

release_pchan:
        release_pchan(priv, pchan);
        return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
                           enum dma_transfer_direction direction)
{
        switch (direction) {
        case DMA_MEM_TO_DEV:
                if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
                    !sconfig->dst_maxburst)
                        return -EINVAL;

                if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        sconfig->src_addr_width = sconfig->dst_addr_width;

                if (!sconfig->src_maxburst)
                        sconfig->src_maxburst = sconfig->dst_maxburst;

                break;

        case DMA_DEV_TO_MEM:
                if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
                    !sconfig->src_maxburst)
                        return -EINVAL;

                if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        sconfig->dst_addr_width = sconfig->src_addr_width;

                if (!sconfig->dst_maxburst)
                        sconfig->dst_maxburst = sconfig->src_maxburst;

                break;
        default:
                return 0;
        }

        return 0;
}
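
/*
 * Editor's illustration: for DMA_MEM_TO_DEV the device (dst) side must
 * be fully specified, while a missing memory (src) side is mirrored
 * from it. E.g. a client setting only dst_addr_width =
 * DMA_SLAVE_BUSWIDTH_4_BYTES and dst_maxburst = 8 ends up with the
 * same width and burst on the source side.
 */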

/**
 * Generate a promise, to be used in a normal DMA contract.
 *
 * An NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
                      size_t len, struct dma_slave_config *sconfig,
                      enum dma_transfer_direction direction)
{
        struct sun4i_dma_promise *promise;
        int ret;

        ret = sanitize_config(sconfig, direction);
        if (ret)
                return NULL;

        promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
        if (!promise)
                return NULL;

        promise->src = src;
        promise->dst = dest;
        promise->len = len;
        promise->cfg = SUN4I_DMA_CFG_LOADING |
                SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

        dev_dbg(chan2dev(chan),
                "src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
                sconfig->src_maxburst, sconfig->dst_maxburst,
                sconfig->src_addr_width, sconfig->dst_addr_width);

        /* Source burst */
        ret = convert_burst(sconfig->src_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

        /* Destination burst */
        ret = convert_burst(sconfig->dst_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

        /* Source bus width */
        ret = convert_buswidth(sconfig->src_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

        /* Destination bus width */
        ret = convert_buswidth(sconfig->dst_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

        return promise;

fail:
        kfree(promise);
        return NULL;
}

/**
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
                      size_t len, struct dma_slave_config *sconfig)
{
        struct sun4i_dma_promise *promise;
        int ret;

        promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
        if (!promise)
                return NULL;

        promise->src = src;
        promise->dst = dest;
        promise->len = len;
        promise->cfg = SUN4I_DMA_CFG_LOADING |
                SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

        /* Source burst */
        ret = convert_burst(sconfig->src_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

        /* Destination burst */
        ret = convert_burst(sconfig->dst_maxburst);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

        /* Source bus width */
        ret = convert_buswidth(sconfig->src_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

        /* Destination bus width */
        ret = convert_buswidth(sconfig->dst_addr_width);
        if (ret < 0)
                goto fail;
        promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

        return promise;

fail:
        kfree(promise);
        return NULL;
}

/**
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
        struct sun4i_dma_contract *contract;

        contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
        if (!contract)
                return NULL;

        INIT_LIST_HEAD(&contract->demands);
        INIT_LIST_HEAD(&contract->completed_demands);

        return contract;
}

/**
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
        struct sun4i_dma_promise *promise;

        promise = list_first_entry_or_null(&contract->demands,
                                           struct sun4i_dma_promise, list);
        if (!promise) {
                list_splice_init(&contract->completed_demands,
                                 &contract->demands);
                promise = list_first_entry(&contract->demands,
                                           struct sun4i_dma_promise, list);
        }

        return promise;
}

/**
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
        struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
        struct sun4i_dma_promise *promise, *tmp;

        /* Free all the demands and completed demands */
        list_for_each_entry_safe(promise, tmp, &contract->demands, list)
                kfree(promise);

        list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
                kfree(promise);

        kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                          dma_addr_t src, size_t len, unsigned long flags)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun4i_dma_promise *promise;
        struct sun4i_dma_contract *contract;

        contract = generate_dma_contract();
        if (!contract)
                return NULL;

        /*
         * We can only do the copy to bus aligned addresses, so
         * choose the best one so we get decent performance. We also
         * maximize the burst size for this same reason.
         */
        sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        sconfig->src_maxburst = 8;
        sconfig->dst_maxburst = 8;

        if (vchan->is_dedicated)
                promise = generate_ddma_promise(chan, src, dest, len, sconfig);
        else
                promise = generate_ndma_promise(chan, src, dest, len, sconfig,
                                                DMA_MEM_TO_MEM);

        if (!promise) {
                kfree(contract);
                return NULL;
        }

        /* Configure memcpy mode */
        if (vchan->is_dedicated) {
                promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
                                SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
        } else {
                promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
                                SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
        }

        /* Fill the contract with our only promise */
        list_add_tail(&promise->list, &contract->demands);

        /* And add it to the vchan */
        return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
                          size_t period_len, enum dma_transfer_direction dir,
                          unsigned long flags)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun4i_dma_promise *promise;
        struct sun4i_dma_contract *contract;
        dma_addr_t src, dest;
        u32 endpoints;
        int nr_periods, offset, plength, i;

        if (!is_slave_direction(dir)) {
                dev_err(chan2dev(chan), "Invalid DMA direction\n");
                return NULL;
        }

        if (vchan->is_dedicated) {
                /*
                 * As we are using this just for audio data, we need to use
                 * normal DMA. There is nothing stopping us from supporting
                 * dedicated DMA here as well, so if a client comes up and
                 * requires it, it will be simple to implement it.
                 */
                dev_err(chan2dev(chan),
                        "Cyclic transfers are only supported on Normal DMA\n");
                return NULL;
        }

        contract = generate_dma_contract();
        if (!contract)
                return NULL;

        contract->is_cyclic = 1;

        /* Figure out the endpoints and the address we need */
        if (dir == DMA_MEM_TO_DEV) {
                src = buf;
                dest = sconfig->dst_addr;
                endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
                            SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
        } else {
                src = sconfig->src_addr;
                dest = buf;
                endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
                            SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
        }

        /*
         * We will be using half done interrupts to make two periods
         * out of a promise, so we need to program the DMA engine less
         * often.
         *
         * The engine can interrupt on half-transfer, so we can use
         * this feature to program the engine half as often as if we
         * didn't use it (keep in mind the hardware doesn't support
         * linked lists).
         *
         * Say you have a set of periods (| marks the start/end, I for
         * interrupt, P for programming the engine to do a new
         * transfer), the easy but slow way would be to do
         *
         *  |---|---|---|---| (periods / promises)
         *  P  I,P I,P I,P  I
         *
         * Using half transfer interrupts you can do
         *
         *  |-------|-------| (promises as configured on hw)
         *  |---|---|---|---| (periods)
         *  P   I  I,P  I   I
         *
         * Which requires half the engine programming for the same
         * functionality.
         */
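        /*
         * Editor's illustration: with len = 48 KiB and period_len =
         * 8 KiB there are 6 periods, so DIV_ROUND_UP(6, 2) = 3
         * promises of 16 KiB each are generated below, and every
         * promise fires one half-done and one end interrupt.
         */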
        nr_periods = DIV_ROUND_UP(len / period_len, 2);
        for (i = 0; i < nr_periods; i++) {
                /* Calculate the offset in the buffer and the length needed */
                offset = i * period_len * 2;
                plength = min((len - offset), (period_len * 2));
                if (dir == DMA_MEM_TO_DEV)
                        src = buf + offset;
                else
                        dest = buf + offset;

                /* Make the promise */
                promise = generate_ndma_promise(chan, src, dest,
                                                plength, sconfig, dir);
                if (!promise) {
                        /* Free the contract and any promises made so far */
                        sun4i_dma_free_contract(&contract->vd);
                        return NULL;
                }
                promise->cfg |= endpoints;

                /* Then add it to the contract */
                list_add_tail(&promise->list, &contract->demands);
        }

        /* And add it to the vchan */
        return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, enum dma_transfer_direction dir,
                        unsigned long flags, void *context)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct dma_slave_config *sconfig = &vchan->cfg;
        struct sun4i_dma_promise *promise;
        struct sun4i_dma_contract *contract;
        u8 ram_type, io_mode, linear_mode;
        struct scatterlist *sg;
        dma_addr_t srcaddr, dstaddr;
        u32 endpoints, para;
        int i;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(dir)) {
                dev_err(chan2dev(chan), "Invalid DMA direction\n");
                return NULL;
        }

        contract = generate_dma_contract();
        if (!contract)
                return NULL;

        if (vchan->is_dedicated) {
                io_mode = SUN4I_DDMA_ADDR_MODE_IO;
                linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
                ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
        } else {
                io_mode = SUN4I_NDMA_ADDR_MODE_IO;
                linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
                ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
        }

        if (dir == DMA_MEM_TO_DEV)
                endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
                            SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
        else
                endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
                            SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
                            SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
                            SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

        for_each_sg(sgl, sg, sg_len, i) {
                /* Figure out addresses */
                if (dir == DMA_MEM_TO_DEV) {
                        srcaddr = sg_dma_address(sg);
                        dstaddr = sconfig->dst_addr;
                } else {
                        srcaddr = sconfig->src_addr;
                        dstaddr = sg_dma_address(sg);
                }

                /*
                 * These are the magic DMA engine timings that keep SPI going.
                 * I haven't seen any interface on DMAEngine to configure
                 * timings, and so far they seem to work for everything we
                 * support, so I've kept them here. I don't know if other
                 * devices need different timings because, as usual, we only
                 * have the "para" bitfield meanings, but no comment on what
                 * the values should be when doing a certain operation :|
                 */
                para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

                /* And make a suitable promise */
                if (vchan->is_dedicated)
                        promise = generate_ddma_promise(chan, srcaddr, dstaddr,
                                                        sg_dma_len(sg),
                                                        sconfig);
                else
                        promise = generate_ndma_promise(chan, srcaddr, dstaddr,
                                                        sg_dma_len(sg),
                                                        sconfig, dir);

                if (!promise) {
                        /* Free the contract and any promises made so far */
                        sun4i_dma_free_contract(&contract->vd);
                        return NULL;
                }
 847 
 848                 promise->cfg |= endpoints;
 849                 promise->para = para;
 850 
 851                 /* Then add it to the contract */
 852                 list_add_tail(&promise->list, &contract->demands);
 853         }
 854 
 855         /*
 856          * Once we've got all the promises ready, add the contract
 857          * to the pending list on the vchan
 858          */
 859         return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
 860 }

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
        struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct sun4i_dma_pchan *pchan = vchan->pchan;
        LIST_HEAD(head);
        unsigned long flags;

        spin_lock_irqsave(&vchan->vc.lock, flags);
        vchan_get_all_descriptors(&vchan->vc, &head);
        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        /*
         * Clearing the configuration register will halt the pchan. Interrupts
         * may still trigger, so don't forget to disable them.
         */
        if (pchan) {
                if (pchan->is_dedicated)
                        writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
                else
                        writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
                set_pchan_interrupt(priv, pchan, 0, 0);
                release_pchan(priv, pchan);
        }

        spin_lock_irqsave(&vchan->vc.lock, flags);
        vchan_dma_desc_free_list(&vchan->vc, &head);
        /* Clear these so the vchan is usable again */
        vchan->processing = NULL;
        vchan->pchan = NULL;
        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
                            struct dma_slave_config *config)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

        memcpy(&vchan->cfg, config, sizeof(*config));

        return 0;
}

static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
                                           struct of_dma *ofdma)
{
        struct sun4i_dma_dev *priv = ofdma->of_dma_data;
        struct sun4i_dma_vchan *vchan;
        struct dma_chan *chan;
        u8 is_dedicated = dma_spec->args[0];
        u8 endpoint = dma_spec->args[1];

        /* Check if type is Normal or Dedicated */
        if (is_dedicated != 0 && is_dedicated != 1)
                return NULL;

        /* Make sure the endpoint looks sane */
        if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
            (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
                return NULL;

        chan = dma_get_any_slave_channel(&priv->slave);
        if (!chan)
                return NULL;

        /* Assign the endpoint to the vchan */
        vchan = to_sun4i_dma_vchan(chan);
        vchan->is_dedicated = is_dedicated;
        vchan->endpoint = endpoint;

        return chan;
}
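
/*
 * Editor's illustration (endpoint numbers are hypothetical):
 * sun4i_dma_of_xlate() consumes two cells from the DT dma specifier,
 * so a client node would request a channel as
 *
 *      dmas = <&dma 0 19>;     // args[0]: 0 = normal, 1 = dedicated
 *      dma-names = "rx";       // args[1]: endpoint (DRQ) number
 *
 * and both values end up stored in the vchan.
 */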

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
                                           dma_cookie_t cookie,
                                           struct dma_tx_state *state)
{
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        struct sun4i_dma_pchan *pchan = vchan->pchan;
        struct sun4i_dma_contract *contract;
        struct sun4i_dma_promise *promise;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(chan, cookie, state);
        if (!state || (ret == DMA_COMPLETE))
                return ret;

        spin_lock_irqsave(&vchan->vc.lock, flags);
        vd = vchan_find_desc(&vchan->vc, cookie);
        if (!vd)
                goto exit;
        contract = to_sun4i_dma_contract(vd);

        list_for_each_entry(promise, &contract->demands, list)
                bytes += promise->len;

        /*
         * The hardware is configured to return the remaining byte
         * quantity. If possible, replace the first listed element's
         * full size with the actual remaining amount
         */
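        /*
         * Editor's illustration: with three pending 4 KiB promises and
         * 1 KiB still to go in the one currently in flight (the first
         * demand), the reported residue is 4K + 4K + 1K = 9 KiB.
         */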
        promise = list_first_entry_or_null(&contract->demands,
                                           struct sun4i_dma_promise, list);
        if (promise && pchan) {
                bytes -= promise->len;
                if (pchan->is_dedicated)
                        bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
                else
                        bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
        }

exit:
        dma_set_residue(state, bytes);
        spin_unlock_irqrestore(&vchan->vc.lock, flags);

        return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
        struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
        struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
        unsigned long flags;

        spin_lock_irqsave(&vchan->vc.lock, flags);

        /*
         * If there are pending transactions for this vchan, push one of
         * them into the engine to get the ball rolling.
         */
        if (vchan_issue_pending(&vchan->vc))
                __execute_vchan_pending(priv, vchan);

        spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
        struct sun4i_dma_dev *priv = dev_id;
        struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
        struct sun4i_dma_vchan *vchan;
        struct sun4i_dma_contract *contract;
        struct sun4i_dma_promise *promise;
        unsigned long pendirq, irqs, disableirqs;
        int bit, i, free_room, allow_mitigation = 1;

        pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

        disableirqs = 0;
        free_room = 0;

        for_each_set_bit(bit, &pendirq, 32) {
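                /*
                 * Each pchan owns two consecutive status bits
                 * (half-done, end), so bit >> 1 maps an IRQ bit back
                 * to its pchan; see set_pchan_interrupt().
                 */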
                pchan = &pchans[bit >> 1];
                vchan = pchan->vchan;
                if (!vchan) /* a terminated channel may still interrupt */
                        continue;
                contract = vchan->contract;

                /*
                 * Disable the IRQ and free the pchan if it's an end
                 * interrupt (odd bit)
                 */
                if (bit & 1) {
                        spin_lock(&vchan->vc.lock);

                        /*
                         * Move the promise into the completed list now that
                         * we're done with it
                         */
                        list_del(&vchan->processing->list);
                        list_add_tail(&vchan->processing->list,
                                      &contract->completed_demands);

                        /*
                         * Cyclic DMA transfers are special:
                         * - There's always something we can dispatch
                         * - We need to run the callback
                         * - Latency is very important, as this is used by audio
                         * We therefore just cycle through the list and dispatch
                         * whatever we have here, reusing the pchan. There's
                         * no need to run the thread after this.
                         *
                         * For non-cyclic transfers we need to look around,
                         * so we can program some more work, or notify the
                         * client that their transfers have been completed.
                         */
                        if (contract->is_cyclic) {
                                promise = get_next_cyclic_promise(contract);
                                vchan->processing = promise;
                                configure_pchan(pchan, promise);
                                vchan_cyclic_callback(&contract->vd);
                        } else {
                                vchan->processing = NULL;
                                vchan->pchan = NULL;

                                free_room = 1;
                                disableirqs |= BIT(bit);
                                release_pchan(priv, pchan);
                        }

                        spin_unlock(&vchan->vc.lock);
                } else {
                        /* Half done interrupt */
                        if (contract->is_cyclic)
                                vchan_cyclic_callback(&contract->vd);
                        else
                                disableirqs |= BIT(bit);
                }
        }

        /* Disable the IRQs for events we handled */
        spin_lock(&priv->lock);
        irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
        writel_relaxed(irqs & ~disableirqs,
                       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
        spin_unlock(&priv->lock);

        /* Writing 1 to the pending field will clear the pending interrupt */
        writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

        /*
         * If a pchan was freed, we may be able to schedule something else,
         * so have a look around
         */
        if (free_room) {
                for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
                        vchan = &priv->vchans[i];
                        spin_lock(&vchan->vc.lock);
                        __execute_vchan_pending(priv, vchan);
                        spin_unlock(&vchan->vc.lock);
                }
        }

        /*
         * Handle newer interrupts if some showed up, but only do it
         * once to avoid looping for too long
         */
        if (allow_mitigation) {
                pendirq = readl_relaxed(priv->base +
                                        SUN4I_DMA_IRQ_PENDING_STATUS_REG);
                if (pendirq) {
                        allow_mitigation = 0;
                        goto handle_pending;
                }
        }

        return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
        struct sun4i_dma_dev *priv;
        struct resource *res;
        int i, j, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(&pdev->dev, "No clock specified\n");
                return PTR_ERR(priv->clk);
        }

        platform_set_drvdata(pdev, priv);
        spin_lock_init(&priv->lock);

        dma_cap_zero(priv->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
        dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

        INIT_LIST_HEAD(&priv->slave.channels);
        priv->slave.device_free_chan_resources  = sun4i_dma_free_chan_resources;
        priv->slave.device_tx_status            = sun4i_dma_tx_status;
        priv->slave.device_issue_pending        = sun4i_dma_issue_pending;
        priv->slave.device_prep_slave_sg        = sun4i_dma_prep_slave_sg;
        priv->slave.device_prep_dma_memcpy      = sun4i_dma_prep_dma_memcpy;
        priv->slave.device_prep_dma_cyclic      = sun4i_dma_prep_dma_cyclic;
        priv->slave.device_config               = sun4i_dma_config;
        priv->slave.device_terminate_all        = sun4i_dma_terminate_all;
        priv->slave.copy_align                  = 2;
        priv->slave.src_addr_widths             = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        priv->slave.dst_addr_widths             = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        priv->slave.directions                  = BIT(DMA_DEV_TO_MEM) |
                                                  BIT(DMA_MEM_TO_DEV);
        priv->slave.residue_granularity         = DMA_RESIDUE_GRANULARITY_BURST;

        priv->slave.dev = &pdev->dev;

        priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
                                    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
        priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
                                    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
        if (!priv->vchans || !priv->pchans)
                return -ENOMEM;

        /*
         * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
         * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
         * dedicated ones
         */
        for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
                priv->pchans[i].base = priv->base +
                        SUN4I_NDMA_CHANNEL_REG_BASE(i);

        for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
                priv->pchans[i].base = priv->base +
                        SUN4I_DDMA_CHANNEL_REG_BASE(j);
                priv->pchans[i].is_dedicated = 1;
        }
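
        /*
         * Editor's note: per the REG_BASE macros above, normal pchan n
         * sits at 0x100 + n * 0x20 (e.g. pchan 3 at 0x160) and
         * dedicated pchan j at 0x300 + j * 0x20 (e.g. the first one,
         * global index 8, at 0x300).
         */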

        for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
                struct sun4i_dma_vchan *vchan = &priv->vchans[i];

                spin_lock_init(&vchan->vc.lock);
                vchan->vc.desc_free = sun4i_dma_free_contract;
                vchan_init(&vchan->vc, &priv->slave);
        }

        ret = clk_prepare_enable(priv->clk);
        if (ret) {
                dev_err(&pdev->dev, "Couldn't enable the clock\n");
                return ret;
        }

        /*
         * Make sure the IRQs are all disabled and accounted for. The bootloader
         * likes to leave these dirty
         */
        writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
        writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

        ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
                               0, dev_name(&pdev->dev), priv);
        if (ret) {
                dev_err(&pdev->dev, "Cannot request IRQ\n");
                goto err_clk_disable;
        }

        ret = dma_async_device_register(&priv->slave);
        if (ret) {
                dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
                goto err_clk_disable;
        }

        ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
                                         priv);
        if (ret) {
                dev_err(&pdev->dev, "of_dma_controller_register failed\n");
                goto err_dma_unregister;
        }

        dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

        return 0;

err_dma_unregister:
        dma_async_device_unregister(&priv->slave);
err_clk_disable:
        clk_disable_unprepare(priv->clk);
        return ret;
}

static int sun4i_dma_remove(struct platform_device *pdev)
{
        struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

        /* Disable IRQ so no more work is scheduled */
        disable_irq(priv->irq);

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&priv->slave);

        clk_disable_unprepare(priv->clk);

        return 0;
}

static const struct of_device_id sun4i_dma_match[] = {
        { .compatible = "allwinner,sun4i-a10-dma" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
        .probe  = sun4i_dma_probe,
        .remove = sun4i_dma_remove,
        .driver = {
                .name           = "sun4i-dma",
                .of_match_table = sun4i_dma_match,
        },
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");
