This source file includes the following definitions:
- dma_read
- dma_write
- vdma_desc_write
- dma_ctrl_read
- dma_ctrl_write
- dma_ctrl_clr
- dma_ctrl_set
- vdma_desc_write_64
- dma_writeq
- xilinx_write
- xilinx_axidma_buf
- xilinx_vdma_alloc_tx_segment
- xilinx_cdma_alloc_tx_segment
- xilinx_axidma_alloc_tx_segment
- xilinx_dma_clean_hw_desc
- xilinx_dma_free_tx_segment
- xilinx_cdma_free_tx_segment
- xilinx_vdma_free_tx_segment
- xilinx_dma_alloc_tx_descriptor
- xilinx_dma_free_tx_descriptor
- xilinx_dma_free_desc_list
- xilinx_dma_free_descriptors
- xilinx_dma_free_chan_resources
- xilinx_dma_chan_handle_cyclic
- xilinx_dma_chan_desc_cleanup
- xilinx_dma_do_tasklet
- xilinx_dma_alloc_chan_resources
- xilinx_dma_calc_copysize
- xilinx_dma_tx_status
- xilinx_dma_stop_transfer
- xilinx_cdma_stop_transfer
- xilinx_dma_start
- xilinx_vdma_start_transfer
- xilinx_cdma_start_transfer
- xilinx_dma_start_transfer
- xilinx_dma_issue_pending
- xilinx_dma_complete_descriptor
- xilinx_dma_reset
- xilinx_dma_chan_reset
- xilinx_dma_irq_handler
- append_desc_queue
- xilinx_dma_tx_submit
- xilinx_vdma_dma_prep_interleaved
- xilinx_cdma_prep_memcpy
- xilinx_dma_prep_slave_sg
- xilinx_dma_prep_dma_cyclic
- xilinx_dma_prep_interleaved
- xilinx_dma_terminate_all
- xilinx_vdma_channel_set_config
- xilinx_dma_chan_remove
- axidma_clk_init
- axicdma_clk_init
- axivdma_clk_init
- xdma_disable_allclks
- xilinx_dma_chan_probe
- xilinx_dma_child_probe
- of_dma_xilinx_xlate
- xilinx_dma_probe
- xilinx_dma_remove
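
For context, client drivers do not call these functions directly; they are registered as the channel's callbacks with the generic dmaengine framework (xilinx_dma_prep_slave_sg, xilinx_dma_tx_submit and xilinx_dma_issue_pending in particular). The sketch below shows how a client would reach them through the standard slave API. It is illustrative only: the function name example_xilinx_tx, the "tx_chan" dma-names entry and the dev/buf/len parameters are assumptions, and a real client would wait for the transfer-completion callback before unmapping the buffer.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Illustrative only: run one MEM_TO_DEV transfer through the dmaengine API. */
static int example_xilinx_tx(struct device *dev, void *buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_addr_t dma_buf;
	dma_cookie_t cookie;
	int ret = 0;

	chan = dma_request_chan(dev, "tx_chan");	/* name from the DT dma-names */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dma_buf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_buf)) {
		ret = -ENOMEM;
		goto release;
	}

	/* Builds a one-entry sg list and ends up in xilinx_dma_prep_slave_sg() */
	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EIO;
		goto unmap;
	}

	cookie = dmaengine_submit(desc);		/* xilinx_dma_tx_submit() */
	ret = dma_submit_error(cookie);
	if (!ret)
		dma_async_issue_pending(chan);		/* xilinx_dma_issue_pending() */

	/* A real client waits for the completion callback before unmapping. */
unmap:
	dma_unmap_single(dev, dma_buf, len, DMA_TO_DEVICE);
release:
	dma_release_channel(chan);
	return ret;
}
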
  30 #include <linux/bitops.h>
  31 #include <linux/dmapool.h>
  32 #include <linux/dma/xilinx_dma.h>
  33 #include <linux/init.h>
  34 #include <linux/interrupt.h>
  35 #include <linux/io.h>
  36 #include <linux/iopoll.h>
  37 #include <linux/module.h>
  38 #include <linux/of_address.h>
  39 #include <linux/of_dma.h>
  40 #include <linux/of_platform.h>
  41 #include <linux/of_irq.h>
  42 #include <linux/slab.h>
  43 #include <linux/clk.h>
  44 #include <linux/io-64-nonatomic-lo-hi.h>
  45 
  46 #include "../dmaengine.h"
  47 
  48 /* Register/Descriptor Offsets */
  49 #define XILINX_DMA_MM2S_CTRL_OFFSET             0x0000
  50 #define XILINX_DMA_S2MM_CTRL_OFFSET             0x0030
  51 #define XILINX_VDMA_MM2S_DESC_OFFSET            0x0050
  52 #define XILINX_VDMA_S2MM_DESC_OFFSET            0x00a0
  53 
  54 /* Control Registers */
  55 #define XILINX_DMA_REG_DMACR                    0x0000
  56 #define XILINX_DMA_DMACR_DELAY_MAX              0xff
  57 #define XILINX_DMA_DMACR_DELAY_SHIFT            24
  58 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX        0xff
  59 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT      16
  60 #define XILINX_DMA_DMACR_ERR_IRQ                BIT(14)
  61 #define XILINX_DMA_DMACR_DLY_CNT_IRQ            BIT(13)
  62 #define XILINX_DMA_DMACR_FRM_CNT_IRQ            BIT(12)
  63 #define XILINX_DMA_DMACR_MASTER_SHIFT           8
  64 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
  65 #define XILINX_DMA_DMACR_FRAMECNT_EN            BIT(4)
  66 #define XILINX_DMA_DMACR_GENLOCK_EN             BIT(3)
  67 #define XILINX_DMA_DMACR_RESET                  BIT(2)
  68 #define XILINX_DMA_DMACR_CIRC_EN                BIT(1)
  69 #define XILINX_DMA_DMACR_RUNSTOP                BIT(0)
  70 #define XILINX_DMA_DMACR_FSYNCSRC_MASK          GENMASK(6, 5)
  71 #define XILINX_DMA_DMACR_DELAY_MASK             GENMASK(31, 24)
  72 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK       GENMASK(23, 16)
  73 #define XILINX_DMA_DMACR_MASTER_MASK            GENMASK(11, 8)
  74 
  75 #define XILINX_DMA_REG_DMASR                    0x0004
  76 #define XILINX_DMA_DMASR_EOL_LATE_ERR           BIT(15)
  77 #define XILINX_DMA_DMASR_ERR_IRQ                BIT(14)
  78 #define XILINX_DMA_DMASR_DLY_CNT_IRQ            BIT(13)
  79 #define XILINX_DMA_DMASR_FRM_CNT_IRQ            BIT(12)
  80 #define XILINX_DMA_DMASR_SOF_LATE_ERR           BIT(11)
  81 #define XILINX_DMA_DMASR_SG_DEC_ERR             BIT(10)
  82 #define XILINX_DMA_DMASR_SG_SLV_ERR             BIT(9)
  83 #define XILINX_DMA_DMASR_EOF_EARLY_ERR          BIT(8)
  84 #define XILINX_DMA_DMASR_SOF_EARLY_ERR          BIT(7)
  85 #define XILINX_DMA_DMASR_DMA_DEC_ERR            BIT(6)
  86 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR          BIT(5)
  87 #define XILINX_DMA_DMASR_DMA_INT_ERR            BIT(4)
  88 #define XILINX_DMA_DMASR_SG_MASK                BIT(3)
  89 #define XILINX_DMA_DMASR_IDLE                   BIT(1)
  90 #define XILINX_DMA_DMASR_HALTED         BIT(0)
  91 #define XILINX_DMA_DMASR_DELAY_MASK             GENMASK(31, 24)
  92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK       GENMASK(23, 16)
  93 
  94 #define XILINX_DMA_REG_CURDESC                  0x0008
  95 #define XILINX_DMA_REG_TAILDESC         0x0010
  96 #define XILINX_DMA_REG_REG_INDEX                0x0014
  97 #define XILINX_DMA_REG_FRMSTORE         0x0018
  98 #define XILINX_DMA_REG_THRESHOLD                0x001c
  99 #define XILINX_DMA_REG_FRMPTR_STS               0x0024
 100 #define XILINX_DMA_REG_PARK_PTR         0x0028
 101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT        8
 102 #define XILINX_DMA_PARK_PTR_WR_REF_MASK         GENMASK(12, 8)
 103 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT        0
 104 #define XILINX_DMA_PARK_PTR_RD_REF_MASK         GENMASK(4, 0)
 105 #define XILINX_DMA_REG_VDMA_VERSION             0x002c
 106 
 107 
 108 #define XILINX_DMA_REG_VSIZE                    0x0000
 109 #define XILINX_DMA_REG_HSIZE                    0x0004
 110 
 111 #define XILINX_DMA_REG_FRMDLY_STRIDE            0x0008
 112 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT   24
 113 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT   0
 114 
 115 #define XILINX_VDMA_REG_START_ADDRESS(n)        (0x000c + 4 * (n))
 116 #define XILINX_VDMA_REG_START_ADDRESS_64(n)     (0x000c + 8 * (n))
 117 
 118 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP    0x00ec
 119 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP        BIT(0)
 120 
 121 
 122 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
 123 
 124 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK   \
 125                 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
 126                  XILINX_DMA_DMASR_DLY_CNT_IRQ | \
 127                  XILINX_DMA_DMASR_ERR_IRQ)
 128 
 129 #define XILINX_DMA_DMASR_ALL_ERR_MASK   \
 130                 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
 131                  XILINX_DMA_DMASR_SOF_LATE_ERR | \
 132                  XILINX_DMA_DMASR_SG_DEC_ERR | \
 133                  XILINX_DMA_DMASR_SG_SLV_ERR | \
 134                  XILINX_DMA_DMASR_EOF_EARLY_ERR | \
 135                  XILINX_DMA_DMASR_SOF_EARLY_ERR | \
 136                  XILINX_DMA_DMASR_DMA_DEC_ERR | \
 137                  XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
 138                  XILINX_DMA_DMASR_DMA_INT_ERR)
  139 
  140 /*
  141  * Errors that the interrupt handler treats as recoverable: their status
  142  * bits are cleared and, if the channel is configured to flush on frame
  143  * sync, the channel is not put into the error state.
  144  */
 145 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK       \
 146                 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
 147                  XILINX_DMA_DMASR_EOF_EARLY_ERR | \
 148                  XILINX_DMA_DMASR_SOF_EARLY_ERR | \
 149                  XILINX_DMA_DMASR_DMA_INT_ERR)
 150 
 151 
 152 #define XILINX_DMA_FLUSH_S2MM           3
 153 #define XILINX_DMA_FLUSH_MM2S           2
 154 #define XILINX_DMA_FLUSH_BOTH           1
 155 
 156 
 157 #define XILINX_DMA_LOOP_COUNT           1000000
 158 
 159 
 160 #define XILINX_DMA_REG_SRCDSTADDR       0x18
 161 #define XILINX_DMA_REG_BTT              0x28
 162 
 163 
 164 #define XILINX_DMA_MAX_TRANS_LEN_MIN    8
 165 #define XILINX_DMA_MAX_TRANS_LEN_MAX    23
 166 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
 167 #define XILINX_DMA_CR_COALESCE_MAX      GENMASK(23, 16)
 168 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
 169 #define XILINX_DMA_CR_COALESCE_SHIFT    16
 170 #define XILINX_DMA_BD_SOP               BIT(27)
 171 #define XILINX_DMA_BD_EOP               BIT(26)
 172 #define XILINX_DMA_COALESCE_MAX         255
 173 #define XILINX_DMA_NUM_DESCS            255
 174 #define XILINX_DMA_NUM_APP_WORDS        5
 175 
 176 
 177 #define XILINX_DMA_MCRX_CDESC(x)        (0x40 + (x-1) * 0x20)
 178 #define XILINX_DMA_MCRX_TDESC(x)        (0x48 + (x-1) * 0x20)
 179 
 180 
 181 #define XILINX_DMA_BD_HSIZE_MASK        GENMASK(15, 0)
 182 #define XILINX_DMA_BD_STRIDE_MASK       GENMASK(15, 0)
 183 #define XILINX_DMA_BD_VSIZE_MASK        GENMASK(31, 19)
 184 #define XILINX_DMA_BD_TDEST_MASK        GENMASK(4, 0)
 185 #define XILINX_DMA_BD_STRIDE_SHIFT      0
 186 #define XILINX_DMA_BD_VSIZE_SHIFT       19
 187 
 188 
 189 #define XILINX_CDMA_REG_SRCADDR         0x18
 190 #define XILINX_CDMA_REG_DSTADDR         0x20
 191 
 192 
 193 #define XILINX_CDMA_CR_SGMODE          BIT(3)
 194 
 195 #define xilinx_prep_dma_addr_t(addr)    \
 196         ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
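      /*
       * e.g. xilinx_prep_dma_addr_t(hw->src_addr) token-pastes hw->src_addr_msb
       * and folds the two 32-bit register halves into one dma_addr_t.
       */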
 197 
  198 /**
  199  * struct xilinx_vdma_desc_hw - Hardware Descriptor (VDMA)
  200  * @next_desc: Next Descriptor Pointer
  201  * @pad1: Reserved
  202  * @buf_addr: Buffer address
  203  * @buf_addr_msb: MSB of Buffer address
  204  * @vsize: Vertical Size
  205  * @hsize: Horizontal Size
  206  * @stride: Number of bytes between the first pixels of each horizontal line
  207  */
 208 struct xilinx_vdma_desc_hw {
 209         u32 next_desc;
 210         u32 pad1;
 211         u32 buf_addr;
 212         u32 buf_addr_msb;
 213         u32 vsize;
 214         u32 hsize;
 215         u32 stride;
 216 } __aligned(64);
 217 
  218 /**
  219  * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
  220  * @next_desc: Next Descriptor Pointer
  221  * @next_desc_msb: MSB of Next Descriptor Pointer
  222  * @buf_addr: Buffer address
  223  * @buf_addr_msb: MSB of Buffer address
  224  * @mcdma_control: Control field for mcdma
  225  * @vsize_stride: Vsize and Stride field for mcdma
  226  * @control: Control field
  227  * @status: Status field
  228  * @app: APP Fields
  229  */
 230 struct xilinx_axidma_desc_hw {
 231         u32 next_desc;
 232         u32 next_desc_msb;
 233         u32 buf_addr;
 234         u32 buf_addr_msb;
 235         u32 mcdma_control;
 236         u32 vsize_stride;
 237         u32 control;
 238         u32 status;
 239         u32 app[XILINX_DMA_NUM_APP_WORDS];
 240 } __aligned(64);
 241 
  242 /**
  243  * struct xilinx_cdma_desc_hw - Hardware Descriptor for AXI CDMA
  244  * @next_desc: Next Descriptor Pointer
  245  * @next_desc_msb: MSB of Next Descriptor Pointer
  246  * @src_addr: Source address
  247  * @src_addr_msb: MSB of Source address
  248  * @dest_addr: Destination address
  249  * @dest_addr_msb: MSB of Destination address
  250  * @control: Control field
  251  * @status: Status field
  252  */
 253 struct xilinx_cdma_desc_hw {
 254         u32 next_desc;
 255         u32 next_desc_msb;
 256         u32 src_addr;
 257         u32 src_addr_msb;
 258         u32 dest_addr;
 259         u32 dest_addr_msb;
 260         u32 control;
 261         u32 status;
 262 } __aligned(64);
 263 
  264 /**
  265  * struct xilinx_vdma_tx_segment - Descriptor segment (VDMA)
  266  * @hw: Hardware descriptor
  267  * @node: Node in the descriptor segments list
  268  * @phys: Physical address of segment
  269  */
 270 struct xilinx_vdma_tx_segment {
 271         struct xilinx_vdma_desc_hw hw;
 272         struct list_head node;
 273         dma_addr_t phys;
 274 } __aligned(64);
 275 
  276 /**
  277  * struct xilinx_axidma_tx_segment - Descriptor segment (AXI DMA)
  278  * @hw: Hardware descriptor
  279  * @node: Node in the descriptor segments list
  280  * @phys: Physical address of segment
  281  */
 282 struct xilinx_axidma_tx_segment {
 283         struct xilinx_axidma_desc_hw hw;
 284         struct list_head node;
 285         dma_addr_t phys;
 286 } __aligned(64);
 287 
  288 /**
  289  * struct xilinx_cdma_tx_segment - Descriptor segment (AXI CDMA)
  290  * @hw: Hardware descriptor
  291  * @node: Node in the descriptor segments list
  292  * @phys: Physical address of segment
  293  */
 294 struct xilinx_cdma_tx_segment {
 295         struct xilinx_cdma_desc_hw hw;
 296         struct list_head node;
 297         dma_addr_t phys;
 298 } __aligned(64);
 299 
  300 /**
  301  * struct xilinx_dma_tx_descriptor - Per Transaction structure
  302  * @async_tx: Async transaction descriptor
  303  * @segments: TX segments list
  304  * @node: Node in the channel descriptors list
  305  * @cyclic: Check for cyclic transfers
  306  */
 307 struct xilinx_dma_tx_descriptor {
 308         struct dma_async_tx_descriptor async_tx;
 309         struct list_head segments;
 310         struct list_head node;
 311         bool cyclic;
 312 };
 313 
  314 /**
  315  * struct xilinx_dma_chan - Driver specific DMA channel structure
  316  * @xdev: Driver specific device structure
  317  * @ctrl_offset: Control registers offset
  318  * @desc_offset: TX descriptor registers offset
  319  * @lock: Descriptor operation lock
  320  * @pending_list: Descriptors waiting
  321  * @active_list: Descriptors ready to submit
  322  * @done_list: Complete descriptors
  323  * @free_seg_list: Free descriptors
  324  * @common: DMA common channel
  325  * @desc_pool: Descriptors pool
  326  * @dev: The dma device
  327  * @irq: Channel IRQ
  328  * @id: Channel ID
  329  * @direction: Transfer direction
  330  * @num_frms: Number of frames
  331  * @has_sg: Support scatter transfers
  332  * @cyclic: Check for cyclic transfers
  333  * @genlock: Support genlock mode
  334  * @err: Channel has errors
  335  * @idle: Check for channel idle
  336  * @tasklet: Cleanup work after irq
  337  * @config: Device configuration info
  338  * @flush_on_fsync: Flush on Frame sync
  339  * @desc_pendingcount: Descriptor pending count
  340  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
  341  * @desc_submitcount: Descriptor h/w submitted count
  342  * @residue: Residue for AXI DMA
  343  * @seg_v: Statically allocated segments base
  344  * @seg_p: Physical address of the statically allocated segments base
  345  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
  346  * @cyclic_seg_p: Physical address of the cyclic segment
  347  * @start_transfer: Differentiate b/w DMA IP's transfer
  348  * @stop_transfer: Differentiate b/w DMA IP's quiesce
  349  * @tdest: TDEST value for mcdma
  350  * @has_vflip: S2MM vertical flip is supported
  351  */
 352 struct xilinx_dma_chan {
 353         struct xilinx_dma_device *xdev;
 354         u32 ctrl_offset;
 355         u32 desc_offset;
 356         spinlock_t lock;
 357         struct list_head pending_list;
 358         struct list_head active_list;
 359         struct list_head done_list;
 360         struct list_head free_seg_list;
 361         struct dma_chan common;
 362         struct dma_pool *desc_pool;
 363         struct device *dev;
 364         int irq;
 365         int id;
 366         enum dma_transfer_direction direction;
 367         int num_frms;
 368         bool has_sg;
 369         bool cyclic;
 370         bool genlock;
 371         bool err;
 372         bool idle;
 373         struct tasklet_struct tasklet;
 374         struct xilinx_vdma_config config;
 375         bool flush_on_fsync;
 376         u32 desc_pendingcount;
 377         bool ext_addr;
 378         u32 desc_submitcount;
 379         u32 residue;
 380         struct xilinx_axidma_tx_segment *seg_v;
 381         dma_addr_t seg_p;
 382         struct xilinx_axidma_tx_segment *cyclic_seg_v;
 383         dma_addr_t cyclic_seg_p;
 384         void (*start_transfer)(struct xilinx_dma_chan *chan);
 385         int (*stop_transfer)(struct xilinx_dma_chan *chan);
 386         u16 tdest;
 387         bool has_vflip;
 388 };
 389 
 390 
  391 
  392 /**
  393  * enum xdma_ip_type - DMA IP type
  394  * @XDMA_TYPE_AXIDMA: Axi dma ip
  395  * @XDMA_TYPE_CDMA: Axi cdma ip
  396  * @XDMA_TYPE_VDMA: Axi vdma ip
  397  */
 398 enum xdma_ip_type {
 399         XDMA_TYPE_AXIDMA = 0,
 400         XDMA_TYPE_CDMA,
 401         XDMA_TYPE_VDMA,
 402 };
 403 
 404 struct xilinx_dma_config {
 405         enum xdma_ip_type dmatype;
 406         int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
 407                         struct clk **tx_clk, struct clk **txs_clk,
 408                         struct clk **rx_clk, struct clk **rxs_clk);
 409 };
 410 
  411 /**
  412  * struct xilinx_dma_device - DMA device structure
  413  * @regs: I/O mapped base address
  414  * @dev: Device Structure
  415  * @common: DMA device structure
  416  * @chan: Driver specific DMA channels
  417  * @mcdma: Specifies whether Multi-Channel is present or not
  418  * @flush_on_fsync: Flush on frame sync
  419  * @ext_addr: Indicates 64 bit addressing is supported by dma device
  420  * @pdev: Platform device structure pointer
  421  * @dma_config: DMA config structure
  422  * @axi_clk: DMA Axi4-lite interface clock
  423  * @tx_clk: DMA mm2s clock
  424  * @txs_clk: DMA mm2s stream clock
  425  * @rx_clk: DMA s2mm clock
  426  * @rxs_clk: DMA s2mm stream clock
  427  * @nr_channels: Number of channels DMA device supports
  428  * @chan_id: DMA channel identifier
  429  * @max_buffer_len: Max buffer length
  430  */
 431 struct xilinx_dma_device {
 432         void __iomem *regs;
 433         struct device *dev;
 434         struct dma_device common;
 435         struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
 436         bool mcdma;
 437         u32 flush_on_fsync;
 438         bool ext_addr;
 439         struct platform_device  *pdev;
 440         const struct xilinx_dma_config *dma_config;
 441         struct clk *axi_clk;
 442         struct clk *tx_clk;
 443         struct clk *txs_clk;
 444         struct clk *rx_clk;
 445         struct clk *rxs_clk;
 446         u32 nr_channels;
 447         u32 chan_id;
 448         u32 max_buffer_len;
 449 };
 450 
 451 
 452 #define to_xilinx_chan(chan) \
 453         container_of(chan, struct xilinx_dma_chan, common)
 454 #define to_dma_tx_descriptor(tx) \
 455         container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
 456 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
 457         readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
 458                            cond, delay_us, timeout_us)
 459 
 460 
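      /*
       * Register access helpers: dma_read()/dma_write() hit the device register
       * space directly, the dma_ctrl_*() variants add the per-channel control
       * offset (MM2S or S2MM), and the vdma_desc_*() variants add the per-channel
       * descriptor offset used in VDMA register-direct mode.
       */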
 461 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
 462 {
 463         return ioread32(chan->xdev->regs + reg);
 464 }
 465 
 466 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
 467 {
 468         iowrite32(value, chan->xdev->regs + reg);
 469 }
 470 
 471 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
 472                                    u32 value)
 473 {
 474         dma_write(chan, chan->desc_offset + reg, value);
 475 }
 476 
 477 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
 478 {
 479         return dma_read(chan, chan->ctrl_offset + reg);
 480 }
 481 
 482 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
 483                                    u32 value)
 484 {
 485         dma_write(chan, chan->ctrl_offset + reg, value);
 486 }
 487 
 488 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
 489                                  u32 clr)
 490 {
 491         dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
 492 }
 493 
 494 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
 495                                  u32 set)
 496 {
 497         dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
 498 }
 499 
 500 
 501 
  502 /**
  503  * vdma_desc_write_64 - 64-bit descriptor write
  504  * @chan: Driver specific VDMA channel
  505  * @reg: Register to write
  506  * @value_lsb: Lower 32 bits of the descriptor address
  507  * @value_msb: Upper 32 bits of the descriptor address
  508  *
  509  * The 64-bit start-address registers are written as two 32-bit halves.
  510  */
 511 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
 512                                       u32 value_lsb, u32 value_msb)
 513 {
 514         
 515         writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
 516 
 517         
 518         writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
 519 }
 520 
 521 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
 522 {
 523         lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
 524 }
 525 
 526 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
 527                                 dma_addr_t addr)
 528 {
 529         if (chan->ext_addr)
 530                 dma_writeq(chan, reg, addr);
 531         else
 532                 dma_ctrl_write(chan, reg, addr);
 533 }
 534 
 535 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
 536                                      struct xilinx_axidma_desc_hw *hw,
 537                                      dma_addr_t buf_addr, size_t sg_used,
 538                                      size_t period_len)
 539 {
 540         if (chan->ext_addr) {
 541                 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
 542                 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
 543                                                  period_len);
 544         } else {
 545                 hw->buf_addr = buf_addr + sg_used + period_len;
 546         }
 547 }
 548 
 549 
 550 
 551 
 552 
 553 
 554 
 555 
 556 
 557 
 558 
 559 static struct xilinx_vdma_tx_segment *
 560 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 561 {
 562         struct xilinx_vdma_tx_segment *segment;
 563         dma_addr_t phys;
 564 
 565         segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 566         if (!segment)
 567                 return NULL;
 568 
 569         segment->phys = phys;
 570 
 571         return segment;
 572 }
 573 
 574 
 575 
 576 
 577 
 578 
 579 
 580 static struct xilinx_cdma_tx_segment *
 581 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 582 {
 583         struct xilinx_cdma_tx_segment *segment;
 584         dma_addr_t phys;
 585 
 586         segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 587         if (!segment)
 588                 return NULL;
 589 
 590         segment->phys = phys;
 591 
 592         return segment;
 593 }
 594 
  595 /**
  596  * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
  597  * @chan: Driver specific DMA channel
  598  *
  599  * Return: The allocated segment on success and NULL on failure.
  600  */
 601 static struct xilinx_axidma_tx_segment *
 602 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 603 {
 604         struct xilinx_axidma_tx_segment *segment = NULL;
 605         unsigned long flags;
 606 
 607         spin_lock_irqsave(&chan->lock, flags);
 608         if (!list_empty(&chan->free_seg_list)) {
 609                 segment = list_first_entry(&chan->free_seg_list,
 610                                            struct xilinx_axidma_tx_segment,
 611                                            node);
 612                 list_del(&segment->node);
 613         }
 614         spin_unlock_irqrestore(&chan->lock, flags);
 615 
 616         return segment;
 617 }
 618 
 619 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
 620 {
 621         u32 next_desc = hw->next_desc;
 622         u32 next_desc_msb = hw->next_desc_msb;
 623 
 624         memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
 625 
 626         hw->next_desc = next_desc;
 627         hw->next_desc_msb = next_desc_msb;
 628 }
 629 
 630 
 631 
 632 
 633 
 634 
 635 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
 636                                 struct xilinx_axidma_tx_segment *segment)
 637 {
 638         xilinx_dma_clean_hw_desc(&segment->hw);
 639 
 640         list_add_tail(&segment->node, &chan->free_seg_list);
 641 }
 642 
 643 
 644 
 645 
 646 
 647 
 648 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
 649                                 struct xilinx_cdma_tx_segment *segment)
 650 {
 651         dma_pool_free(chan->desc_pool, segment, segment->phys);
 652 }
 653 
 654 
 655 
 656 
 657 
 658 
 659 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
 660                                         struct xilinx_vdma_tx_segment *segment)
 661 {
 662         dma_pool_free(chan->desc_pool, segment, segment->phys);
 663 }
 664 
  665 /**
  666  * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
  667  * @chan: Driver specific DMA channel
  668  *
  669  * Return: The allocated descriptor on success and NULL on failure.
  670  */
 671 static struct xilinx_dma_tx_descriptor *
 672 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
 673 {
 674         struct xilinx_dma_tx_descriptor *desc;
 675 
 676         desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 677         if (!desc)
 678                 return NULL;
 679 
 680         INIT_LIST_HEAD(&desc->segments);
 681 
 682         return desc;
 683 }
 684 
  685 /**
  686  * xilinx_dma_free_tx_descriptor - Free transaction descriptor
  687  * @chan: Driver specific DMA channel
  688  * @desc: DMA transaction descriptor
  689  */
 690 static void
 691 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
 692                                struct xilinx_dma_tx_descriptor *desc)
 693 {
 694         struct xilinx_vdma_tx_segment *segment, *next;
 695         struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
 696         struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
 697 
 698         if (!desc)
 699                 return;
 700 
 701         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 702                 list_for_each_entry_safe(segment, next, &desc->segments, node) {
 703                         list_del(&segment->node);
 704                         xilinx_vdma_free_tx_segment(chan, segment);
 705                 }
 706         } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 707                 list_for_each_entry_safe(cdma_segment, cdma_next,
 708                                          &desc->segments, node) {
 709                         list_del(&cdma_segment->node);
 710                         xilinx_cdma_free_tx_segment(chan, cdma_segment);
 711                 }
 712         } else {
 713                 list_for_each_entry_safe(axidma_segment, axidma_next,
 714                                          &desc->segments, node) {
 715                         list_del(&axidma_segment->node);
 716                         xilinx_dma_free_tx_segment(chan, axidma_segment);
 717                 }
 718         }
 719 
 720         kfree(desc);
 721 }
 722 
 723 
 724 
 725 
 726 
 727 
 728 
 729 
 730 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
 731                                         struct list_head *list)
 732 {
 733         struct xilinx_dma_tx_descriptor *desc, *next;
 734 
 735         list_for_each_entry_safe(desc, next, list, node) {
 736                 list_del(&desc->node);
 737                 xilinx_dma_free_tx_descriptor(chan, desc);
 738         }
 739 }
 740 
  741 /**
  742  * xilinx_dma_free_descriptors - Free channel descriptors
  743  * @chan: Driver specific DMA channel
  744  */
 745 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
 746 {
 747         unsigned long flags;
 748 
 749         spin_lock_irqsave(&chan->lock, flags);
 750 
 751         xilinx_dma_free_desc_list(chan, &chan->pending_list);
 752         xilinx_dma_free_desc_list(chan, &chan->done_list);
 753         xilinx_dma_free_desc_list(chan, &chan->active_list);
 754 
 755         spin_unlock_irqrestore(&chan->lock, flags);
 756 }
 757 
  758 /**
  759  * xilinx_dma_free_chan_resources - Free channel resources
  760  * @dchan: DMA channel
  761  */
 762 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 763 {
 764         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 765         unsigned long flags;
 766 
 767         dev_dbg(chan->dev, "Free all channel resources.\n");
 768 
 769         xilinx_dma_free_descriptors(chan);
 770 
 771         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 772                 spin_lock_irqsave(&chan->lock, flags);
 773                 INIT_LIST_HEAD(&chan->free_seg_list);
 774                 spin_unlock_irqrestore(&chan->lock, flags);
 775 
 776                 
 777                 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
 778                                   XILINX_DMA_NUM_DESCS, chan->seg_v,
 779                                   chan->seg_p);
 780 
 781                 
 782                 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
 783                                   chan->cyclic_seg_v, chan->cyclic_seg_p);
 784         }
 785 
 786         if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
 787                 dma_pool_destroy(chan->desc_pool);
 788                 chan->desc_pool = NULL;
 789         }
 790 }
 791 
  792 /**
  793  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
  794  * @chan: Driver specific dma channel
  795  * @desc: dma transaction descriptor
  796  * @flags: flags for spin lock
  797  */
 798 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
 799                                           struct xilinx_dma_tx_descriptor *desc,
 800                                           unsigned long *flags)
 801 {
 802         dma_async_tx_callback callback;
 803         void *callback_param;
 804 
 805         callback = desc->async_tx.callback;
 806         callback_param = desc->async_tx.callback_param;
 807         if (callback) {
 808                 spin_unlock_irqrestore(&chan->lock, *flags);
 809                 callback(callback_param);
 810                 spin_lock_irqsave(&chan->lock, *flags);
 811         }
 812 }
 813 
  814 /**
  815  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
  816  * @chan: Driver specific DMA channel
  817  */
 818 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
 819 {
 820         struct xilinx_dma_tx_descriptor *desc, *next;
 821         unsigned long flags;
 822 
 823         spin_lock_irqsave(&chan->lock, flags);
 824 
 825         list_for_each_entry_safe(desc, next, &chan->done_list, node) {
 826                 struct dmaengine_desc_callback cb;
 827 
 828                 if (desc->cyclic) {
 829                         xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
 830                         break;
 831                 }
 832 
 833                 
 834                 list_del(&desc->node);
 835 
 836                 
 837                 dmaengine_desc_get_callback(&desc->async_tx, &cb);
 838                 if (dmaengine_desc_callback_valid(&cb)) {
 839                         spin_unlock_irqrestore(&chan->lock, flags);
 840                         dmaengine_desc_callback_invoke(&cb, NULL);
 841                         spin_lock_irqsave(&chan->lock, flags);
 842                 }
 843 
 844                 
 845                 dma_run_dependencies(&desc->async_tx);
 846                 xilinx_dma_free_tx_descriptor(chan, desc);
 847         }
 848 
 849         spin_unlock_irqrestore(&chan->lock, flags);
 850 }
 851 
  852 /**
  853  * xilinx_dma_do_tasklet - Tasklet that runs descriptor cleanup
  854  * @data: Pointer to the Xilinx DMA channel structure
  855  */
 856 static void xilinx_dma_do_tasklet(unsigned long data)
 857 {
 858         struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
 859 
 860         xilinx_dma_chan_desc_cleanup(chan);
 861 }
 862 
  863 /**
  864  * xilinx_dma_alloc_chan_resources - Allocate channel resources
  865  * @dchan: DMA channel
  866  *
  867  * Return: '0' on success and failure value on error
  868  */
 869 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 870 {
 871         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 872         int i;
 873 
 874         
 875         if (chan->desc_pool)
 876                 return 0;
 877 
 878         
 879 
 880 
 881 
 882         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 883                 
 884                 chan->seg_v = dma_alloc_coherent(chan->dev,
 885                                                  sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
 886                                                  &chan->seg_p, GFP_KERNEL);
 887                 if (!chan->seg_v) {
 888                         dev_err(chan->dev,
 889                                 "unable to allocate channel %d descriptors\n",
 890                                 chan->id);
 891                         return -ENOMEM;
 892                 }
 893                 
 894 
 895 
 896 
 897 
 898 
 899                 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
 900                                                         sizeof(*chan->cyclic_seg_v),
 901                                                         &chan->cyclic_seg_p,
 902                                                         GFP_KERNEL);
 903                 if (!chan->cyclic_seg_v) {
 904                         dev_err(chan->dev,
 905                                 "unable to allocate desc segment for cyclic DMA\n");
 906                         dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
 907                                 XILINX_DMA_NUM_DESCS, chan->seg_v,
 908                                 chan->seg_p);
 909                         return -ENOMEM;
 910                 }
 911                 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
 912 
 913                 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
 914                         chan->seg_v[i].hw.next_desc =
 915                         lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
 916                                 ((i + 1) % XILINX_DMA_NUM_DESCS));
 917                         chan->seg_v[i].hw.next_desc_msb =
 918                         upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
 919                                 ((i + 1) % XILINX_DMA_NUM_DESCS));
 920                         chan->seg_v[i].phys = chan->seg_p +
 921                                 sizeof(*chan->seg_v) * i;
 922                         list_add_tail(&chan->seg_v[i].node,
 923                                       &chan->free_seg_list);
 924                 }
 925         } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 926                 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
 927                                    chan->dev,
 928                                    sizeof(struct xilinx_cdma_tx_segment),
 929                                    __alignof__(struct xilinx_cdma_tx_segment),
 930                                    0);
 931         } else {
 932                 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
 933                                      chan->dev,
 934                                      sizeof(struct xilinx_vdma_tx_segment),
 935                                      __alignof__(struct xilinx_vdma_tx_segment),
 936                                      0);
 937         }
 938 
 939         if (!chan->desc_pool &&
 940             (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
 941                 dev_err(chan->dev,
 942                         "unable to allocate channel %d descriptor pool\n",
 943                         chan->id);
 944                 return -ENOMEM;
 945         }
 946 
 947         dma_cookie_init(dchan);
 948 
 949         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 950                 
 951 
 952 
 953                 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
 954                               XILINX_DMA_DMAXR_ALL_IRQ_MASK);
 955         }
 956 
 957         if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
 958                 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
 959                              XILINX_CDMA_CR_SGMODE);
 960 
 961         return 0;
 962 }
 963 
  964 /**
  965  * xilinx_dma_calc_copysize - Calculate the amount of data to copy
  966  * @chan: Driver specific DMA channel
  967  * @size: Total data that needs to be copied
  968  * @done: Amount of data that has been already copied
  969  *
  970  * Return: Amount of data that has to be copied
  971  */
 972 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
 973                                     int size, int done)
 974 {
 975         size_t copy;
 976 
 977         copy = min_t(size_t, size - done,
 978                      chan->xdev->max_buffer_len);
 979 
 980         if ((copy + done < size) &&
 981             chan->xdev->common.copy_align) {
 982                 
 983 
 984 
 985 
 986                 copy = rounddown(copy,
 987                                  (1 << chan->xdev->common.copy_align));
 988         }
 989         return copy;
 990 }
 991 
  992 /**
  993  * xilinx_dma_tx_status - Get DMA transaction status
  994  * @dchan: DMA channel
  995  * @cookie: Transaction identifier
  996  * @txstate: Transaction state
  997  *
  998  * Return: DMA transaction status
  999  */
1000 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1001                                         dma_cookie_t cookie,
1002                                         struct dma_tx_state *txstate)
1003 {
1004         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1005         struct xilinx_dma_tx_descriptor *desc;
1006         struct xilinx_axidma_tx_segment *segment;
1007         struct xilinx_axidma_desc_hw *hw;
1008         enum dma_status ret;
1009         unsigned long flags;
1010         u32 residue = 0;
1011 
1012         ret = dma_cookie_status(dchan, cookie, txstate);
1013         if (ret == DMA_COMPLETE || !txstate)
1014                 return ret;
1015 
1016         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1017                 spin_lock_irqsave(&chan->lock, flags);
1018 
1019                 desc = list_last_entry(&chan->active_list,
1020                                        struct xilinx_dma_tx_descriptor, node);
1021                 if (chan->has_sg) {
1022                         list_for_each_entry(segment, &desc->segments, node) {
1023                                 hw = &segment->hw;
1024                                 residue += (hw->control - hw->status) &
1025                                            chan->xdev->max_buffer_len;
1026                         }
1027                 }
1028                 spin_unlock_irqrestore(&chan->lock, flags);
1029 
1030                 chan->residue = residue;
1031                 dma_set_residue(txstate, chan->residue);
1032         }
1033 
1034         return ret;
1035 }
1036 
 1037 /**
 1038  * xilinx_dma_stop_transfer - Halt DMA channel
 1039  * @chan: Driver specific DMA channel
 1040  *
 1041  * Return: '0' on success and failure value on error
 1042  */
1043 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1044 {
1045         u32 val;
1046 
1047         dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1048 
 1049         /* Wait for the hardware to halt */
1050         return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1051                                        val & XILINX_DMA_DMASR_HALTED, 0,
1052                                        XILINX_DMA_LOOP_COUNT);
1053 }
1054 
 1055 /**
 1056  * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 1057  * @chan: Driver specific DMA channel
 1058  *
 1059  * Return: '0' on success and failure value on error
 1060  */
1061 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1062 {
1063         u32 val;
1064 
1065         return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1066                                        val & XILINX_DMA_DMASR_IDLE, 0,
1067                                        XILINX_DMA_LOOP_COUNT);
1068 }
1069 
 1070 /**
 1071  * xilinx_dma_start - Start DMA channel
 1072  * @chan: Driver specific DMA channel
 1073  */
1074 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1075 {
1076         int err;
1077         u32 val;
1078 
1079         dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1080 
 1081         /* Wait for the hardware to start */
1082         err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1083                                       !(val & XILINX_DMA_DMASR_HALTED), 0,
1084                                       XILINX_DMA_LOOP_COUNT);
1085 
1086         if (err) {
1087                 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1088                         chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1089 
1090                 chan->err = true;
1091         }
1092 }
1093 
 1094 /**
 1095  * xilinx_vdma_start_transfer - Starts VDMA transfer
 1096  * @chan: Driver specific channel struct pointer
 1097  */
1098 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1099 {
1100         struct xilinx_vdma_config *config = &chan->config;
1101         struct xilinx_dma_tx_descriptor *desc;
1102         u32 reg, j;
1103         struct xilinx_vdma_tx_segment *segment, *last = NULL;
1104         int i = 0;
1105 
1106         
1107         if (chan->err)
1108                 return;
1109 
1110         if (!chan->idle)
1111                 return;
1112 
1113         if (list_empty(&chan->pending_list))
1114                 return;
1115 
1116         desc = list_first_entry(&chan->pending_list,
1117                                 struct xilinx_dma_tx_descriptor, node);
1118 
1119         
1120         if (chan->has_vflip) {
1121                 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1122                 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1123                 reg |= config->vflip_en;
1124                 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1125                           reg);
1126         }
1127 
1128         reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1129 
1130         if (config->frm_cnt_en)
1131                 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1132         else
1133                 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1134 
1135         
1136         if (config->park)
1137                 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1138         else
1139                 reg |= XILINX_DMA_DMACR_CIRC_EN;
1140 
1141         dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1142 
1143         j = chan->desc_submitcount;
1144         reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1145         if (chan->direction == DMA_MEM_TO_DEV) {
1146                 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1147                 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1148         } else {
1149                 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1150                 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1151         }
1152         dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1153 
1154         
1155         xilinx_dma_start(chan);
1156 
1157         if (chan->err)
1158                 return;
1159 
1160         
1161         if (chan->desc_submitcount < chan->num_frms)
1162                 i = chan->desc_submitcount;
1163 
1164         list_for_each_entry(segment, &desc->segments, node) {
1165                 if (chan->ext_addr)
1166                         vdma_desc_write_64(chan,
1167                                    XILINX_VDMA_REG_START_ADDRESS_64(i++),
1168                                    segment->hw.buf_addr,
1169                                    segment->hw.buf_addr_msb);
1170                 else
1171                         vdma_desc_write(chan,
1172                                         XILINX_VDMA_REG_START_ADDRESS(i++),
1173                                         segment->hw.buf_addr);
1174 
1175                 last = segment;
1176         }
1177 
1178         if (!last)
1179                 return;
1180 
1181         
1182         vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1183         vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1184                         last->hw.stride);
1185         vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1186 
1187         chan->desc_submitcount++;
1188         chan->desc_pendingcount--;
1189         list_del(&desc->node);
1190         list_add_tail(&desc->node, &chan->active_list);
1191         if (chan->desc_submitcount == chan->num_frms)
1192                 chan->desc_submitcount = 0;
1193 
1194         chan->idle = false;
1195 }
1196 
 1197 /**
 1198  * xilinx_cdma_start_transfer - Starts cdma transfer
 1199  * @chan: Driver specific channel struct pointer
 1200  */
1201 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1202 {
1203         struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1204         struct xilinx_cdma_tx_segment *tail_segment;
1205         u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1206 
1207         if (chan->err)
1208                 return;
1209 
1210         if (!chan->idle)
1211                 return;
1212 
1213         if (list_empty(&chan->pending_list))
1214                 return;
1215 
1216         head_desc = list_first_entry(&chan->pending_list,
1217                                      struct xilinx_dma_tx_descriptor, node);
1218         tail_desc = list_last_entry(&chan->pending_list,
1219                                     struct xilinx_dma_tx_descriptor, node);
1220         tail_segment = list_last_entry(&tail_desc->segments,
1221                                        struct xilinx_cdma_tx_segment, node);
1222 
1223         if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1224                 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1225                 ctrl_reg |= chan->desc_pendingcount <<
1226                                 XILINX_DMA_CR_COALESCE_SHIFT;
1227                 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1228         }
1229 
1230         if (chan->has_sg) {
1231                 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1232                              XILINX_CDMA_CR_SGMODE);
1233 
1234                 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1235                              XILINX_CDMA_CR_SGMODE);
1236 
1237                 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1238                              head_desc->async_tx.phys);
1239 
1240                 
1241                 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1242                              tail_segment->phys);
1243         } else {
1244                 
1245                 struct xilinx_cdma_tx_segment *segment;
1246                 struct xilinx_cdma_desc_hw *hw;
1247 
1248                 segment = list_first_entry(&head_desc->segments,
1249                                            struct xilinx_cdma_tx_segment,
1250                                            node);
1251 
1252                 hw = &segment->hw;
1253 
1254                 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1255                              xilinx_prep_dma_addr_t(hw->src_addr));
1256                 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1257                              xilinx_prep_dma_addr_t(hw->dest_addr));
1258 
1259                 
1260                 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1261                                 hw->control & chan->xdev->max_buffer_len);
1262         }
1263 
1264         list_splice_tail_init(&chan->pending_list, &chan->active_list);
1265         chan->desc_pendingcount = 0;
1266         chan->idle = false;
1267 }
1268 
 1269 /**
 1270  * xilinx_dma_start_transfer - Starts DMA transfer
 1271  * @chan: Driver specific channel struct pointer
 1272  */
1273 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1274 {
1275         struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1276         struct xilinx_axidma_tx_segment *tail_segment;
1277         u32 reg;
1278 
1279         if (chan->err)
1280                 return;
1281 
1282         if (list_empty(&chan->pending_list))
1283                 return;
1284 
1285         if (!chan->idle)
1286                 return;
1287 
1288         head_desc = list_first_entry(&chan->pending_list,
1289                                      struct xilinx_dma_tx_descriptor, node);
1290         tail_desc = list_last_entry(&chan->pending_list,
1291                                     struct xilinx_dma_tx_descriptor, node);
1292         tail_segment = list_last_entry(&tail_desc->segments,
1293                                        struct xilinx_axidma_tx_segment, node);
1294 
1295         reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1296 
1297         if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1298                 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1299                 reg |= chan->desc_pendingcount <<
1300                                   XILINX_DMA_CR_COALESCE_SHIFT;
1301                 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1302         }
1303 
1304         if (chan->has_sg && !chan->xdev->mcdma)
1305                 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1306                              head_desc->async_tx.phys);
1307 
1308         if (chan->has_sg && chan->xdev->mcdma) {
1309                 if (chan->direction == DMA_MEM_TO_DEV) {
1310                         dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1311                                        head_desc->async_tx.phys);
1312                 } else {
1313                         if (!chan->tdest) {
1314                                 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1315                                        head_desc->async_tx.phys);
1316                         } else {
1317                                 dma_ctrl_write(chan,
1318                                         XILINX_DMA_MCRX_CDESC(chan->tdest),
1319                                        head_desc->async_tx.phys);
1320                         }
1321                 }
1322         }
1323 
1324         xilinx_dma_start(chan);
1325 
1326         if (chan->err)
1327                 return;
1328 
1329         
1330         if (chan->has_sg && !chan->xdev->mcdma) {
1331                 if (chan->cyclic)
1332                         xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1333                                      chan->cyclic_seg_v->phys);
1334                 else
1335                         xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1336                                      tail_segment->phys);
1337         } else if (chan->has_sg && chan->xdev->mcdma) {
1338                 if (chan->direction == DMA_MEM_TO_DEV) {
1339                         dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1340                                tail_segment->phys);
1341                 } else {
1342                         if (!chan->tdest) {
1343                                 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1344                                                tail_segment->phys);
1345                         } else {
1346                                 dma_ctrl_write(chan,
1347                                         XILINX_DMA_MCRX_TDESC(chan->tdest),
1348                                         tail_segment->phys);
1349                         }
1350                 }
1351         } else {
1352                 struct xilinx_axidma_tx_segment *segment;
1353                 struct xilinx_axidma_desc_hw *hw;
1354 
1355                 segment = list_first_entry(&head_desc->segments,
1356                                            struct xilinx_axidma_tx_segment,
1357                                            node);
1358                 hw = &segment->hw;
1359 
1360                 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1361                              xilinx_prep_dma_addr_t(hw->buf_addr));
1362 
1363                 
1364                 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1365                                hw->control & chan->xdev->max_buffer_len);
1366         }
1367 
1368         list_splice_tail_init(&chan->pending_list, &chan->active_list);
1369         chan->desc_pendingcount = 0;
1370         chan->idle = false;
1371 }
1372 
 1373 /**
 1374  * xilinx_dma_issue_pending - Issue pending transactions
 1375  * @dchan: DMA channel
 1376  */
1377 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1378 {
1379         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1380         unsigned long flags;
1381 
1382         spin_lock_irqsave(&chan->lock, flags);
1383         chan->start_transfer(chan);
1384         spin_unlock_irqrestore(&chan->lock, flags);
1385 }
1386 
 1387 /**
 1388  * xilinx_dma_complete_descriptor - Mark the active descriptors as complete
 1389  * @chan: xilinx DMA channel
 1390  *
 1391  * Called with the channel lock held from interrupt context.
 1392  */
1393 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1394 {
1395         struct xilinx_dma_tx_descriptor *desc, *next;
1396 
1397         
1398         if (list_empty(&chan->active_list))
1399                 return;
1400 
1401         list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1402                 list_del(&desc->node);
1403                 if (!desc->cyclic)
1404                         dma_cookie_complete(&desc->async_tx);
1405                 list_add_tail(&desc->node, &chan->done_list);
1406         }
1407 }
1408 
 1409 /**
 1410  * xilinx_dma_reset - Reset DMA channel
 1411  * @chan: Driver specific DMA channel
 1412  *
 1413  * Return: '0' on success and failure value on error
 1414  */
1415 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1416 {
1417         int err;
1418         u32 tmp;
1419 
1420         dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1421 
 1422         /* Wait for the reset bit to clear */
1423         err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1424                                       !(tmp & XILINX_DMA_DMACR_RESET), 0,
1425                                       XILINX_DMA_LOOP_COUNT);
1426 
1427         if (err) {
1428                 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1429                         dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1430                         dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1431                 return -ETIMEDOUT;
1432         }
1433 
1434         chan->err = false;
1435         chan->idle = true;
1436         chan->desc_pendingcount = 0;
1437         chan->desc_submitcount = 0;
1438 
1439         return err;
1440 }
1441 
 1442 /**
 1443  * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 1444  * @chan: Driver specific DMA channel
 1445  *
 1446  * Return: '0' on success and failure value on error
 1447  */
1448 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1449 {
1450         int err;
1451 
1452         
1453         err = xilinx_dma_reset(chan);
1454         if (err)
1455                 return err;
1456 
1457         
1458         dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1459                       XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1460 
1461         return 0;
1462 }
1463 
 1464 /**
 1465  * xilinx_dma_irq_handler - DMA Interrupt handler
 1466  * @irq: IRQ number
 1467  * @data: Pointer to the Xilinx DMA channel structure
 1468  *
 1469  * Return: IRQ_HANDLED/IRQ_NONE
 1470  */
1471 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1472 {
1473         struct xilinx_dma_chan *chan = data;
1474         u32 status;
1475 
1476         
1477         status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1478         if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1479                 return IRQ_NONE;
1480 
1481         dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1482                         status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1483 
1484         if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1485                 
1486 
1487 
1488 
1489 
1490 
1491 
1492                 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1493 
1494                 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1495                                 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1496 
1497                 if (!chan->flush_on_fsync ||
1498                     (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1499                         dev_err(chan->dev,
1500                                 "Channel %p has errors %x, cdr %x tdr %x\n",
1501                                 chan, errors,
1502                                 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1503                                 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1504                         chan->err = true;
1505                 }
1506         }
1507 
1508         if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1509                 
1510 
1511 
1512 
1513                 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1514         }
1515 
1516         if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1517                 spin_lock(&chan->lock);
1518                 xilinx_dma_complete_descriptor(chan);
1519                 chan->idle = true;
1520                 chan->start_transfer(chan);
1521                 spin_unlock(&chan->lock);
1522         }
1523 
1524         tasklet_schedule(&chan->tasklet);
1525         return IRQ_HANDLED;
1526 }
1527 
 1528 /**
 1529  * append_desc_queue - Queue a descriptor on the channel's pending list
 1530  * @chan: Driver specific dma channel
 1531  * @desc: dma transaction descriptor
 1532  */
1533 static void append_desc_queue(struct xilinx_dma_chan *chan,
1534                               struct xilinx_dma_tx_descriptor *desc)
1535 {
1536         struct xilinx_vdma_tx_segment *tail_segment;
1537         struct xilinx_dma_tx_descriptor *tail_desc;
1538         struct xilinx_axidma_tx_segment *axidma_tail_segment;
1539         struct xilinx_cdma_tx_segment *cdma_tail_segment;
1540 
1541         if (list_empty(&chan->pending_list))
1542                 goto append;
1543 
1544         
1545 
1546 
1547 
1548         tail_desc = list_last_entry(&chan->pending_list,
1549                                     struct xilinx_dma_tx_descriptor, node);
1550         if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1551                 tail_segment = list_last_entry(&tail_desc->segments,
1552                                                struct xilinx_vdma_tx_segment,
1553                                                node);
1554                 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1555         } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1556                 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1557                                                 struct xilinx_cdma_tx_segment,
1558                                                 node);
1559                 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1560         } else {
1561                 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1562                                                struct xilinx_axidma_tx_segment,
1563                                                node);
1564                 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1565         }
1566 
1567         
1568 
1569 
1570 
1571 append:
1572         list_add_tail(&desc->node, &chan->pending_list);
1573         chan->desc_pendingcount++;
1574 
1575         if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1576             && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1577                 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1578                 chan->desc_pendingcount = chan->num_frms;
1579         }
1580 }
1581 
 1582 /**
 1583  * xilinx_dma_tx_submit - Submit DMA transaction
 1584  * @tx: Async transaction descriptor
 1585  *
 1586  * Return: cookie value on success and failure value on error
 1587  */
1588 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1589 {
1590         struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1591         struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1592         dma_cookie_t cookie;
1593         unsigned long flags;
1594         int err;
1595 
1596         if (chan->cyclic) {
1597                 xilinx_dma_free_tx_descriptor(chan, desc);
1598                 return -EBUSY;
1599         }
1600 
1601         if (chan->err) {
1602                 
1603 
1604 
1605 
1606                 err = xilinx_dma_chan_reset(chan);
1607                 if (err < 0)
1608                         return err;
1609         }
1610 
1611         spin_lock_irqsave(&chan->lock, flags);
1612 
1613         cookie = dma_cookie_assign(tx);
1614 
1615         
1616         append_desc_queue(chan, desc);
1617 
1618         if (desc->cyclic)
1619                 chan->cyclic = true;
1620 
1621         spin_unlock_irqrestore(&chan->lock, flags);
1622 
1623         return cookie;
1624 }
1625 
 1626 /**
 1627  * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 1628  *      DMA_SLAVE transaction
 1629  * @dchan: DMA channel
 1630  * @xt: Interleaved template pointer
 1631  * @flags: transfer ack flags
 1632  *
 1633  * Return: Async transaction descriptor on success and NULL on failure
 1634  */
1635 static struct dma_async_tx_descriptor *
1636 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1637                                  struct dma_interleaved_template *xt,
1638                                  unsigned long flags)
1639 {
1640         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1641         struct xilinx_dma_tx_descriptor *desc;
1642         struct xilinx_vdma_tx_segment *segment;
1643         struct xilinx_vdma_desc_hw *hw;
1644 
1645         if (!is_slave_direction(xt->dir))
1646                 return NULL;
1647 
1648         if (!xt->numf || !xt->sgl[0].size)
1649                 return NULL;
1650 
1651         if (xt->frame_size != 1)
1652                 return NULL;
1653 
1654         
1655         desc = xilinx_dma_alloc_tx_descriptor(chan);
1656         if (!desc)
1657                 return NULL;
1658 
1659         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1660         desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1661         async_tx_ack(&desc->async_tx);
1662 
1663         
1664         segment = xilinx_vdma_alloc_tx_segment(chan);
1665         if (!segment)
1666                 goto error;
1667 
1668         
1669         hw = &segment->hw;
1670         hw->vsize = xt->numf;
1671         hw->hsize = xt->sgl[0].size;
1672         hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1673                         XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1674         hw->stride |= chan->config.frm_dly <<
1675                         XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1676 
1677         if (xt->dir != DMA_MEM_TO_DEV) {
1678                 if (chan->ext_addr) {
1679                         hw->buf_addr = lower_32_bits(xt->dst_start);
1680                         hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1681                 } else {
1682                         hw->buf_addr = xt->dst_start;
1683                 }
1684         } else {
1685                 if (chan->ext_addr) {
1686                         hw->buf_addr = lower_32_bits(xt->src_start);
1687                         hw->buf_addr_msb = upper_32_bits(xt->src_start);
1688                 } else {
1689                         hw->buf_addr = xt->src_start;
1690                 }
1691         }
1692 
1693         
1694         list_add_tail(&segment->node, &desc->segments);
1695 
1696         
1697         segment = list_first_entry(&desc->segments,
1698                                    struct xilinx_vdma_tx_segment, node);
1699         desc->async_tx.phys = segment->phys;
1700 
1701         return &desc->async_tx;
1702 
1703 error:
1704         xilinx_dma_free_tx_descriptor(chan, desc);
1705         return NULL;
1706 }
1707 
 1708 /**
 1709  * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 1710  * @dchan: DMA channel
 1711  * @dma_dst: destination address
 1712  * @dma_src: source address
 1713  * @len: transfer length
 1714  * @flags: transfer ack flags
 1715  *
 1716  * Return: Async transaction descriptor on success and NULL on failure
 1717  */
1718 static struct dma_async_tx_descriptor *
1719 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1720                         dma_addr_t dma_src, size_t len, unsigned long flags)
1721 {
1722         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1723         struct xilinx_dma_tx_descriptor *desc;
1724         struct xilinx_cdma_tx_segment *segment;
1725         struct xilinx_cdma_desc_hw *hw;
1726 
1727         if (!len || len > chan->xdev->max_buffer_len)
1728                 return NULL;
1729 
1730         desc = xilinx_dma_alloc_tx_descriptor(chan);
1731         if (!desc)
1732                 return NULL;
1733 
1734         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1735         desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1736 
1737         /* Allocate the link descriptor from DMA pool */
1738         segment = xilinx_cdma_alloc_tx_segment(chan);
1739         if (!segment)
1740                 goto error;
1741 
1742         hw = &segment->hw;
1743         hw->control = len;
1744         hw->src_addr = dma_src;
1745         hw->dest_addr = dma_dst;
1746         if (chan->ext_addr) {
1747                 hw->src_addr_msb = upper_32_bits(dma_src);
1748                 hw->dest_addr_msb = upper_32_bits(dma_dst);
1749         }
1750 
1751         /* Insert the segment into the descriptor segments list */
1752         list_add_tail(&segment->node, &desc->segments);
1753 
1754         desc->async_tx.phys = segment->phys;
1755         hw->next_desc = segment->phys;
1756 
1757         return &desc->async_tx;
1758 
1759 error:
1760         xilinx_dma_free_tx_descriptor(chan, desc);
1761         return NULL;
1762 }
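/*
 * Illustrative usage sketch (not part of the driver): one way a client could
 * reach xilinx_cdma_prep_memcpy() above through the generic dmaengine API.
 * The device pointer "dev", the channel name "cdma0" and the DMA addresses
 * are assumptions made only for this example.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	chan = dma_request_chan(dev, "cdma0");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx) {
 *		dma_release_channel(chan);
 *		return -EIO;
 *	}
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 */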
1763 
1764 /**
1765  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1766  * @dchan: DMA channel
1767  * @sgl: scatterlist to transfer to/from
1768  * @sg_len: number of entries in @sgl
1769  * @direction: DMA direction
1770  * @flags: transfer ack flags
1771  * @context: APP words of the descriptor
1772  *
1773  * Return: Async transaction descriptor on success and NULL on failure
1774  */
1775 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1776         struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1777         enum dma_transfer_direction direction, unsigned long flags,
1778         void *context)
1779 {
1780         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1781         struct xilinx_dma_tx_descriptor *desc;
1782         struct xilinx_axidma_tx_segment *segment = NULL;
1783         u32 *app_w = (u32 *)context;
1784         struct scatterlist *sg;
1785         size_t copy;
1786         size_t sg_used;
1787         unsigned int i;
1788 
1789         if (!is_slave_direction(direction))
1790                 return NULL;
1791 
1792         /* Allocate a transaction descriptor */
1793         desc = xilinx_dma_alloc_tx_descriptor(chan);
1794         if (!desc)
1795                 return NULL;
1796 
1797         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1798         desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1799 
1800         /* Build transactions using information from the scatter gather list */
1801         for_each_sg(sgl, sg, sg_len, i) {
1802                 sg_used = 0;
1803 
1804                 /* Loop until the entire scatterlist entry is used */
1805                 while (sg_used < sg_dma_len(sg)) {
1806                         struct xilinx_axidma_desc_hw *hw;
1807 
1808                         /* Get a free segment */
1809                         segment = xilinx_axidma_alloc_tx_segment(chan);
1810                         if (!segment)
1811                                 goto error;
1812 
1813                         /*
1814                          * Calculate the maximum number of bytes to transfer,
1815                          * making sure it is less than the hw limit
1816                          */
1817                         copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1818                                                         sg_used);
1819                         hw = &segment->hw;
1820 
1821                         /* Fill in the descriptor */
1822                         xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1823                                           sg_used, 0);
1824 
1825                         hw->control = copy;
1826 
1827                         if (chan->direction == DMA_MEM_TO_DEV) {
1828                                 if (app_w)
1829                                         memcpy(hw->app, app_w, sizeof(u32) *
1830                                                XILINX_DMA_NUM_APP_WORDS);
1831                         }
1832 
1833                         sg_used += copy;
1834 
1835                         /*
1836                          * Insert the segment into the descriptor segments
1837                          * list.
1838                          */
1839                         list_add_tail(&segment->node, &desc->segments);
1840                 }
1841         }
1842 
1843         segment = list_first_entry(&desc->segments,
1844                                    struct xilinx_axidma_tx_segment, node);
1845         desc->async_tx.phys = segment->phys;
1846 
1847         /* For MEM_TO_DEV, set SOP on the first BD and EOP on the last */
1848         if (chan->direction == DMA_MEM_TO_DEV) {
1849                 segment->hw.control |= XILINX_DMA_BD_SOP;
1850                 segment = list_last_entry(&desc->segments,
1851                                           struct xilinx_axidma_tx_segment,
1852                                           node);
1853                 segment->hw.control |= XILINX_DMA_BD_EOP;
1854         }
1855 
1856         return &desc->async_tx;
1857 
1858 error:
1859         xilinx_dma_free_tx_descriptor(chan, desc);
1860         return NULL;
1861 }
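/*
 * Illustrative usage sketch (not part of the driver): a MEM_TO_DEV
 * scatter-gather submission that ends up in xilinx_dma_prep_slave_sg()
 * above. It assumes "chan" was obtained with dma_request_chan(); the
 * scatterlist "sgl"/"sg_len", the completion callback and its context
 * pointer are also assumptions made only for this example. Note that the
 * driver's prep callback additionally accepts APP words through its context
 * parameter, which the generic dmaengine_prep_slave_sg() helper passes as
 * NULL.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -EIO;
 *
 *	tx->callback = my_tx_done;		hypothetical callback
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */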
1862 
1863 /**
1864  * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA cyclic transaction
1865  * @dchan: DMA channel
1866  * @buf_addr: Physical address of the buffer
1867  * @buf_len: Total length of the cyclic buffers
1868  * @period_len: length of individual cyclic buffer
1869  * @direction: DMA direction
1870  * @flags: transfer ack flags
1871  *
1872  * Return: Async transaction descriptor on success and NULL on failure
1873  */
1874 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1875         struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1876         size_t period_len, enum dma_transfer_direction direction,
1877         unsigned long flags)
1878 {
1879         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1880         struct xilinx_dma_tx_descriptor *desc;
1881         struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1882         size_t copy, sg_used;
1883         unsigned int num_periods;
1884         int i;
1885         u32 reg;
1886 
1887         if (!period_len)
1888                 return NULL;
1889 
1890         num_periods = buf_len / period_len;
1891 
1892         if (!num_periods)
1893                 return NULL;
1894 
1895         if (!is_slave_direction(direction))
1896                 return NULL;
1897 
1898         /* Allocate a transaction descriptor */
1899         desc = xilinx_dma_alloc_tx_descriptor(chan);
1900         if (!desc)
1901                 return NULL;
1902 
1903         chan->direction = direction;
1904         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1905         desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1906 
1907         for (i = 0; i < num_periods; ++i) {
1908                 sg_used = 0;
1909 
1910                 while (sg_used < period_len) {
1911                         struct xilinx_axidma_desc_hw *hw;
1912 
1913                         /* Get a free segment */
1914                         segment = xilinx_axidma_alloc_tx_segment(chan);
1915                         if (!segment)
1916                                 goto error;
1917 
1918                         /*
1919                          * Calculate the maximum number of bytes to transfer,
1920                          * making sure it is less than the hw limit
1921                          */
1922                         copy = xilinx_dma_calc_copysize(chan, period_len,
1923                                                         sg_used);
1924                         hw = &segment->hw;
1925                         xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1926                                           period_len * i);
1927                         hw->control = copy;
1928 
1929                         if (prev)
1930                                 prev->hw.next_desc = segment->phys;
1931 
1932                         prev = segment;
1933                         sg_used += copy;
1934 
1935                         /*
1936                          * Insert the segment into the descriptor segments
1937                          * list.
1938                          */
1939                         list_add_tail(&segment->node, &desc->segments);
1940                 }
1941         }
1942 
1943         head_segment = list_first_entry(&desc->segments,
1944                                    struct xilinx_axidma_tx_segment, node);
1945         desc->async_tx.phys = head_segment->phys;
1946 
1947         desc->cyclic = true;
1948         reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1949         reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1950         dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1951 
1952         segment = list_last_entry(&desc->segments,
1953                                   struct xilinx_axidma_tx_segment,
1954                                   node);
1955         segment->hw.next_desc = (u32) head_segment->phys;
1956 
1957         /* For MEM_TO_DEV, set SOP on the head BD and EOP on the last */
1958         if (direction == DMA_MEM_TO_DEV) {
1959                 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1960                 segment->hw.control |= XILINX_DMA_BD_EOP;
1961         }
1962 
1963         return &desc->async_tx;
1964 
1965 error:
1966         xilinx_dma_free_tx_descriptor(chan, desc);
1967         return NULL;
1968 }
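/*
 * Illustrative usage sketch (not part of the driver): a DEV_TO_MEM ring
 * buffer set up through xilinx_dma_prep_dma_cyclic() above. The buffer
 * handle, the lengths, "my_ctx" and the "period_elapsed" callback are
 * assumptions made only for this example; the callback fires once per
 * completed period.
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EIO;
 *	tx->callback = period_elapsed;
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_sync(chan);		stop and drain the ring
 */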
1969 
1970 /**
1971  * xilinx_dma_prep_interleaved - prepare a descriptor for a
1972  *	DMA_SLAVE transaction
1973  * @dchan: DMA channel
1974  * @xt: Interleaved template pointer
1975  * @flags: transfer ack flags
1976  *
1977  * Return: Async transaction descriptor on success and NULL on failure
1978  */
1979 static struct dma_async_tx_descriptor *
1980 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1981                                  struct dma_interleaved_template *xt,
1982                                  unsigned long flags)
1983 {
1984         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1985         struct xilinx_dma_tx_descriptor *desc;
1986         struct xilinx_axidma_tx_segment *segment;
1987         struct xilinx_axidma_desc_hw *hw;
1988 
1989         if (!is_slave_direction(xt->dir))
1990                 return NULL;
1991 
1992         if (!xt->numf || !xt->sgl[0].size)
1993                 return NULL;
1994 
1995         if (xt->frame_size != 1)
1996                 return NULL;
1997 
1998         /* Allocate a transaction descriptor */
1999         desc = xilinx_dma_alloc_tx_descriptor(chan);
2000         if (!desc)
2001                 return NULL;
2002 
2003         chan->direction = xt->dir;
2004         dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2005         desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2006 
2007         /* Get a free segment */
2008         segment = xilinx_axidma_alloc_tx_segment(chan);
2009         if (!segment)
2010                 goto error;
2011 
2012         hw = &segment->hw;
2013 
2014         /* Fill in the descriptor */
2015         if (xt->dir != DMA_MEM_TO_DEV)
2016                 hw->buf_addr = xt->dst_start;
2017         else
2018                 hw->buf_addr = xt->src_start;
2019 
2020         hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2021         hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2022                             XILINX_DMA_BD_VSIZE_MASK;
2023         hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2024                             XILINX_DMA_BD_STRIDE_MASK;
2025         hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2026 
2027         /*
2028          * Insert the segment into the descriptor
2029          * segments list.
2030          */
2031         list_add_tail(&segment->node, &desc->segments);
2032 
2033 
2034         segment = list_first_entry(&desc->segments,
2035                                    struct xilinx_axidma_tx_segment, node);
2036         desc->async_tx.phys = segment->phys;
2037 
2038         /* For MEM_TO_DEV, set SOP on the first BD and EOP on the last */
2039         if (xt->dir == DMA_MEM_TO_DEV) {
2040                 segment->hw.control |= XILINX_DMA_BD_SOP;
2041                 segment = list_last_entry(&desc->segments,
2042                                           struct xilinx_axidma_tx_segment,
2043                                           node);
2044                 segment->hw.control |= XILINX_DMA_BD_EOP;
2045         }
2046 
2047         return &desc->async_tx;
2048 
2049 error:
2050         xilinx_dma_free_tx_descriptor(chan, desc);
2051         return NULL;
2052 }
2053 
2054 /**
2055  * xilinx_dma_terminate_all - Halt the channel and free descriptors
2056  * @dchan: Driver specific DMA Channel pointer
2057  *
2058  * Return: '0' always.
2059  */
2060 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2061 {
2062         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2063         u32 reg;
2064         int err;
2065 
2066         if (chan->cyclic)
2067                 xilinx_dma_chan_reset(chan);
2068 
2069         err = chan->stop_transfer(chan);
2070         if (err) {
2071                 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2072                         chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2073                 chan->err = true;
2074         }
2075 
2076         /* Remove and free all of the descriptors in the lists */
2077         xilinx_dma_free_descriptors(chan);
2078         chan->idle = true;
2079 
2080         if (chan->cyclic) {
2081                 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2082                 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2083                 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2084                 chan->cyclic = false;
2085         }
2086 
2087         if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2088                 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2089                              XILINX_CDMA_CR_SGMODE);
2090 
2091         return 0;
2092 }
2093 
2094 /**
2095  * xilinx_vdma_channel_set_config - Configure VDMA channel
2096  * Run-time configuration for AXI VDMA, supports:
2097  * . resetting the channel
2098  * . configuring interrupt coalescing and inter-packet delay thresholds
2099  * . starting/stopping parking
2100  * . enabling genlock and selecting the frame sync source
2101  *
2102  * @dchan: DMA channel
2103  * @cfg: VDMA device configuration pointer
2104  *
2105  * Return: '0' on success and failure value on error
2106  */
2107 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2108                                         struct xilinx_vdma_config *cfg)
2109 {
2110         struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2111         u32 dmacr;
2112 
2113         if (cfg->reset)
2114                 return xilinx_dma_chan_reset(chan);
2115 
2116         dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2117 
2118         chan->config.frm_dly = cfg->frm_dly;
2119         chan->config.park = cfg->park;
2120 
2121         /* genlock settings */
2122         chan->config.gen_lock = cfg->gen_lock;
2123         chan->config.master = cfg->master;
2124 
2125         dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2126         if (cfg->gen_lock && chan->genlock) {
2127                 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2128                 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2129                 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2130         }
2131 
2132         chan->config.frm_cnt_en = cfg->frm_cnt_en;
2133         chan->config.vflip_en = cfg->vflip_en;
2134 
2135         if (cfg->park)
2136                 chan->config.park_frm = cfg->park_frm;
2137         else
2138                 chan->config.park_frm = -1;
2139 
2140         chan->config.coalesc = cfg->coalesc;
2141         chan->config.delay = cfg->delay;
2142 
2143         if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2144                 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2145                 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2146                 chan->config.coalesc = cfg->coalesc;
2147         }
2148 
2149         if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2150                 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2151                 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2152                 chan->config.delay = cfg->delay;
2153         }
2154 
2155         /* FSync source selection */
2156         dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2157         dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2158 
2159         dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2160 
2161         return 0;
2162 }
2163 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
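/*
 * Illustrative usage sketch (not part of the driver): how a display or
 * capture client might call the exported helper above before queueing VDMA
 * frames. The channel pointer "chan", the device pointer "dev" and the
 * chosen values are assumptions made only for this example.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_dly	= 0,
 *		.gen_lock	= 0,
 *		.coalesc	= 1,		interrupt after each frame
 *		.park		= 0,		circular mode, no parking
 *	};
 *	int ret = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (ret)
 *		dev_err(dev, "VDMA channel config failed: %d\n", ret);
 */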
2164 
2165 /* -----------------------------------------------------------------------------
2166  * Probe and remove
2167  */
2168 
2169 /**
2170  * xilinx_dma_chan_remove - Per Channel remove function
2171  * @chan: Driver specific DMA channel
2172  */
2173 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2174 {
2175         /* Disable all interrupts */
2176         dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2177                       XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2178 
2179         if (chan->irq > 0)
2180                 free_irq(chan->irq, chan);
2181 
2182         tasklet_kill(&chan->tasklet);
2183 
2184         list_del(&chan->common.device_node);
2185 }
2186 
2187 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2188                             struct clk **tx_clk, struct clk **rx_clk,
2189                             struct clk **sg_clk, struct clk **tmp_clk)
2190 {
2191         int err;
2192 
2193         *tmp_clk = NULL;
2194 
2195         *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2196         if (IS_ERR(*axi_clk)) {
2197                 err = PTR_ERR(*axi_clk);
2198                 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2199                 return err;
2200         }
2201 
2202         *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2203         if (IS_ERR(*tx_clk))
2204                 *tx_clk = NULL;
2205 
2206         *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2207         if (IS_ERR(*rx_clk))
2208                 *rx_clk = NULL;
2209 
2210         *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2211         if (IS_ERR(*sg_clk))
2212                 *sg_clk = NULL;
2213 
2214         err = clk_prepare_enable(*axi_clk);
2215         if (err) {
2216                 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2217                 return err;
2218         }
2219 
2220         err = clk_prepare_enable(*tx_clk);
2221         if (err) {
2222                 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2223                 goto err_disable_axiclk;
2224         }
2225 
2226         err = clk_prepare_enable(*rx_clk);
2227         if (err) {
2228                 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2229                 goto err_disable_txclk;
2230         }
2231 
2232         err = clk_prepare_enable(*sg_clk);
2233         if (err) {
2234                 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2235                 goto err_disable_rxclk;
2236         }
2237 
2238         return 0;
2239 
2240 err_disable_rxclk:
2241         clk_disable_unprepare(*rx_clk);
2242 err_disable_txclk:
2243         clk_disable_unprepare(*tx_clk);
2244 err_disable_axiclk:
2245         clk_disable_unprepare(*axi_clk);
2246 
2247         return err;
2248 }
2249 
2250 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2251                             struct clk **dev_clk, struct clk **tmp_clk,
2252                             struct clk **tmp1_clk, struct clk **tmp2_clk)
2253 {
2254         int err;
2255 
2256         *tmp_clk = NULL;
2257         *tmp1_clk = NULL;
2258         *tmp2_clk = NULL;
2259 
2260         *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2261         if (IS_ERR(*axi_clk)) {
2262                 err = PTR_ERR(*axi_clk);
2263                 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2264                 return err;
2265         }
2266 
2267         *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2268         if (IS_ERR(*dev_clk)) {
2269                 err = PTR_ERR(*dev_clk);
2270                 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2271                 return err;
2272         }
2273 
2274         err = clk_prepare_enable(*axi_clk);
2275         if (err) {
2276                 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2277                 return err;
2278         }
2279 
2280         err = clk_prepare_enable(*dev_clk);
2281         if (err) {
2282                 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2283                 goto err_disable_axiclk;
2284         }
2285 
2286         return 0;
2287 
2288 err_disable_axiclk:
2289         clk_disable_unprepare(*axi_clk);
2290 
2291         return err;
2292 }
2293 
2294 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2295                             struct clk **tx_clk, struct clk **txs_clk,
2296                             struct clk **rx_clk, struct clk **rxs_clk)
2297 {
2298         int err;
2299 
2300         *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2301         if (IS_ERR(*axi_clk)) {
2302                 err = PTR_ERR(*axi_clk);
2303                 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2304                 return err;
2305         }
2306 
2307         *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2308         if (IS_ERR(*tx_clk))
2309                 *tx_clk = NULL;
2310 
2311         *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2312         if (IS_ERR(*txs_clk))
2313                 *txs_clk = NULL;
2314 
2315         *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2316         if (IS_ERR(*rx_clk))
2317                 *rx_clk = NULL;
2318 
2319         *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2320         if (IS_ERR(*rxs_clk))
2321                 *rxs_clk = NULL;
2322 
2323         err = clk_prepare_enable(*axi_clk);
2324         if (err) {
2325                 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2326                 return err;
2327         }
2328 
2329         err = clk_prepare_enable(*tx_clk);
2330         if (err) {
2331                 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2332                 goto err_disable_axiclk;
2333         }
2334 
2335         err = clk_prepare_enable(*txs_clk);
2336         if (err) {
2337                 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2338                 goto err_disable_txclk;
2339         }
2340 
2341         err = clk_prepare_enable(*rx_clk);
2342         if (err) {
2343                 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2344                 goto err_disable_txsclk;
2345         }
2346 
2347         err = clk_prepare_enable(*rxs_clk);
2348         if (err) {
2349                 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2350                 goto err_disable_rxclk;
2351         }
2352 
2353         return 0;
2354 
2355 err_disable_rxclk:
2356         clk_disable_unprepare(*rx_clk);
2357 err_disable_txsclk:
2358         clk_disable_unprepare(*txs_clk);
2359 err_disable_txclk:
2360         clk_disable_unprepare(*tx_clk);
2361 err_disable_axiclk:
2362         clk_disable_unprepare(*axi_clk);
2363 
2364         return err;
2365 }
2366 
2367 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2368 {
2369         clk_disable_unprepare(xdev->rxs_clk);
2370         clk_disable_unprepare(xdev->rx_clk);
2371         clk_disable_unprepare(xdev->txs_clk);
2372         clk_disable_unprepare(xdev->tx_clk);
2373         clk_disable_unprepare(xdev->axi_clk);
2374 }
2375 
2376 /**
2377  * xilinx_dma_chan_probe - Per Channel Probing
2378  * It gets the channel features from the device tree entry and
2379  * initializes the channel-specific start/stop handlers.
2380  *
2381  * @xdev: Driver specific device structure
2382  * @node: Device node
2383  * @chan_id: DMA Channel id
2384  *
2385  * Return: '0' on success and failure value on error
2386  */
2387 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2388                                   struct device_node *node, int chan_id)
2389 {
2390         struct xilinx_dma_chan *chan;
2391         bool has_dre = false;
2392         u32 value, width;
2393         int err;
2394 
2395         /* Allocate and initialize the channel structure */
2396         chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2397         if (!chan)
2398                 return -ENOMEM;
2399 
2400         chan->dev = xdev->dev;
2401         chan->xdev = xdev;
2402         chan->desc_pendingcount = 0x0;
2403         chan->ext_addr = xdev->ext_addr;
2404         /*
2405          * This flag ensures that descriptors are not submitted while the
2406          * DMA engine is still busy: it avoids having to poll a status
2407          * register bit to learn the DMA state in the driver hot path.
2408          */
2409         chan->idle = true;
2410 
2411         spin_lock_init(&chan->lock);
2412         INIT_LIST_HEAD(&chan->pending_list);
2413         INIT_LIST_HEAD(&chan->done_list);
2414         INIT_LIST_HEAD(&chan->active_list);
2415         INIT_LIST_HEAD(&chan->free_seg_list);
2416 
2417         /* Retrieve the channel properties from the device tree */
2418         has_dre = of_property_read_bool(node, "xlnx,include-dre");
2419 
2420         chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2421 
2422         err = of_property_read_u32(node, "xlnx,datawidth", &value);
2423         if (err) {
2424                 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2425                 return err;
2426         }
2427         width = value >> 3; 
2428 
2429         /* DRE is not available in hardware for data widths above 8 bytes */
2430         if (width > 8)
2431                 has_dre = false;
2432 
2433         if (!has_dre)
2434                 xdev->common.copy_align = fls(width - 1);
2435 
2436         if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2437             of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2438             of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2439                 chan->direction = DMA_MEM_TO_DEV;
2440                 chan->id = chan_id;
2441                 chan->tdest = chan_id;
2442 
2443                 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2444                 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2445                         chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2446                         chan->config.park = 1;
2447 
2448                         if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2449                             xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2450                                 chan->flush_on_fsync = true;
2451                 }
2452         } else if (of_device_is_compatible(node,
2453                                            "xlnx,axi-vdma-s2mm-channel") ||
2454                    of_device_is_compatible(node,
2455                                            "xlnx,axi-dma-s2mm-channel")) {
2456                 chan->direction = DMA_DEV_TO_MEM;
2457                 chan->id = chan_id;
2458                 chan->tdest = chan_id - xdev->nr_channels;
2459                 chan->has_vflip = of_property_read_bool(node,
2460                                         "xlnx,enable-vert-flip");
2461                 if (chan->has_vflip) {
2462                         chan->config.vflip_en = dma_read(chan,
2463                                 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2464                                 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2465                 }
2466 
2467                 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2468                 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2469                         chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2470                         chan->config.park = 1;
2471 
2472                         if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2473                             xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2474                                 chan->flush_on_fsync = true;
2475                 }
2476         } else {
2477                 dev_err(xdev->dev, "Invalid channel compatible node\n");
2478                 return -EINVAL;
2479         }
2480 
2481         /* Request the interrupt */
2482         chan->irq = irq_of_parse_and_map(node, 0);
2483         err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2484                           "xilinx-dma-controller", chan);
2485         if (err) {
2486                 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2487                 return err;
2488         }
2489 
2490         if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2491                 chan->start_transfer = xilinx_dma_start_transfer;
2492                 chan->stop_transfer = xilinx_dma_stop_transfer;
2493         } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2494                 chan->start_transfer = xilinx_cdma_start_transfer;
2495                 chan->stop_transfer = xilinx_cdma_stop_transfer;
2496         } else {
2497                 chan->start_transfer = xilinx_vdma_start_transfer;
2498                 chan->stop_transfer = xilinx_dma_stop_transfer;
2499         }
2500 
2501         /* Check whether Scatter-Gather is enabled in hardware (AXI DMA/CDMA only) */
2502         if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2503                 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2504                     XILINX_DMA_DMASR_SG_MASK)
2505                         chan->has_sg = true;
2506                 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2507                         chan->has_sg ? "enabled" : "disabled");
2508         }
2509 
2510         /* Initialize the tasklet */
2511         tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2512                         (unsigned long)chan);
2513 
2514         /*
2515          * Initialize the DMA channel and add it to the DMA engine
2516          * channels list.
2517          */
2518         chan->common.device = &xdev->common;
2519 
2520         list_add_tail(&chan->common.device_node, &xdev->common.channels);
2521         xdev->chan[chan->id] = chan;
2522 
2523         /* Reset the channel */
2524         err = xilinx_dma_chan_reset(chan);
2525         if (err < 0) {
2526                 dev_err(xdev->dev, "Reset channel failed\n");
2527                 return err;
2528         }
2529 
2530         return 0;
2531 }
2532 
2533 /**
2534  * xilinx_dma_child_probe - Per child node probe
2535  * It gets the number of dma-channels per child node from
2536  * the device tree and initializes all the channels.
2537  *
2538  * @xdev: Driver specific device structure
2539  * @node: Device node
2540  *
2541  * Return: '0' always.
2542  */
2543 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2544                                     struct device_node *node)
2545 {
2546         int ret, i;
2547         u32 nr_channels = 1;
2548         ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2549         if ((ret < 0) && xdev->mcdma)
2550                 dev_warn(xdev->dev, "missing dma-channels property\n");
2551 
2552         for (i = 0; i < nr_channels; i++)
2553                 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2554 
2555         xdev->nr_channels += nr_channels;
2556 
2557         return 0;
2558 }
2559 
2560 /**
2561  * of_dma_xilinx_xlate - Translation function
2562  * @dma_spec: Pointer to DMA specifier as found in the device tree
2563  * @ofdma: Pointer to DMA controller data
2564  *
2565  * Return: DMA channel pointer on success or NULL on error
2566  */
2567 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2568                                                 struct of_dma *ofdma)
2569 {
2570         struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2571         int chan_id = dma_spec->args[0];
2572 
2573         if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2574                 return NULL;
2575 
2576         return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2577 }
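/*
 * Illustrative binding sketch (not part of the driver): the translation
 * function above takes a single specifier cell and treats it as the index
 * of the channel created during probe. The node names, the channel order
 * and the "tx"/"rx" labels below are assumptions made only for this example.
 *
 *	Consumer device-tree node:
 *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		dma-names = "tx", "rx";
 *
 *	Consumer driver:
 *		struct dma_chan *tx_chan = dma_request_chan(&pdev->dev, "tx");
 *
 *		if (IS_ERR(tx_chan))
 *			return PTR_ERR(tx_chan);
 */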
2578 
2579 static const struct xilinx_dma_config axidma_config = {
2580         .dmatype = XDMA_TYPE_AXIDMA,
2581         .clk_init = axidma_clk_init,
2582 };
2583 
2584 static const struct xilinx_dma_config axicdma_config = {
2585         .dmatype = XDMA_TYPE_CDMA,
2586         .clk_init = axicdma_clk_init,
2587 };
2588 
2589 static const struct xilinx_dma_config axivdma_config = {
2590         .dmatype = XDMA_TYPE_VDMA,
2591         .clk_init = axivdma_clk_init,
2592 };
2593 
2594 static const struct of_device_id xilinx_dma_of_ids[] = {
2595         { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2596         { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2597         { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2598         {}
2599 };
2600 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2601 
2602 /**
2603  * xilinx_dma_probe - Driver probe function
2604  * @pdev: Pointer to the platform_device structure
2605  *
2606  * Return: '0' on success and failure value on error
2607  */
2608 static int xilinx_dma_probe(struct platform_device *pdev)
2609 {
2610         int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2611                         struct clk **, struct clk **, struct clk **)
2612                                         = axivdma_clk_init;
2613         struct device_node *node = pdev->dev.of_node;
2614         struct xilinx_dma_device *xdev;
2615         struct device_node *child, *np = pdev->dev.of_node;
2616         struct resource *io;
2617         u32 num_frames, addr_width, len_width;
2618         int i, err;
2619 
2620         /* Allocate and initialize the DMA engine structure */
2621         xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2622         if (!xdev)
2623                 return -ENOMEM;
2624 
2625         xdev->dev = &pdev->dev;
2626         if (np) {
2627                 const struct of_device_id *match;
2628 
2629                 match = of_match_node(xilinx_dma_of_ids, np);
2630                 if (match && match->data) {
2631                         xdev->dma_config = match->data;
2632                         clk_init = xdev->dma_config->clk_init;
2633                 }
2634         }
2635 
2636         err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2637                        &xdev->rx_clk, &xdev->rxs_clk);
2638         if (err)
2639                 return err;
2640 
2641         /* Request and map I/O memory */
2642         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2643         xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2644         if (IS_ERR(xdev->regs))
2645                 return PTR_ERR(xdev->regs);
2646 
2647         /* Retrieve the DMA engine properties from the device tree */
2648         xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
2649 
2650         if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2651                 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2652                 if (!of_property_read_u32(node, "xlnx,sg-length-width",
2653                                           &len_width)) {
2654                         if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2655                             len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2656                                 dev_warn(xdev->dev,
2657                                          "invalid xlnx,sg-length-width property value. Using default width\n");
2658                         } else {
2659                                 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2660                                         dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2661                                 xdev->max_buffer_len =
2662                                         GENMASK(len_width - 1, 0);
2663                         }
2664                 }
2665         }
2666 
2667         if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2668                 err = of_property_read_u32(node, "xlnx,num-fstores",
2669                                            &num_frames);
2670                 if (err < 0) {
2671                         dev_err(xdev->dev,
2672                                 "missing xlnx,num-fstores property\n");
2673                         return err;
2674                 }
2675 
2676                 err = of_property_read_u32(node, "xlnx,flush-fsync",
2677                                            &xdev->flush_on_fsync);
2678                 if (err < 0)
2679                         dev_warn(xdev->dev,
2680                                  "missing xlnx,flush-fsync property\n");
2681         }
2682 
2683         err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2684         if (err < 0)
2685                 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2686 
2687         if (addr_width > 32)
2688                 xdev->ext_addr = true;
2689         else
2690                 xdev->ext_addr = false;
2691 
2692         /* Set the dma mask bits */
2693         dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
2694 
2695         /* Initialize the DMA engine */
2696         xdev->common.dev = &pdev->dev;
2697 
2698         INIT_LIST_HEAD(&xdev->common.channels);
2699         if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2700                 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2701                 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2702         }
2703 
2704         xdev->common.device_alloc_chan_resources =
2705                                 xilinx_dma_alloc_chan_resources;
2706         xdev->common.device_free_chan_resources =
2707                                 xilinx_dma_free_chan_resources;
2708         xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2709         xdev->common.device_tx_status = xilinx_dma_tx_status;
2710         xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2711         if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2712                 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2713                 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2714                 xdev->common.device_prep_dma_cyclic =
2715                                           xilinx_dma_prep_dma_cyclic;
2716                 xdev->common.device_prep_interleaved_dma =
2717                                         xilinx_dma_prep_interleaved;
2718                 /* Residue calculation is supported only by AXI DMA */
2719                 xdev->common.residue_granularity =
2720                                           DMA_RESIDUE_GRANULARITY_SEGMENT;
2721         } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2722                 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2723                 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2724         } else {
2725                 xdev->common.device_prep_interleaved_dma =
2726                                 xilinx_vdma_dma_prep_interleaved;
2727         }
2728 
2729         platform_set_drvdata(pdev, xdev);
2730 
2731         /* Initialize the channels */
2732         for_each_child_of_node(node, child) {
2733                 err = xilinx_dma_child_probe(xdev, child);
2734                 if (err < 0)
2735                         goto disable_clks;
2736         }
2737 
2738         if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2739                 for (i = 0; i < xdev->nr_channels; i++)
2740                         if (xdev->chan[i])
2741                                 xdev->chan[i]->num_frms = num_frames;
2742         }
2743 
2744         /* Register the DMA engine with the core */
2745         dma_async_device_register(&xdev->common);
2746 
2747         err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2748                                          xdev);
2749         if (err < 0) {
2750                 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2751                 dma_async_device_unregister(&xdev->common);
2752                 goto error;
2753         }
2754 
2755         if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2756                 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2757         else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2758                 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2759         else
2760                 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2761 
2762         return 0;
2763 
2764 disable_clks:
2765         xdma_disable_allclks(xdev);
2766 error:
2767         for (i = 0; i < xdev->nr_channels; i++)
2768                 if (xdev->chan[i])
2769                         xilinx_dma_chan_remove(xdev->chan[i]);
2770 
2771         return err;
2772 }
2773 
2774 /**
2775  * xilinx_dma_remove - Driver remove function
2776  * @pdev: Pointer to the platform_device structure
2777  *
2778  * Return: Always '0'
2779  */
2780 static int xilinx_dma_remove(struct platform_device *pdev)
2781 {
2782         struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2783         int i;
2784 
2785         of_dma_controller_free(pdev->dev.of_node);
2786 
2787         dma_async_device_unregister(&xdev->common);
2788 
2789         for (i = 0; i < xdev->nr_channels; i++)
2790                 if (xdev->chan[i])
2791                         xilinx_dma_chan_remove(xdev->chan[i]);
2792 
2793         xdma_disable_allclks(xdev);
2794 
2795         return 0;
2796 }
2797 
2798 static struct platform_driver xilinx_vdma_driver = {
2799         .driver = {
2800                 .name = "xilinx-vdma",
2801                 .of_match_table = xilinx_dma_of_ids,
2802         },
2803         .probe = xilinx_dma_probe,
2804         .remove = xilinx_dma_remove,
2805 };
2806 
2807 module_platform_driver(xilinx_vdma_driver);
2808 
2809 MODULE_AUTHOR("Xilinx, Inc.");
2810 MODULE_DESCRIPTION("Xilinx VDMA driver");
2811 MODULE_LICENSE("GPL v2");