/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan *lch_map[32];
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

enum {
	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, d->cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = omap_dma_chan_read(c, CCR);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, 0);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
			omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
			&c->dma_ch);
	}

	dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
		c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1,
					    od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		omap_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc)
			return rc;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

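/*
 * Illustrative note (an editor's sketch, not part of the original driver):
 * omap_dma_filter_fn() above is what a non-DT client would pass to
 * dma_request_channel() to claim a channel by its sDMA request line, where
 * "sig" below stands in for the client's (hypothetical) request line number:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned sig = MY_PERIPHERAL_DMA_REQUEST;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 *
 * Device-tree clients instead resolve channels through of_dma_simple_xlate,
 * registered with omap_dma_info in omap_dma_probe() above.
 */
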
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");