root/drivers/dma/mediatek/mtk-uart-apdma.c


DEFINITIONS

This source file includes the following definitions:
  1. to_mtk_uart_apdma_dev
  2. to_mtk_uart_apdma_chan
  3. to_mtk_uart_apdma_desc
  4. mtk_uart_apdma_write
  5. mtk_uart_apdma_read
  6. mtk_uart_apdma_desc_free
  7. mtk_uart_apdma_start_tx
  8. mtk_uart_apdma_start_rx
  9. mtk_uart_apdma_tx_handler
  10. mtk_uart_apdma_rx_handler
  11. mtk_uart_apdma_irq_handler
  12. mtk_uart_apdma_alloc_chan_resources
  13. mtk_uart_apdma_free_chan_resources
  14. mtk_uart_apdma_tx_status
  15. mtk_uart_apdma_prep_slave_sg
  16. mtk_uart_apdma_issue_pending
  17. mtk_uart_apdma_slave_config
  18. mtk_uart_apdma_terminate_all
  19. mtk_uart_apdma_device_pause
  20. mtk_uart_apdma_free
  21. mtk_uart_apdma_probe
  22. mtk_uart_apdma_remove
  23. mtk_uart_apdma_suspend
  24. mtk_uart_apdma_resume
  25. mtk_uart_apdma_runtime_suspend
  26. mtk_uart_apdma_runtime_resume

// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS        8

#define VFF_EN_B                BIT(0)
#define VFF_STOP_B              BIT(0)
#define VFF_FLUSH_B             BIT(0)
#define VFF_4G_EN_B             BIT(0)
/* The RX interrupt fires once rx valid size >= vff threshold */
#define VFF_RX_INT_EN_B         (BIT(0) | BIT(1))
/* The TX interrupt fires once tx left size >= vff threshold */
#define VFF_TX_INT_EN_B         BIT(0)
#define VFF_WARM_RST_B          BIT(0)
#define VFF_RX_INT_CLR_B        (BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B        0
#define VFF_STOP_CLR_B          0
#define VFF_EN_CLR_B            0
#define VFF_INT_EN_CLR_B        0
#define VFF_4G_SUPPORT_CLR_B    0

/*
 * Interrupt trigger level for TX.
 * With the threshold set to the full buffer size n, no polling is
 * required to start TX; otherwise VFF_FLUSH must be polled.
 */
#define VFF_TX_THRE(n)          (n)
/* Interrupt trigger level for RX */
#define VFF_RX_THRE(n)          ((n) * 3 / 4)

#define VFF_RING_SIZE   0xffff
/* This bit is toggled each time the ring pointer wraps past the end */
#define VFF_RING_WRAP   0x10000
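/*
 * Worked example (hypothetical numbers): with a ring of size 0x1000, a
 * pointer value of 0x10ff0 encodes offset 0xff0 (bits 15:0) on an odd lap
 * around the ring (wrap bit 16 set). Comparing the wrap bits of VFF_WPT
 * and VFF_RPT tells whether the write pointer is one full lap ahead of
 * the read pointer.
 */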

#define VFF_INT_FLAG            0x00
#define VFF_INT_EN              0x04
#define VFF_EN                  0x08
#define VFF_RST                 0x0c
#define VFF_STOP                0x10
#define VFF_FLUSH               0x14
#define VFF_ADDR                0x1c
#define VFF_LEN                 0x24
#define VFF_THRE                0x28
#define VFF_WPT                 0x2c
#define VFF_RPT                 0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE          0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE           0x40
#define VFF_DEBUG_STATUS        0x50
#define VFF_4G_SUPPORT          0x54
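/*
 * On SoCs with 33-bit addressing, VFF_4G_SUPPORT lets the VFF engine use
 * DMA buffers above the 4 GiB boundary (see the "mediatek,dma-33bits"
 * property handled in probe below).
 */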

struct mtk_uart_apdmadev {
        struct dma_device ddev;
        struct clk *clk;
        bool support_33bits;
        unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
        struct virt_dma_desc vd;

        dma_addr_t addr;
        unsigned int avail_len;
};

struct mtk_chan {
        struct virt_dma_chan vc;
        struct dma_slave_config cfg;
        struct mtk_uart_apdma_desc *desc;
        enum dma_transfer_direction dir;

        void __iomem *base;
        unsigned int irq;

        unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
        return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
        return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *
to_mtk_uart_apdma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
                                 unsigned int reg, unsigned int val)
{
        writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
        return readl(c->base + reg);
}

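/*
 * Free the descriptor that owns this virt_dma_desc. The vd argument
 * identifies the descriptor being released; it is not necessarily the
 * channel's current c->desc (e.g. when terminate_all drains the list).
 */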
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
        struct mtk_uart_apdmadev *mtkd =
                                to_mtk_uart_apdma_dev(c->vc.chan.device);
        struct mtk_uart_apdma_desc *d = c->desc;
        unsigned int wpt, vff_sz;

        vff_sz = c->cfg.dst_port_window_size;
        if (!mtk_uart_apdma_read(c, VFF_LEN)) {
                mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
                mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
                mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
                mtk_uart_apdma_write(c, VFF_WPT, 0);
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

                if (mtkd->support_33bits)
                        mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
        }

        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
        if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
                dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

        if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
                mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
                return;
        }

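        /*
         * Advance the write pointer by the queued length. When the 16-bit
         * offset hits the ring size, reset it to 0 and toggle the
         * VFF_RING_WRAP bit to mark the start of the next lap.
         */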
        wpt = mtk_uart_apdma_read(c, VFF_WPT);

        wpt += c->desc->avail_len;
        if ((wpt & VFF_RING_SIZE) == vff_sz)
                wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

        /* Let DMA start moving data */
        mtk_uart_apdma_write(c, VFF_WPT, wpt);

        /* HW auto-clears this once left size >= threshold */
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
        if (!mtk_uart_apdma_read(c, VFF_FLUSH))
                mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
        struct mtk_uart_apdmadev *mtkd =
                                to_mtk_uart_apdma_dev(c->vc.chan.device);
        struct mtk_uart_apdma_desc *d = c->desc;
        unsigned int vff_sz;

        vff_sz = c->cfg.src_port_window_size;
        if (!mtk_uart_apdma_read(c, VFF_LEN)) {
                mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
                mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
                mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
                mtk_uart_apdma_write(c, VFF_RPT, 0);
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

                if (mtkd->support_33bits)
                        mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
        }

        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
        if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
                dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
        struct mtk_uart_apdma_desc *d = c->desc;

        mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);

        list_del(&d->vd.node);
        vchan_cookie_complete(&d->vd);
}

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
        struct mtk_uart_apdma_desc *d = c->desc;
        unsigned int len, wg, rg;
        int cnt;

        mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

        if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
                return;

        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

        len = c->cfg.src_port_window_size;
        rg = mtk_uart_apdma_read(c, VFF_RPT);
        wg = mtk_uart_apdma_read(c, VFF_WPT);
        cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

        /*
         * The buffer is a ring buffer. If the wrap bits of RPT and WPT
         * differ, WPT has started the next lap around the ring, so add
         * one full buffer length to the count.
         */
        if ((rg ^ wg) & VFF_RING_WRAP)
                cnt += len;

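        /*
         * rx_status is the residue later reported through tx_status():
         * the part of the requested length the hardware did not fill.
         */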
        c->rx_status = d->avail_len - cnt;
        mtk_uart_apdma_write(c, VFF_RPT, wg);

        list_del(&d->vd.node);
        vchan_cookie_complete(&d->vd);
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
        struct dma_chan *chan = (struct dma_chan *)dev_id;
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->dir == DMA_DEV_TO_MEM)
                mtk_uart_apdma_rx_handler(c);
        else if (c->dir == DMA_MEM_TO_DEV)
                mtk_uart_apdma_tx_handler(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return IRQ_HANDLED;
}

static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned int status;
        int ret;

        ret = pm_runtime_get_sync(mtkd->ddev.dev);
        if (ret < 0) {
                pm_runtime_put_noidle(chan->device->dev);
                return ret;
        }

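        /*
         * Reset the VFF state: clear the programmed address, threshold and
         * length, then warm-reset the engine and wait below for VFF_EN to
         * clear before the channel is reused.
         */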
        mtk_uart_apdma_write(c, VFF_ADDR, 0);
        mtk_uart_apdma_write(c, VFF_THRE, 0);
        mtk_uart_apdma_write(c, VFF_LEN, 0);
        mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

        ret = readx_poll_timeout(readl, c->base + VFF_EN,
                                 status, !status, 10, 100);
        if (ret)
                goto err_pm;

        ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
                          IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
        if (ret < 0) {
                dev_err(chan->device->dev, "Can't request dma IRQ\n");
                ret = -EINVAL;
                goto err_pm;
        }

        if (mtkd->support_33bits)
                mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

        return 0;

err_pm:
        /* Drop the runtime PM reference taken above */
        pm_runtime_put_noidle(mtkd->ddev.dev);
        return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
        struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

        free_irq(c->irq, chan);

        tasklet_kill(&c->vc.task);

        vchan_free_chan_resources(&c->vc);

        pm_runtime_put_sync(mtkd->ddev.dev);
}

static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
                                                dma_cookie_t cookie,
                                                struct dma_tx_state *txstate)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (!txstate)
                return ret;

        dma_set_residue(txstate, c->rx_status);

        return ret;
}

/*
 * This is only reached via dmaengine_prep_slave_single(), so sglen is
 * always 1: the 8250 UART driver uses a single ring buffer and hands
 * over exactly one scatterlist entry.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
        (struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sglen, enum dma_transfer_direction dir,
        unsigned long tx_flags, void *context)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        struct mtk_uart_apdma_desc *d;

        if (!is_slave_direction(dir) || sglen != 1)
                return NULL;

        /* Now allocate and setup the descriptor */
        d = kzalloc(sizeof(*d), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->avail_len = sg_dma_len(sgl);
        d->addr = sg_dma_address(sgl);
        c->dir = dir;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
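
/*
 * For reference, a minimal client-side sketch (illustrative only; buf_dma,
 * len and TX_RING_SIZE below are hypothetical, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_port_window_size = TX_RING_SIZE,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */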

static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc)) {
                vd = vchan_next_desc(&c->vc);
                c->desc = to_mtk_uart_apdma_desc(&vd->tx);

                if (c->dir == DMA_DEV_TO_MEM)
                        mtk_uart_apdma_start_rx(c);
                else if (c->dir == DMA_MEM_TO_DEV)
                        mtk_uart_apdma_start_tx(c);
        }

        spin_unlock_irqrestore(&c->vc.lock, flags);
}

static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
                                       struct dma_slave_config *config)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

        memcpy(&c->cfg, config, sizeof(*config));

        return 0;
}

static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned long flags;
        unsigned int status;
        LIST_HEAD(head);
        int ret;

        mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

        ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
                                 status, status != VFF_FLUSH_B, 10, 100);
        if (ret)
                dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
                        mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

        /*
         * Stopping takes three steps:
         * 1. set VFF_STOP to 1
         * 2. wait for VFF_EN to read back 0
         * 3. clear VFF_STOP back to 0
         */
        mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
        ret = readx_poll_timeout(readl, c->base + VFF_EN,
                                 status, !status, 10, 100);
        if (ret)
                dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
                        mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

        mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

        if (c->dir == DMA_DEV_TO_MEM)
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
        else if (c->dir == DMA_MEM_TO_DEV)
                mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

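        /*
         * Make sure a handler still running on another CPU has finished
         * and observed the disabled interrupt state before the descriptors
         * are reclaimed below.
         */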
        synchronize_irq(c->irq);

        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        vchan_dma_desc_free_list(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
        struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);

        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

        spin_unlock_irqrestore(&c->vc.lock, flags);

        /*
         * Wait for the IRQ handler outside of vc.lock: the handler takes
         * the same lock, so waiting for it with the lock held can deadlock.
         */
        synchronize_irq(c->irq);

        return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
        while (!list_empty(&mtkd->ddev.channels)) {
                struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
                        struct mtk_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
}

static const struct of_device_id mtk_uart_apdma_match[] = {
        { .compatible = "mediatek,mt6577-uart-dma", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct mtk_uart_apdmadev *mtkd;
        int bit_mask = 32, rc;
        struct resource *res;
        struct mtk_chan *c;
        unsigned int i;

        mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
        if (!mtkd)
                return -ENOMEM;

        mtkd->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(mtkd->clk)) {
                dev_err(&pdev->dev, "No clock specified\n");
                rc = PTR_ERR(mtkd->clk);
                return rc;
        }

        if (of_property_read_bool(np, "mediatek,dma-33bits"))
                mtkd->support_33bits = true;

        if (mtkd->support_33bits)
                bit_mask = 33;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
        if (rc)
                return rc;

        dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
        mtkd->ddev.device_alloc_chan_resources =
                                mtk_uart_apdma_alloc_chan_resources;
        mtkd->ddev.device_free_chan_resources =
                                mtk_uart_apdma_free_chan_resources;
        mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
        mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
        mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
        mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
        mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
        mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
        mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
        mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
        mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
        mtkd->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&mtkd->ddev.channels);

        mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
        if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
                dev_info(&pdev->dev,
                         "dma-requests property missing, using default %u\n",
                         MTK_UART_APDMA_NR_VCHANS);
        }

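        /*
         * Each virtual channel owns its own register window and interrupt
         * line, provided as per-index mem and irq resources of the
         * platform device.
         */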
        for (i = 0; i < mtkd->dma_requests; i++) {
                c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
                if (!c) {
                        rc = -ENOMEM;
                        goto err_no_dma;
                }

                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        rc = -ENODEV;
                        goto err_no_dma;
                }

                c->base = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(c->base)) {
                        rc = PTR_ERR(c->base);
                        goto err_no_dma;
                }
                c->vc.desc_free = mtk_uart_apdma_desc_free;
                vchan_init(&c->vc, &mtkd->ddev);

                rc = platform_get_irq(pdev, i);
                if (rc < 0)
                        goto err_no_dma;
                c->irq = rc;
        }

        pm_runtime_enable(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);

        rc = dma_async_device_register(&mtkd->ddev);
        if (rc)
                goto rpm_disable;

        platform_set_drvdata(pdev, mtkd);

        /* Device-tree DMA controller registration */
        rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
        if (rc)
                goto dma_remove;

        return rc;

dma_remove:
        dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
        pm_runtime_disable(&pdev->dev);
err_no_dma:
        mtk_uart_apdma_free(mtkd);
        return rc;
}

static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
        struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);

        mtk_uart_apdma_free(mtkd);

        dma_async_device_unregister(&mtkd->ddev);

        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

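        /*
         * If runtime PM has already suspended the device, the clock is
         * gated; only disable it here when the device is still active.
         */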
        if (!pm_runtime_suspended(dev))
                clk_disable_unprepare(mtkd->clk);

        return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
        int ret;
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        if (!pm_runtime_suspended(dev)) {
                ret = clk_prepare_enable(mtkd->clk);
                if (ret)
                        return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        clk_disable_unprepare(mtkd->clk);

        return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
        struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

        return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
        SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
                           mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
        .probe  = mtk_uart_apdma_probe,
        .remove = mtk_uart_apdma_remove,
        .driver = {
                .name           = KBUILD_MODNAME,
                .pm             = &mtk_uart_apdma_pm_ops,
                .of_match_table = of_match_ptr(mtk_uart_apdma_match),
        },
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");
