drivers/dma/fsl-qdma.c

DEFINITIONS

This source file includes the following definitions.
  1. qdma_ccdf_addr_get64
  2. qdma_desc_addr_set64
  3. qdma_ccdf_get_queue
  4. qdma_ccdf_get_offset
  5. qdma_ccdf_set_format
  6. qdma_ccdf_get_status
  7. qdma_ccdf_set_ser
  8. qdma_csgf_set_len
  9. qdma_csgf_set_f
  10. qdma_readl
  11. qdma_writel
  12. to_fsl_qdma_chan
  13. to_fsl_qdma_comp
  14. fsl_qdma_free_chan_resources
  15. fsl_qdma_comp_fill_memcpy
  16. fsl_qdma_pre_request_enqueue_desc
  17. fsl_qdma_request_enqueue_desc
  18. fsl_qdma_alloc_queue_resources
  19. fsl_qdma_prep_status_queue
  20. fsl_qdma_halt
  21. fsl_qdma_queue_transfer_complete
  22. fsl_qdma_error_handler
  23. fsl_qdma_queue_handler
  24. fsl_qdma_irq_init
  25. fsl_qdma_irq_exit
  26. fsl_qdma_reg_init
  27. fsl_qdma_prep_memcpy
  28. fsl_qdma_enqueue_desc
  29. fsl_qdma_free_desc
  30. fsl_qdma_issue_pending
  31. fsl_qdma_synchronize
  32. fsl_qdma_terminate_all
  33. fsl_qdma_alloc_chan_resources
  34. fsl_qdma_probe
  35. fsl_qdma_cleanup_vchan
  36. fsl_qdma_remove

// SPDX-License-Identifier: GPL-2.0
// Copyright 2014-2015 Freescale
// Copyright 2018 NXP

/*
 * Driver for NXP Layerscape Queue Direct Memory Access Controller
 *
 * Author:
 *  Wen He <wen.he_1@nxp.com>
 *  Jiaheng Fan <jiaheng.fan@nxp.com>
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "virt-dma.h"
#include "fsldma.h"

/* Register related definition */
#define FSL_QDMA_DMR                    0x0
#define FSL_QDMA_DSR                    0x4
#define FSL_QDMA_DEIER                  0xe00
#define FSL_QDMA_DEDR                   0xe04
#define FSL_QDMA_DECFDW0R               0xe10
#define FSL_QDMA_DECFDW1R               0xe14
#define FSL_QDMA_DECFDW2R               0xe18
#define FSL_QDMA_DECFDW3R               0xe1c
#define FSL_QDMA_DECFQIDR               0xe30
#define FSL_QDMA_DECBR                  0xe34

#define FSL_QDMA_BCQMR(x)               (0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x)               (0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x)       (0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x)        (0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x)       (0xd0 + 0x100 * (x))
#define FSL_QDMA_BCQEPA_SADDR(x)        (0xd4 + 0x100 * (x))
#define FSL_QDMA_BCQIER(x)              (0xe0 + 0x100 * (x))
#define FSL_QDMA_BCQIDR(x)              (0xe4 + 0x100 * (x))

#define FSL_QDMA_SQDPAR                 0x80c
#define FSL_QDMA_SQEPAR                 0x814
#define FSL_QDMA_BSQMR                  0x800
#define FSL_QDMA_BSQSR                  0x804
#define FSL_QDMA_BSQICR                 0x828
#define FSL_QDMA_CQMR                   0xa00
#define FSL_QDMA_CQDSCR1                0xa08
#define FSL_QDMA_CQDSCR2                0xa0c
#define FSL_QDMA_CQIER                  0xa10
#define FSL_QDMA_CQEDR                  0xa14
#define FSL_QDMA_SQCCMR                 0xa20

/* Bit and genmask definitions for registers */
#define FSL_QDMA_CQIDR_SQT              BIT(15)
#define QDMA_CCDF_FORMAT                BIT(29)
#define QDMA_CCDF_SER                   BIT(30)
#define QDMA_SG_FIN                     BIT(30)
#define QDMA_SG_LEN_MASK                GENMASK(29, 0)
#define QDMA_CCDF_MASK                  GENMASK(28, 20)

#define FSL_QDMA_DEDR_CLEAR             GENMASK(31, 0)
#define FSL_QDMA_BCQIDR_CLEAR           GENMASK(31, 0)
#define FSL_QDMA_DEIER_CLEAR            GENMASK(31, 0)

#define FSL_QDMA_BCQIER_CQTIE           BIT(15)
#define FSL_QDMA_BCQIER_CQPEIE          BIT(23)
#define FSL_QDMA_BSQICR_ICEN            BIT(31)

#define FSL_QDMA_BSQICR_ICST(x)         ((x) << 16)
#define FSL_QDMA_CQIER_MEIE             BIT(31)
#define FSL_QDMA_CQIER_TEIE             BIT(0)
#define FSL_QDMA_SQCCMR_ENTER_WM        BIT(21)

#define FSL_QDMA_BCQMR_EN               BIT(31)
#define FSL_QDMA_BCQMR_EI               BIT(30)
#define FSL_QDMA_BCQMR_CD_THLD(x)       ((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x)       ((x) << 16)

#define FSL_QDMA_BCQSR_QF               BIT(16)
#define FSL_QDMA_BCQSR_XOFF             BIT(0)

#define FSL_QDMA_BSQMR_EN               BIT(31)
#define FSL_QDMA_BSQMR_DI               BIT(30)
#define FSL_QDMA_BSQMR_CQ_SIZE(x)       ((x) << 16)

#define FSL_QDMA_BSQSR_QE               BIT(17)

#define FSL_QDMA_DMR_DQD                BIT(30)
#define FSL_QDMA_DSR_DB                 BIT(31)

/* Size related definition */
#define FSL_QDMA_QUEUE_MAX              8
#define FSL_QDMA_COMMAND_BUFFER_SIZE    64
#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
#define FSL_QDMA_QUEUE_NUM_MAX          8

/* Field definition for CMD */
#define FSL_QDMA_CMD_RWTTYPE            0x4
#define FSL_QDMA_CMD_LWC                0x2
#define FSL_QDMA_CMD_RWTTYPE_OFFSET     28
#define FSL_QDMA_CMD_NS_OFFSET          27
#define FSL_QDMA_CMD_DQOS_OFFSET        24
#define FSL_QDMA_CMD_WTHROTL_OFFSET     20
#define FSL_QDMA_CMD_DSEN_OFFSET        19
#define FSL_QDMA_CMD_LWC_OFFSET         16

/* Field definition for descriptor offset */
#define QDMA_CCDF_STATUS                20
#define QDMA_CCDF_OFFSET                20
#define QDMA_SDDF_CMD(x)                (((u64)(x)) << 32)

/* Field definition for safe loop count */
#define FSL_QDMA_HALT_COUNT             1500
#define FSL_QDMA_MAX_SIZE               16385
#define FSL_QDMA_COMP_TIMEOUT           1000
#define FSL_COMMAND_QUEUE_OVERFLOW      10

#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)                  \
        (((fsl_qdma_engine)->block_offset) * (x))
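
/*
 * Worked example (the offset value is illustrative, not from this file):
 * with a device-tree "block-offset" of 0x10000, the registers of block 1
 * live at block_base + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, 1), i.e.
 * block_base + 0x10000.
 */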

/**
 * struct fsl_qdma_format - Structure describing the qDMA compound
 *                          descriptor format.
 * @status:                 Command status and enqueue status notification.
 * @cfg:                    Frame offset and frame format.
 * @addr_lo:                Lower 32 bits of the 40-bit memory address held
 *                          by the compound descriptor.
 * @addr_hi:                Upper 8 bits of the same 40-bit memory address.
 * @__reserved1:            Reserved field.
 * @cfg8b_w1:               Compound descriptor command queue origin produced
 *                          by qDMA and dynamic debug field.
 * @data:                   The address as a single 64-bit word, describing
 *                          DMA source and destination information.
 */
struct fsl_qdma_format {
        __le32 status;
        __le32 cfg;
        union {
                struct {
                        __le32 addr_lo;
                        u8 addr_hi;
                        u8 __reserved1[2];
                        u8 cfg8b_w1;
                } __packed;
                __le64 data;
        };
} __packed;
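
/*
 * Each fsl_qdma_format is one 16-byte entry (4 bytes status, 4 bytes cfg,
 * 8 bytes address/data union). The same 16-byte layout is reused in three
 * roles below: as a CCDF entry in a command queue ring, as a CSGF entry in
 * the compound frame list, and as the SD/DD command buffer entries built
 * in fsl_qdma_comp_fill_memcpy().
 */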

/* qDMA status notification pre information */
struct fsl_pre_status {
        u64 addr;
        u8 queue;
};

static DEFINE_PER_CPU(struct fsl_pre_status, pre);

struct fsl_qdma_chan {
        struct virt_dma_chan            vchan;
        struct virt_dma_desc            vdesc;
        enum dma_status                 status;
        struct fsl_qdma_engine          *qdma;
        struct fsl_qdma_queue           *queue;
};

struct fsl_qdma_queue {
        struct fsl_qdma_format  *virt_head;
        struct fsl_qdma_format  *virt_tail;
        struct list_head        comp_used;
        struct list_head        comp_free;
        struct dma_pool         *comp_pool;
        struct dma_pool         *desc_pool;
        spinlock_t              queue_lock;
        dma_addr_t              bus_addr;
        u32                     n_cq;
        u32                     id;
        struct fsl_qdma_format  *cq;
        void __iomem            *block_base;
};

struct fsl_qdma_comp {
        dma_addr_t              bus_addr;
        dma_addr_t              desc_bus_addr;
        struct fsl_qdma_format  *virt_addr;
        struct fsl_qdma_format  *desc_virt_addr;
        struct fsl_qdma_chan    *qchan;
        struct virt_dma_desc    vdesc;
        struct list_head        list;
};

struct fsl_qdma_engine {
        struct dma_device       dma_dev;
        void __iomem            *ctrl_base;
        void __iomem            *status_base;
        void __iomem            *block_base;
        u32                     n_chans;
        u32                     n_queues;
        struct mutex            fsl_qdma_mutex;
        int                     error_irq;
        int                     *queue_irq;
        u32                     feature;
        struct fsl_qdma_queue   *queue;
        struct fsl_qdma_queue   **status;
        struct fsl_qdma_chan    *chans;
        int                     block_number;
        int                     block_offset;
        int                     irq_base;
        int                     desc_allocated;
};

static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
{
        return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
}

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
        ccdf->addr_hi = upper_32_bits(addr);
        ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
}
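
/*
 * The 40-bit address is split as bits [31:0] in addr_lo and bits [39:32]
 * in the single-byte addr_hi (upper_32_bits() is truncated to u8 on
 * assignment), while the getter masks with U64_MAX >> 24 to keep the low
 * 40 bits. Example: addr = 0x12_3456_789a gives addr_hi = 0x12 and
 * addr_lo = 0x3456789a.
 */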

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
        return ccdf->cfg8b_w1 & U8_MAX;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
        return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
        ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT | offset);
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
        return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
        ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
}

static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
}

static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
        return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
                        void __iomem *addr)
{
        FSL_DMA_OUT(qdma, addr, val, 32);
}

static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}

static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
        return container_of(vd, struct fsl_qdma_comp, vdesc);
}

static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        struct fsl_qdma_comp *comp_temp, *_comp_temp;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

        if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
                return;

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_used, list) {
                dma_pool_free(fsl_queue->comp_pool,
                              comp_temp->virt_addr,
                              comp_temp->bus_addr);
                dma_pool_free(fsl_queue->desc_pool,
                              comp_temp->desc_virt_addr,
                              comp_temp->desc_bus_addr);
                list_del(&comp_temp->list);
                kfree(comp_temp);
        }

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_free, list) {
                dma_pool_free(fsl_queue->comp_pool,
                              comp_temp->virt_addr,
                              comp_temp->bus_addr);
                dma_pool_free(fsl_queue->desc_pool,
                              comp_temp->desc_virt_addr,
                              comp_temp->desc_bus_addr);
                list_del(&comp_temp->list);
                kfree(comp_temp);
        }

        dma_pool_destroy(fsl_queue->comp_pool);
        dma_pool_destroy(fsl_queue->desc_pool);

        fsl_qdma->desc_allocated--;
        fsl_queue->comp_pool = NULL;
        fsl_queue->desc_pool = NULL;
}

static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
                                      dma_addr_t dst, dma_addr_t src, u32 len)
{
        u32 cmd;
        struct fsl_qdma_format *sdf, *ddf;
        struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

        ccdf = fsl_comp->virt_addr;
        csgf_desc = fsl_comp->virt_addr + 1;
        csgf_src = fsl_comp->virt_addr + 2;
        csgf_dest = fsl_comp->virt_addr + 3;
        sdf = fsl_comp->desc_virt_addr;
        ddf = fsl_comp->desc_virt_addr + 1;

        memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
        memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
        /* Head Command Descriptor (Frame Descriptor) */
        qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
        qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
        /* Status notification is enqueued to the status queue. */
        qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
        /* Compound Command Descriptor (Frame List Table) */
        qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
        /* Length must be 32, the size of the SD/DD descriptor buffer. */
        qdma_csgf_set_len(csgf_desc, 32);
        qdma_desc_addr_set64(csgf_src, src);
        qdma_csgf_set_len(csgf_src, len);
        qdma_desc_addr_set64(csgf_dest, dst);
        qdma_csgf_set_len(csgf_dest, len);
        /* This entry is the last entry. */
        qdma_csgf_set_f(csgf_dest, len);
        /* Descriptor Buffer */
        cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
                          FSL_QDMA_CMD_RWTTYPE_OFFSET);
        sdf->data = QDMA_SDDF_CMD(cmd);

        cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
                          FSL_QDMA_CMD_RWTTYPE_OFFSET);
        cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
        ddf->data = QDMA_SDDF_CMD(cmd);
}
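
/*
 * Sketch of the compound frame built above, from the offsets used (one
 * 64-byte command buffer plus one 32-byte SD/DD buffer per completion):
 *
 *   virt_addr (bus_addr)
 *     +0   CCDF       -> bus_addr + 16 (start of the frame list below)
 *     +16  CSGF desc  -> desc_bus_addr, len 32
 *     +32  CSGF src   -> src, len
 *     +48  CSGF dest  -> dst, len, final (F) bit set
 *   desc_virt_addr (desc_bus_addr)
 *     +0   SD         -> source descriptor command word
 *     +16  DD         -> destination descriptor command word
 */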

/*
 * Pre-request a full set of command descriptors for enqueue.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
        int i;
        struct fsl_qdma_comp *comp_temp, *_comp_temp;

        for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLOW; i++) {
                comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
                if (!comp_temp)
                        goto err_alloc;
                comp_temp->virt_addr =
                        dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
                                       &comp_temp->bus_addr);
                if (!comp_temp->virt_addr)
                        goto err_dma_alloc;

                comp_temp->desc_virt_addr =
                        dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
                                       &comp_temp->desc_bus_addr);
                if (!comp_temp->desc_virt_addr)
                        goto err_desc_dma_alloc;

                list_add_tail(&comp_temp->list, &queue->comp_free);
        }

        return 0;

err_desc_dma_alloc:
        dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
                      comp_temp->bus_addr);

err_dma_alloc:
        kfree(comp_temp);

err_alloc:
        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &queue->comp_free, list) {
                if (comp_temp->virt_addr)
                        dma_pool_free(queue->comp_pool,
                                      comp_temp->virt_addr,
                                      comp_temp->bus_addr);
                if (comp_temp->desc_virt_addr)
                        dma_pool_free(queue->desc_pool,
                                      comp_temp->desc_virt_addr,
                                      comp_temp->desc_bus_addr);

                list_del(&comp_temp->list);
                kfree(comp_temp);
        }

        return -ENOMEM;
}
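
/*
 * Each queue pre-allocates n_cq + FSL_COMMAND_QUEUE_OVERFLOW completion
 * sets, so the request path below normally finds a free descriptor on
 * the comp_free list without having to wait.
 */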

/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp
*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
        unsigned long flags;
        struct fsl_qdma_comp *comp_temp;
        int timeout = FSL_QDMA_COMP_TIMEOUT;
        struct fsl_qdma_queue *queue = fsl_chan->queue;

        while (timeout--) {
                spin_lock_irqsave(&queue->queue_lock, flags);
                if (!list_empty(&queue->comp_free)) {
                        comp_temp = list_first_entry(&queue->comp_free,
                                                     struct fsl_qdma_comp,
                                                     list);
                        list_del(&comp_temp->list);

                        spin_unlock_irqrestore(&queue->queue_lock, flags);
                        comp_temp->qchan = fsl_chan;
                        return comp_temp;
                }
                spin_unlock_irqrestore(&queue->queue_lock, flags);
                udelay(1);
        }

        return NULL;
}

static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
                                struct fsl_qdma_engine *fsl_qdma)
{
        int ret, len, i, j;
        int queue_num, block_number;
        unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
        struct fsl_qdma_queue *queue_head, *queue_temp;

        queue_num = fsl_qdma->n_queues;
        block_number = fsl_qdma->block_number;

        if (queue_num > FSL_QDMA_QUEUE_MAX)
                queue_num = FSL_QDMA_QUEUE_MAX;
        len = sizeof(*queue_head) * queue_num * block_number;
        queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!queue_head)
                return NULL;

        ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
                                             queue_size, queue_num);
        if (ret) {
                dev_err(&pdev->dev, "Can't get queue-sizes.\n");
                return NULL;
        }
        for (j = 0; j < block_number; j++) {
                for (i = 0; i < queue_num; i++) {
                        if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
                            queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                                dev_err(&pdev->dev,
                                        "Get wrong queue-sizes.\n");
                                return NULL;
                        }
                        queue_temp = queue_head + i + (j * queue_num);

                        queue_temp->cq =
                        dma_alloc_coherent(&pdev->dev,
                                           sizeof(struct fsl_qdma_format) *
                                           queue_size[i],
                                           &queue_temp->bus_addr,
                                           GFP_KERNEL);
                        if (!queue_temp->cq)
                                return NULL;
                        queue_temp->block_base = fsl_qdma->block_base +
                                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                        queue_temp->n_cq = queue_size[i];
                        queue_temp->id = i;
                        queue_temp->virt_head = queue_temp->cq;
                        queue_temp->virt_tail = queue_temp->cq;
                        /*
                         * List for queue command buffer
                         */
                        INIT_LIST_HEAD(&queue_temp->comp_used);
                        spin_lock_init(&queue_temp->queue_lock);
                }
        }
        return queue_head;
}

static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
        int ret;
        unsigned int status_size;
        struct fsl_qdma_queue *status_head;
        struct device_node *np = pdev->dev.of_node;

        ret = of_property_read_u32(np, "status-sizes", &status_size);
        if (ret) {
                dev_err(&pdev->dev, "Can't get status-sizes.\n");
                return NULL;
        }
        if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
            status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                dev_err(&pdev->dev, "Get wrong status_size.\n");
                return NULL;
        }
        status_head = devm_kzalloc(&pdev->dev,
                                   sizeof(*status_head), GFP_KERNEL);
        if (!status_head)
                return NULL;

        /*
         * Buffer for queue command
         */
        status_head->cq = dma_alloc_coherent(&pdev->dev,
                                             sizeof(struct fsl_qdma_format) *
                                             status_size,
                                             &status_head->bus_addr,
                                             GFP_KERNEL);
        if (!status_head->cq) {
                devm_kfree(&pdev->dev, status_head);
                return NULL;
        }
        status_head->n_cq = status_size;
        status_head->virt_head = status_head->cq;
        status_head->virt_tail = status_head->cq;
        status_head->comp_pool = NULL;

        return status_head;
}

static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
        u32 reg;
        int i, j, count = FSL_QDMA_HALT_COUNT;
        void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

        /* Disable the command queue and wait for idle state. */
        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
        reg |= FSL_QDMA_DMR_DQD;
        qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
        for (j = 0; j < fsl_qdma->block_number; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
                        qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
        }
        while (1) {
                reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
                if (!(reg & FSL_QDMA_DSR_DB))
                        break;
                if (count-- < 0)
                        return -EBUSY;
                udelay(100);
        }

        for (j = 0; j < fsl_qdma->block_number; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

                /* Disable status queue. */
                qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

                /*
                 * Clear the command queue interrupt detect register for
                 * all queues.
                 */
                qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                            block + FSL_QDMA_BCQIDR(0));
        }

        return 0;
}
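
/*
 * The busy poll above gives the engine up to FSL_QDMA_HALT_COUNT * 100 us
 * (1500 * 100 us = 150 ms) to drain and clear the DSR_DB busy bit before
 * the halt is reported as -EBUSY.
 */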

static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
                                 void *block,
                                 int id)
{
        bool duplicate;
        u32 reg, i, count;
        struct fsl_qdma_queue *temp_queue;
        struct fsl_qdma_format *status_addr;
        struct fsl_qdma_comp *fsl_comp = NULL;
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
        struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

        count = FSL_QDMA_MAX_SIZE;

        while (count--) {
                duplicate = false;
                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
                if (reg & FSL_QDMA_BSQSR_QE)
                        return 0;

                status_addr = fsl_status->virt_head;

                if (qdma_ccdf_get_queue(status_addr) ==
                    __this_cpu_read(pre.queue) &&
                    qdma_ccdf_addr_get64(status_addr) ==
                    __this_cpu_read(pre.addr))
                        duplicate = true;
                i = qdma_ccdf_get_queue(status_addr) +
                        id * fsl_qdma->n_queues;
                __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
                __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
                temp_queue = fsl_queue + i;

                spin_lock(&temp_queue->queue_lock);
                if (list_empty(&temp_queue->comp_used)) {
                        if (!duplicate) {
                                spin_unlock(&temp_queue->queue_lock);
                                return -EAGAIN;
                        }
                } else {
                        fsl_comp = list_first_entry(&temp_queue->comp_used,
                                                    struct fsl_qdma_comp, list);
                        if (fsl_comp->bus_addr + 16 !=
                            __this_cpu_read(pre.addr)) {
                                if (!duplicate) {
                                        spin_unlock(&temp_queue->queue_lock);
                                        return -EAGAIN;
                                }
                        }
                }

                if (duplicate) {
                        reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
                        reg |= FSL_QDMA_BSQMR_DI;
                        qdma_desc_addr_set64(status_addr, 0x0);
                        fsl_status->virt_head++;
                        if (fsl_status->virt_head == fsl_status->cq
                                                   + fsl_status->n_cq)
                                fsl_status->virt_head = fsl_status->cq;
                        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
                        spin_unlock(&temp_queue->queue_lock);
                        continue;
                }
                list_del(&fsl_comp->list);

                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
                reg |= FSL_QDMA_BSQMR_DI;
                qdma_desc_addr_set64(status_addr, 0x0);
                fsl_status->virt_head++;
                if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
                        fsl_status->virt_head = fsl_status->cq;
                qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
                spin_unlock(&temp_queue->queue_lock);

                spin_lock(&fsl_comp->qchan->vchan.lock);
                vchan_cookie_complete(&fsl_comp->vdesc);
                fsl_comp->qchan->status = DMA_COMPLETE;
                spin_unlock(&fsl_comp->qchan->vchan.lock);
        }

        return 0;
}
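
/*
 * Note on the duplicate handling above: the engine can report the same
 * status entry more than once, so the per-CPU "pre" record caches the
 * (queue, address) pair of the previous notification. A repeated pair is
 * treated as a duplicate and the status ring entry is consumed without
 * completing a descriptor a second time.
 */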

static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
{
        unsigned int intr;
        struct fsl_qdma_engine *fsl_qdma = dev_id;
        void __iomem *status = fsl_qdma->status_base;

        intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

        if (intr)
                dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");

        qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
        return IRQ_HANDLED;
}

static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
{
        int id;
        unsigned int intr, reg;
        struct fsl_qdma_engine *fsl_qdma = dev_id;
        void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

        id = irq - fsl_qdma->irq_base;
        if (id < 0 || id >= fsl_qdma->block_number) {
                dev_err(fsl_qdma->dma_dev.dev,
                        "irq %d is wrong, irq_base is %d\n",
                        irq, fsl_qdma->irq_base);
                return IRQ_NONE;
        }

        block = fsl_qdma->block_base +
                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

        intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

        if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
                intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

        if (intr != 0) {
                reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
                reg |= FSL_QDMA_DMR_DQD;
                qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
                qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
                dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
        }

        /* Clear all detected events and interrupts. */
        qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                    block + FSL_QDMA_BCQIDR(0));

        return IRQ_HANDLED;
}

static int
fsl_qdma_irq_init(struct platform_device *pdev,
                  struct fsl_qdma_engine *fsl_qdma)
{
        int i;
        int cpu;
        int ret;
        char irq_name[20];

        fsl_qdma->error_irq =
                platform_get_irq_byname(pdev, "qdma-error");
        if (fsl_qdma->error_irq < 0)
                return fsl_qdma->error_irq;

        ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
                               fsl_qdma_error_handler, 0,
                               "qDMA error", fsl_qdma);
        if (ret) {
                dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
                return ret;
        }

        for (i = 0; i < fsl_qdma->block_number; i++) {
                sprintf(irq_name, "qdma-queue%d", i);
                fsl_qdma->queue_irq[i] =
                                platform_get_irq_byname(pdev, irq_name);

                if (fsl_qdma->queue_irq[i] < 0)
                        return fsl_qdma->queue_irq[i];

                ret = devm_request_irq(&pdev->dev,
                                       fsl_qdma->queue_irq[i],
                                       fsl_qdma_queue_handler,
                                       0,
                                       "qDMA queue",
                                       fsl_qdma);
                if (ret) {
                        dev_err(&pdev->dev,
                                "Can't register qDMA queue IRQ.\n");
                        return ret;
                }

                cpu = i % num_online_cpus();
                ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
                                            get_cpu_mask(cpu));
                if (ret) {
                        dev_err(&pdev->dev,
                                "Can't set cpu %d affinity to IRQ %d.\n",
                                cpu,
                                fsl_qdma->queue_irq[i]);
                        return ret;
                }
        }

        return 0;
}

static void fsl_qdma_irq_exit(struct platform_device *pdev,
                              struct fsl_qdma_engine *fsl_qdma)
{
        int i;

        devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
        for (i = 0; i < fsl_qdma->block_number; i++)
                devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
}

static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
        u32 reg;
        int i, j, ret;
        struct fsl_qdma_queue *temp;
        void __iomem *status = fsl_qdma->status_base;
        void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

        /* Try to halt the qDMA engine first. */
        ret = fsl_qdma_halt(fsl_qdma);
        if (ret) {
                dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
                return ret;
        }

        for (i = 0; i < fsl_qdma->block_number; i++) {
                /*
                 * Clear the command queue interrupt detect register for
                 * all queues.
                 */
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
                qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                            block + FSL_QDMA_BCQIDR(0));
        }

        for (j = 0; j < fsl_qdma->block_number; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < fsl_qdma->n_queues; i++) {
                        temp = fsl_queue + i + (j * fsl_qdma->n_queues);
                        /*
                         * Initialize the Command Queue registers to point
                         * to the first command descriptor in memory:
                         * Dequeue Pointer Address Register and
                         * Enqueue Pointer Address Register.
                         */
                        qdma_writel(fsl_qdma, temp->bus_addr,
                                    block + FSL_QDMA_BCQDPA_SADDR(i));
                        qdma_writel(fsl_qdma, temp->bus_addr,
                                    block + FSL_QDMA_BCQEPA_SADDR(i));

                        /* Initialize the queue mode. */
                        reg = FSL_QDMA_BCQMR_EN;
                        reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
                        reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
                        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
                }

                /*
                 * Workaround for erratum ERR010812:
                 * XOFF must be enabled to avoid enqueue rejections,
                 * so set SQCCMR ENTER_WM to 0x20.
                 */
                qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
                            block + FSL_QDMA_SQCCMR);

                /*
                 * Initialize the status queue registers to point to the
                 * first command descriptor in memory:
                 * Dequeue Pointer Address Register and
                 * Enqueue Pointer Address Register.
                 */
                qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
                            block + FSL_QDMA_SQEPAR);
                qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
                            block + FSL_QDMA_SQDPAR);
                /* Initialize status queue interrupt. */
                qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
                            block + FSL_QDMA_BCQIER(0));
                qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
                                   FSL_QDMA_BSQICR_ICST(5) | 0x8000,
                                   block + FSL_QDMA_BSQICR);
                qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
                                   FSL_QDMA_CQIER_TEIE,
                                   block + FSL_QDMA_CQIER);

                /* Initialize the status queue mode. */
                reg = FSL_QDMA_BSQMR_EN;
                reg |= FSL_QDMA_BSQMR_CQ_SIZE(
                                ilog2(fsl_qdma->status[j]->n_cq) - 6);

                qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
        }

        /* Initialize the controller interrupt registers. */
        qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
        qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
        reg &= ~FSL_QDMA_DMR_DQD;
        qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

        return 0;
}
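
/*
 * Size-field encoding example: FSL_QDMA_BCQMR_CQ_SIZE() takes
 * ilog2(n_cq) - 6, so a 64-entry command queue is encoded as 0 and a
 * 16384-entry queue as 8; FSL_QDMA_BCQMR_CD_THLD() takes ilog2(n_cq) - 4,
 * giving 2 for 64 entries. This log2 encoding is also why queue sizes are
 * expected to be powers of two within the MIN/MAX bounds checked earlier.
 */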

static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
                     dma_addr_t src, size_t len, unsigned long flags)
{
        struct fsl_qdma_comp *fsl_comp;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

        fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);

        if (!fsl_comp)
                return NULL;

        fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
}

static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
        u32 reg;
        struct virt_dma_desc *vdesc;
        struct fsl_qdma_comp *fsl_comp;
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        void __iomem *block = fsl_queue->block_base;

        reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
        if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
                return;
        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        list_del(&vdesc->node);
        fsl_comp = to_fsl_qdma_comp(vdesc);

        memcpy(fsl_queue->virt_head++,
               fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
        if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
                fsl_queue->virt_head = fsl_queue->cq;

        list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
        barrier();
        reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
        reg |= FSL_QDMA_BCQMR_EI;
        qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
        fsl_chan->status = DMA_IN_PROGRESS;
}
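
/*
 * Enqueue path above: the 16-byte CCDF is copied into the ring slot at
 * virt_head, the head pointer wraps at cq + n_cq, and setting the BCQMR
 * EI bit then notifies the block that a new entry is available, acting as
 * the enqueue doorbell for this queue.
 */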

static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
{
        unsigned long flags;
        struct fsl_qdma_comp *fsl_comp;
        struct fsl_qdma_queue *fsl_queue;

        fsl_comp = to_fsl_qdma_comp(vdesc);
        fsl_queue = fsl_comp->qchan->queue;

        spin_lock_irqsave(&fsl_queue->queue_lock, flags);
        list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
        spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_issue_pending(struct dma_chan *chan)
{
        unsigned long flags;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        spin_lock_irqsave(&fsl_queue->queue_lock, flags);
        spin_lock(&fsl_chan->vchan.lock);
        if (vchan_issue_pending(&fsl_chan->vchan))
                fsl_qdma_enqueue_desc(fsl_chan);
        spin_unlock(&fsl_chan->vchan.lock);
        spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_synchronize(struct dma_chan *chan)
{
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

        vchan_synchronize(&fsl_chan->vchan);
}

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
        LIST_HEAD(head);
        unsigned long flags;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        return 0;
}

static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
{
        int ret;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        if (fsl_queue->comp_pool && fsl_queue->desc_pool)
                return fsl_qdma->desc_allocated;

        INIT_LIST_HEAD(&fsl_queue->comp_free);

        /*
         * The dma pool for queue command buffer
         */
        fsl_queue->comp_pool =
        dma_pool_create("comp_pool",
                        chan->device->dev,
                        FSL_QDMA_COMMAND_BUFFER_SIZE,
                        64, 0);
        if (!fsl_queue->comp_pool)
                return -ENOMEM;

        /*
         * The dma pool for descriptor (SD/DD) buffer
         */
        fsl_queue->desc_pool =
        dma_pool_create("desc_pool",
                        chan->device->dev,
                        FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
                        32, 0);
        if (!fsl_queue->desc_pool)
                goto err_desc_pool;

        ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
        if (ret) {
                dev_err(chan->device->dev,
                        "failed to alloc dma buffer for S/G descriptor\n");
                goto err_mem;
        }

        fsl_qdma->desc_allocated++;
        return fsl_qdma->desc_allocated;

err_mem:
        dma_pool_destroy(fsl_queue->desc_pool);
err_desc_pool:
        dma_pool_destroy(fsl_queue->comp_pool);
        return -ENOMEM;
}

static int fsl_qdma_probe(struct platform_device *pdev)
{
        int ret, i;
        int blk_num, blk_off;
        u32 len, chans, queues;
        struct resource *res;
        struct fsl_qdma_chan *fsl_chan;
        struct fsl_qdma_engine *fsl_qdma;
        struct device_node *np = pdev->dev.of_node;

        ret = of_property_read_u32(np, "dma-channels", &chans);
        if (ret) {
                dev_err(&pdev->dev, "Can't get dma-channels.\n");
                return ret;
        }

        ret = of_property_read_u32(np, "block-offset", &blk_off);
        if (ret) {
                dev_err(&pdev->dev, "Can't get block-offset.\n");
                return ret;
        }

        ret = of_property_read_u32(np, "block-number", &blk_num);
        if (ret) {
                dev_err(&pdev->dev, "Can't get block-number.\n");
                return ret;
        }

        blk_num = min_t(int, blk_num, num_online_cpus());

        len = sizeof(*fsl_qdma);
        fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma)
                return -ENOMEM;

        len = sizeof(*fsl_chan) * chans;
        fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma->chans)
                return -ENOMEM;

        len = sizeof(struct fsl_qdma_queue *) * blk_num;
        fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma->status)
                return -ENOMEM;

        len = sizeof(int) * blk_num;
        fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma->queue_irq)
                return -ENOMEM;

        ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
        if (ret) {
                dev_err(&pdev->dev, "Can't get queues.\n");
                return ret;
        }

        fsl_qdma->desc_allocated = 0;
        fsl_qdma->n_chans = chans;
        fsl_qdma->n_queues = queues;
        fsl_qdma->block_number = blk_num;
        fsl_qdma->block_offset = blk_off;

        mutex_init(&fsl_qdma->fsl_qdma_mutex);

        for (i = 0; i < fsl_qdma->block_number; i++) {
                fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
                if (!fsl_qdma->status[i])
                        return -ENOMEM;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->ctrl_base))
                return PTR_ERR(fsl_qdma->ctrl_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->status_base))
                return PTR_ERR(fsl_qdma->status_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->block_base))
                return PTR_ERR(fsl_qdma->block_base);

        fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
        if (!fsl_qdma->queue)
                return -ENOMEM;

        ret = fsl_qdma_irq_init(pdev, fsl_qdma);
        if (ret)
                return ret;

        fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
        if (fsl_qdma->irq_base < 0)
                return fsl_qdma->irq_base;

        fsl_qdma->feature = of_property_read_bool(np, "big-endian");
        INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

        for (i = 0; i < fsl_qdma->n_chans; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                fsl_chan->qdma = fsl_qdma;
                fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
                                                        fsl_qdma->block_number);
                fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
                vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
        }

        dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

        fsl_qdma->dma_dev.dev = &pdev->dev;
        fsl_qdma->dma_dev.device_free_chan_resources =
                fsl_qdma_free_chan_resources;
        fsl_qdma->dma_dev.device_alloc_chan_resources =
                fsl_qdma_alloc_chan_resources;
        fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
        fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
        fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
        fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
        fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
        if (ret) {
                dev_err(&pdev->dev, "dma_set_mask failure.\n");
                return ret;
        }

        platform_set_drvdata(pdev, fsl_qdma);

        ret = dma_async_device_register(&fsl_qdma->dma_dev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register NXP Layerscape qDMA engine.\n");
                return ret;
        }

        ret = fsl_qdma_reg_init(fsl_qdma);
        if (ret) {
                dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
                dma_async_device_unregister(&fsl_qdma->dma_dev);
                return ret;
        }

        return 0;
}

static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_qdma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                 &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}

static int fsl_qdma_remove(struct platform_device *pdev)
{
        int i;
        struct fsl_qdma_queue *status;
        struct device_node *np = pdev->dev.of_node;
        struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

        fsl_qdma_irq_exit(pdev, fsl_qdma);
        fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_qdma->dma_dev);

        for (i = 0; i < fsl_qdma->block_number; i++) {
                status = fsl_qdma->status[i];
                dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
                                status->n_cq, status->cq, status->bus_addr);
        }
        return 0;
}

static const struct of_device_id fsl_qdma_dt_ids[] = {
        { .compatible = "fsl,ls1021a-qdma", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
        .driver         = {
                .name   = "fsl-qdma",
                .of_match_table = fsl_qdma_dt_ids,
        },
        .probe          = fsl_qdma_probe,
        .remove         = fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
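
/*
 * Hedged usage sketch (not part of the driver): how a kernel client might
 * drive a memcpy through this engine via the generic dmaengine API. The
 * function name and the dst/src/len arguments are illustrative, error
 * handling is trimmed, and completion is polled where a real user would
 * typically install a callback. Deliberately compiled out with #if 0.
 */
#if 0
#include <linux/dmaengine.h>

static int example_qdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        /* Any DMA_MEMCPY-capable channel will do; fsl-qdma registers some. */
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
        if (!tx) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        /* Busy-wait for completion; a callback would be used in practice. */
        while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
               DMA_COMPLETE)
                cpu_relax();

        dma_release_channel(chan);
        return 0;
}
#endif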
