root/drivers/spi/spi-fsl-cpm.c


DEFINITIONS

This source file includes the following definitions:
  1. fsl_spi_cpm_reinit_txrx
  2. fsl_spi_cpm_bufs_start
  3. fsl_spi_cpm_bufs
  4. fsl_spi_cpm_bufs_complete
  5. fsl_spi_cpm_irq
  6. fsl_spi_alloc_dummy_rx
  7. fsl_spi_free_dummy_rx
  8. fsl_spi_cpm_get_pram
  9. fsl_spi_cpm_init
  10. fsl_spi_cpm_free

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale SPI controller driver cpm functions.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009  MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 */
#include <asm/cpm.h>
#include <soc/fsl/qe/qe.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>

#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
#include "spi-fsl-spi.h"

/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

#define SPIE_TXB        0x00000200      /* Last char is written to tx fifo */
#define SPIE_RXB        0x00000100      /* Last char is written to rx buf */

/* SPCOM register values */
#define SPCOM_STR       (1 << 23)       /* Start transmit */

#define SPI_PRAM_SIZE   0x100
#define SPI_MRBLR       ((unsigned int)PAGE_SIZE)

static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;

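/*
 * Reset the controller's transmit and receive state so that the next
 * transfer starts from the base of the buffer descriptor rings: QE uses
 * the INIT_TX_RX command, CPM1 rewinds the parameter RAM pointers by
 * hand, and CPM2 issues CPM_CR_INIT_TRX through the command register.
 */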
void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
{
        if (mspi->flags & SPI_QE) {
                qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
                             QE_CR_PROTOCOL_UNSPECIFIED, 0);
        } else {
                if (mspi->flags & SPI_CPM1) {
                        out_be32(&mspi->pram->rstate, 0);
                        out_be16(&mspi->pram->rbptr,
                                 in_be16(&mspi->pram->rbase));
                        out_be32(&mspi->pram->tstate, 0);
                        out_be16(&mspi->pram->tbptr,
                                 in_be16(&mspi->pram->tbase));
                } else {
                        cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
                }
        }
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);

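/*
 * Program one TX and one RX buffer descriptor for the next chunk of the
 * transfer in progress (at most SPI_MRBLR bytes), then set SPCOM_STR to
 * start the CPM transfer.
 */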
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
        struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
        struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
        unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
        unsigned int xfer_ofs;
        struct fsl_spi_reg *reg_base = mspi->reg_base;

        xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

        if (mspi->rx_dma == mspi->dma_dummy_rx)
                out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
        else
                out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
        out_be16(&rx_bd->cbd_datlen, 0);
        out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

        if (mspi->tx_dma == mspi->dma_dummy_tx)
                out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
        else
                out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
        out_be16(&tx_bd->cbd_datlen, xfer_len);
        out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
                                 BD_SC_LAST);

        /* start transfer */
        mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}

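/*
 * Set up DMA for a transfer: buffers already mapped by the caller, and
 * the dummy buffers used when tx_buf or rx_buf is NULL, are used as-is;
 * anything else is streaming-mapped here. RX interrupts are then
 * enabled and the first chunk is kicked off.
 */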
int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
                     struct spi_transfer *t, bool is_dma_mapped)
{
        struct device *dev = mspi->dev;
        struct fsl_spi_reg *reg_base = mspi->reg_base;

        if (is_dma_mapped) {
                mspi->map_tx_dma = 0;
                mspi->map_rx_dma = 0;
        } else {
                mspi->map_tx_dma = 1;
                mspi->map_rx_dma = 1;
        }

        if (!t->tx_buf) {
                mspi->tx_dma = mspi->dma_dummy_tx;
                mspi->map_tx_dma = 0;
        }

        if (!t->rx_buf) {
                mspi->rx_dma = mspi->dma_dummy_rx;
                mspi->map_rx_dma = 0;
        }

        if (mspi->map_tx_dma) {
                void *nonconst_tx = (void *)mspi->tx; /* cast away const for dma_map_single() */

                mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, mspi->tx_dma)) {
                        dev_err(dev, "unable to map tx dma\n");
                        return -ENOMEM;
                }
        } else if (t->tx_buf) {
                mspi->tx_dma = t->tx_dma;
        }

        if (mspi->map_rx_dma) {
                mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
                                              DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, mspi->rx_dma)) {
                        dev_err(dev, "unable to map rx dma\n");
                        goto err_rx_dma;
                }
        } else if (t->rx_buf) {
                mspi->rx_dma = t->rx_dma;
        }

        /* enable rx ints */
        mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

        mspi->xfer_in_progress = t;
        mspi->count = t->len;

        /* start CPM transfers */
        fsl_spi_cpm_bufs_start(mspi);

        return 0;

err_rx_dma:
        if (mspi->map_tx_dma)
                dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);

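/* Undo the DMA mappings set up by fsl_spi_cpm_bufs() once a transfer is done. */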
void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;
        struct spi_transfer *t = mspi->xfer_in_progress;

        if (mspi->map_tx_dma)
                dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
        if (mspi->map_rx_dma)
                dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
        mspi->xfer_in_progress = NULL;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);

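/*
 * Per-chunk completion handler, called from the driver's interrupt path:
 * account for the bytes reported in the RX buffer descriptor, clear the
 * events, then start the next chunk or complete the whole transfer.
 */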
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
        u16 len;
        struct fsl_spi_reg *reg_base = mspi->reg_base;

        dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
                in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

        len = in_be16(&mspi->rx_bd->cbd_datlen);
        if (len > mspi->count) {
                WARN_ON(1);
                len = mspi->count;
        }

        /* Clear the events */
        mpc8xxx_spi_write_reg(&reg_base->event, events);

        mspi->count -= len;
        if (mspi->count)
                fsl_spi_cpm_bufs_start(mspi);
        else
                complete(&mspi->done);
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);

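/*
 * The dummy RX buffer is shared by all CPM/QE SPI controllers in the
 * system and is reference counted under fsl_dummy_rx_lock.
 */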
static void *fsl_spi_alloc_dummy_rx(void)
{
        mutex_lock(&fsl_dummy_rx_lock);

        if (!fsl_dummy_rx)
                fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
        if (fsl_dummy_rx)
                fsl_dummy_rx_refcnt++;

        mutex_unlock(&fsl_dummy_rx_lock);

        return fsl_dummy_rx;
}

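/* Drop a reference to the shared dummy RX buffer; free it on the last put. */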
static void fsl_spi_free_dummy_rx(void)
{
        mutex_lock(&fsl_dummy_rx_lock);

        switch (fsl_dummy_rx_refcnt) {
        case 0:
                WARN_ON(1);
                break;
        case 1:
                kfree(fsl_dummy_rx);
                fsl_dummy_rx = NULL;
                fallthrough;
        default:
                fsl_dummy_rx_refcnt--;
                break;
        }

        mutex_unlock(&fsl_dummy_rx_lock);
}

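/*
 * Find or allocate the SPI parameter RAM: QE uses either the fixed
 * location given by the third "reg" cell or a dynamically allocated
 * muram block assigned with QE_ASSIGN_PAGE_TO_DEVICE; CPM2 allocates
 * muram and writes its offset into the SPI base register. Returns a
 * muram offset, or a negative errno cast to unsigned long on failure.
 */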
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;
        struct device_node *np = dev->of_node;
        const u32 *iprop;
        int size;
        void __iomem *spi_base;
        unsigned long pram_ofs = -ENOMEM;

        /* Can't use of_address_to_resource(), QE muram isn't at 0. */
        iprop = of_get_property(np, "reg", &size);

        /* QE with a fixed pram location? */
        if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
                return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

        /* QE but with a dynamic pram location? */
        if (mspi->flags & SPI_QE) {
                pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
                qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
                             QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
                return pram_ofs;
        }

        spi_base = of_iomap(np, 1);
        if (spi_base == NULL)
                return -EINVAL;

        if (mspi->flags & SPI_CPM2) {
                pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
                out_be16(spi_base, pram_ofs);
        }

        iounmap(spi_base);
        return pram_ofs;
}

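/*
 * Set up buffer descriptor mode: take a reference on the shared dummy RX
 * buffer, locate or allocate the parameter RAM, allocate one TX and one
 * RX buffer descriptor in muram, map the dummy buffers for DMA and
 * initialize the parameter RAM. Does nothing (and returns 0) when the
 * controller is not in CPM mode.
 */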
int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;
        struct device_node *np = dev->of_node;
        const u32 *iprop;
        int size;
        unsigned long bds_ofs;

        if (!(mspi->flags & SPI_CPM_MODE))
                return 0;

        if (!fsl_spi_alloc_dummy_rx())
                return -ENOMEM;

        if (mspi->flags & SPI_QE) {
                iprop = of_get_property(np, "cell-index", &size);
                if (iprop && size == sizeof(*iprop))
                        mspi->subblock = *iprop;

                switch (mspi->subblock) {
                default:
                        dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
                        fallthrough;
                case 0:
                        mspi->subblock = QE_CR_SUBBLOCK_SPI1;
                        break;
                case 1:
                        mspi->subblock = QE_CR_SUBBLOCK_SPI2;
                        break;
                }
        }

        if (mspi->flags & SPI_CPM1) {
                void *pram;

                pram = devm_platform_ioremap_resource(to_platform_device(dev),
                                                      1);
                if (IS_ERR(pram))
                        mspi->pram = NULL;
                else
                        mspi->pram = pram;
        } else {
                unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);

                if (IS_ERR_VALUE(pram_ofs))
                        mspi->pram = NULL;
                else
                        mspi->pram = cpm_muram_addr(pram_ofs);
        }
        if (mspi->pram == NULL) {
                dev_err(dev, "can't allocate spi parameter ram\n");
                goto err_pram;
        }

        bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
                                  sizeof(*mspi->rx_bd), 8);
        if (IS_ERR_VALUE(bds_ofs)) {
                dev_err(dev, "can't allocate bds\n");
                goto err_bds;
        }

        mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
                                            DMA_TO_DEVICE);
        if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
                dev_err(dev, "unable to map dummy tx buffer\n");
                goto err_dummy_tx;
        }

        mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
                                            DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
                dev_err(dev, "unable to map dummy rx buffer\n");
                goto err_dummy_rx;
        }

        mspi->tx_bd = cpm_muram_addr(bds_ofs);
        mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

        /* Initialize parameter ram. */
        out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
        out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
        out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
        out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
        out_be16(&mspi->pram->mrblr, SPI_MRBLR);
        out_be32(&mspi->pram->rstate, 0);
        out_be32(&mspi->pram->rdp, 0);
        out_be16(&mspi->pram->rbptr, 0);
        out_be16(&mspi->pram->rbc, 0);
        out_be32(&mspi->pram->rxtmp, 0);
        out_be32(&mspi->pram->tstate, 0);
        out_be32(&mspi->pram->tdp, 0);
        out_be16(&mspi->pram->tbptr, 0);
        out_be16(&mspi->pram->tbc, 0);
        out_be32(&mspi->pram->txtmp, 0);

        return 0;

err_dummy_rx:
        dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
        cpm_muram_free(bds_ofs);
err_bds:
        if (!(mspi->flags & SPI_CPM1))
                cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
        fsl_spi_free_dummy_rx();
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);

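/* Release everything acquired by fsl_spi_cpm_init(). */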
void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
        struct device *dev = mspi->dev;

        if (!(mspi->flags & SPI_CPM_MODE))
                return;

        dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
        dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
        cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
        cpm_muram_free(cpm_muram_offset(mspi->pram));
        fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);

MODULE_LICENSE("GPL");
