1/* 2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240, 3 * GT64260, MV64340, MV64360, GT96100, ... ). 4 * 5 * Author: Mark A. Greer <mgreer@mvista.com> 6 * 7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to 8 * have been created by Chris Zankel (formerly of MontaVista) but there 9 * is no proper Copyright so I'm not sure. Apparently, parts were also 10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c 11 * by Russell King. 12 * 13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under 14 * the terms of the GNU General Public License version 2. This program 15 * is licensed "as is" without any warranty of any kind, whether express 16 * or implied. 17 */ 18/* 19 * The MPSC interface is much like a typical network controller's interface. 20 * That is, you set up separate rings of descriptors for transmitting and 21 * receiving data. There is also a pool of buffers with (one buffer per 22 * descriptor) that incoming data are dma'd into or outgoing data are dma'd 23 * out of. 24 * 25 * The MPSC requires two other controllers to be able to work. The Baud Rate 26 * Generator (BRG) provides a clock at programmable frequencies which determines 27 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the 28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the 29 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the 30 * transmit and receive "engines" going (i.e., indicate data has been 31 * transmitted or received). 32 * 33 * NOTES: 34 * 35 * 1) Some chips have an erratum where several regs cannot be 36 * read. To work around that, we keep a local copy of those regs in 37 * 'mpsc_port_info'. 38 * 39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr 40 * accesses system mem with coherency enabled. For that reason, the driver 41 * assumes that coherency for that ctlr has been disabled. 
This means 42 * that when in a cache coherent system, the driver has to manually manage 43 * the data cache on the areas that it touches because the dma_* macro are 44 * basically no-ops. 45 * 46 * 3) There is an erratum (on PPC) where you can't use the instruction to do 47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places 48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed. 49 * 50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG. 51 */ 52 53 54#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 55#define SUPPORT_SYSRQ 56#endif 57 58#include <linux/module.h> 59#include <linux/moduleparam.h> 60#include <linux/tty.h> 61#include <linux/tty_flip.h> 62#include <linux/ioport.h> 63#include <linux/init.h> 64#include <linux/console.h> 65#include <linux/sysrq.h> 66#include <linux/serial.h> 67#include <linux/serial_core.h> 68#include <linux/delay.h> 69#include <linux/device.h> 70#include <linux/dma-mapping.h> 71#include <linux/mv643xx.h> 72#include <linux/platform_device.h> 73#include <linux/gfp.h> 74 75#include <asm/io.h> 76#include <asm/irq.h> 77 78#define MPSC_NUM_CTLRS 2 79 80/* 81 * Descriptors and buffers must be cache line aligned. 82 * Buffers lengths must be multiple of cache line size. 83 * Number of Tx & Rx descriptors must be powers of 2. 
84 */ 85#define MPSC_RXR_ENTRIES 32 86#define MPSC_RXRE_SIZE dma_get_cache_alignment() 87#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE) 88#define MPSC_RXBE_SIZE dma_get_cache_alignment() 89#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE) 90 91#define MPSC_TXR_ENTRIES 32 92#define MPSC_TXRE_SIZE dma_get_cache_alignment() 93#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE) 94#define MPSC_TXBE_SIZE dma_get_cache_alignment() 95#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE) 96 97#define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \ 98 + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */) 99 100/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */ 101struct mpsc_rx_desc { 102 u16 bufsize; 103 u16 bytecnt; 104 u32 cmdstat; 105 u32 link; 106 u32 buf_ptr; 107} __attribute((packed)); 108 109struct mpsc_tx_desc { 110 u16 bytecnt; 111 u16 shadow; 112 u32 cmdstat; 113 u32 link; 114 u32 buf_ptr; 115} __attribute((packed)); 116 117/* 118 * Some regs that have the erratum that you can't read them are are shared 119 * between the two MPSC controllers. This struct contains those shared regs. 120 */ 121struct mpsc_shared_regs { 122 phys_addr_t mpsc_routing_base_p; 123 phys_addr_t sdma_intr_base_p; 124 125 void __iomem *mpsc_routing_base; 126 void __iomem *sdma_intr_base; 127 128 u32 MPSC_MRR_m; 129 u32 MPSC_RCRR_m; 130 u32 MPSC_TCRR_m; 131 u32 SDMA_INTR_CAUSE_m; 132 u32 SDMA_INTR_MASK_m; 133}; 134 135/* The main driver data structure */ 136struct mpsc_port_info { 137 struct uart_port port; /* Overlay uart_port structure */ 138 139 /* Internal driver state for this ctlr */ 140 u8 ready; 141 u8 rcv_data; 142 tcflag_t c_iflag; /* save termios->c_iflag */ 143 tcflag_t c_cflag; /* save termios->c_cflag */ 144 145 /* Info passed in from platform */ 146 u8 mirror_regs; /* Need to mirror regs? */ 147 u8 cache_mgmt; /* Need manual cache mgmt? */ 148 u8 brg_can_tune; /* BRG has baud tuning? 
*/ 149 u32 brg_clk_src; 150 u16 mpsc_max_idle; 151 int default_baud; 152 int default_bits; 153 int default_parity; 154 int default_flow; 155 156 /* Physical addresses of various blocks of registers (from platform) */ 157 phys_addr_t mpsc_base_p; 158 phys_addr_t sdma_base_p; 159 phys_addr_t brg_base_p; 160 161 /* Virtual addresses of various blocks of registers (from platform) */ 162 void __iomem *mpsc_base; 163 void __iomem *sdma_base; 164 void __iomem *brg_base; 165 166 /* Descriptor ring and buffer allocations */ 167 void *dma_region; 168 dma_addr_t dma_region_p; 169 170 dma_addr_t rxr; /* Rx descriptor ring */ 171 dma_addr_t rxr_p; /* Phys addr of rxr */ 172 u8 *rxb; /* Rx Ring I/O buf */ 173 u8 *rxb_p; /* Phys addr of rxb */ 174 u32 rxr_posn; /* First desc w/ Rx data */ 175 176 dma_addr_t txr; /* Tx descriptor ring */ 177 dma_addr_t txr_p; /* Phys addr of txr */ 178 u8 *txb; /* Tx Ring I/O buf */ 179 u8 *txb_p; /* Phys addr of txb */ 180 int txr_head; /* Where new data goes */ 181 int txr_tail; /* Where sent data comes off */ 182 spinlock_t tx_lock; /* transmit lock */ 183 184 /* Mirrored values of regs we can't read (if 'mirror_regs' set) */ 185 u32 MPSC_MPCR_m; 186 u32 MPSC_CHR_1_m; 187 u32 MPSC_CHR_2_m; 188 u32 MPSC_CHR_10_m; 189 u32 BRG_BCR_m; 190 struct mpsc_shared_regs *shared_regs; 191}; 192 193/* Hooks to platform-specific code */ 194int mpsc_platform_register_driver(void); 195void mpsc_platform_unregister_driver(void); 196 197/* Hooks back in to mpsc common to be called by platform-specific code */ 198struct mpsc_port_info *mpsc_device_probe(int index); 199struct mpsc_port_info *mpsc_device_remove(int index); 200 201/* Main MPSC Configuration Register Offsets */ 202#define MPSC_MMCRL 0x0000 203#define MPSC_MMCRH 0x0004 204#define MPSC_MPCR 0x0008 205#define MPSC_CHR_1 0x000c 206#define MPSC_CHR_2 0x0010 207#define MPSC_CHR_3 0x0014 208#define MPSC_CHR_4 0x0018 209#define MPSC_CHR_5 0x001c 210#define MPSC_CHR_6 0x0020 211#define MPSC_CHR_7 0x0024 
212#define MPSC_CHR_8 0x0028 213#define MPSC_CHR_9 0x002c 214#define MPSC_CHR_10 0x0030 215#define MPSC_CHR_11 0x0034 216 217#define MPSC_MPCR_FRZ (1 << 9) 218#define MPSC_MPCR_CL_5 0 219#define MPSC_MPCR_CL_6 1 220#define MPSC_MPCR_CL_7 2 221#define MPSC_MPCR_CL_8 3 222#define MPSC_MPCR_SBL_1 0 223#define MPSC_MPCR_SBL_2 1 224 225#define MPSC_CHR_2_TEV (1<<1) 226#define MPSC_CHR_2_TA (1<<7) 227#define MPSC_CHR_2_TTCS (1<<9) 228#define MPSC_CHR_2_REV (1<<17) 229#define MPSC_CHR_2_RA (1<<23) 230#define MPSC_CHR_2_CRD (1<<25) 231#define MPSC_CHR_2_EH (1<<31) 232#define MPSC_CHR_2_PAR_ODD 0 233#define MPSC_CHR_2_PAR_SPACE 1 234#define MPSC_CHR_2_PAR_EVEN 2 235#define MPSC_CHR_2_PAR_MARK 3 236 237/* MPSC Signal Routing */ 238#define MPSC_MRR 0x0000 239#define MPSC_RCRR 0x0004 240#define MPSC_TCRR 0x0008 241 242/* Serial DMA Controller Interface Registers */ 243#define SDMA_SDC 0x0000 244#define SDMA_SDCM 0x0008 245#define SDMA_RX_DESC 0x0800 246#define SDMA_RX_BUF_PTR 0x0808 247#define SDMA_SCRDP 0x0810 248#define SDMA_TX_DESC 0x0c00 249#define SDMA_SCTDP 0x0c10 250#define SDMA_SFTDP 0x0c14 251 252#define SDMA_DESC_CMDSTAT_PE (1<<0) 253#define SDMA_DESC_CMDSTAT_CDL (1<<1) 254#define SDMA_DESC_CMDSTAT_FR (1<<3) 255#define SDMA_DESC_CMDSTAT_OR (1<<6) 256#define SDMA_DESC_CMDSTAT_BR (1<<9) 257#define SDMA_DESC_CMDSTAT_MI (1<<10) 258#define SDMA_DESC_CMDSTAT_A (1<<11) 259#define SDMA_DESC_CMDSTAT_AM (1<<12) 260#define SDMA_DESC_CMDSTAT_CT (1<<13) 261#define SDMA_DESC_CMDSTAT_C (1<<14) 262#define SDMA_DESC_CMDSTAT_ES (1<<15) 263#define SDMA_DESC_CMDSTAT_L (1<<16) 264#define SDMA_DESC_CMDSTAT_F (1<<17) 265#define SDMA_DESC_CMDSTAT_P (1<<18) 266#define SDMA_DESC_CMDSTAT_EI (1<<23) 267#define SDMA_DESC_CMDSTAT_O (1<<31) 268 269#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \ 270 | SDMA_DESC_CMDSTAT_EI) 271 272#define SDMA_SDC_RFT (1<<0) 273#define SDMA_SDC_SFM (1<<1) 274#define SDMA_SDC_BLMR (1<<6) 275#define SDMA_SDC_BLMT (1<<7) 276#define SDMA_SDC_POVR (1<<8) 277#define 
SDMA_SDC_RIFB (1<<9) 278 279#define SDMA_SDCM_ERD (1<<7) 280#define SDMA_SDCM_AR (1<<15) 281#define SDMA_SDCM_STD (1<<16) 282#define SDMA_SDCM_TXD (1<<23) 283#define SDMA_SDCM_AT (1<<31) 284 285#define SDMA_0_CAUSE_RXBUF (1<<0) 286#define SDMA_0_CAUSE_RXERR (1<<1) 287#define SDMA_0_CAUSE_TXBUF (1<<2) 288#define SDMA_0_CAUSE_TXEND (1<<3) 289#define SDMA_1_CAUSE_RXBUF (1<<8) 290#define SDMA_1_CAUSE_RXERR (1<<9) 291#define SDMA_1_CAUSE_TXBUF (1<<10) 292#define SDMA_1_CAUSE_TXEND (1<<11) 293 294#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \ 295 | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR) 296#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \ 297 | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND) 298 299/* SDMA Interrupt registers */ 300#define SDMA_INTR_CAUSE 0x0000 301#define SDMA_INTR_MASK 0x0080 302 303/* Baud Rate Generator Interface Registers */ 304#define BRG_BCR 0x0000 305#define BRG_BTR 0x0004 306 307/* 308 * Define how this driver is known to the outside (we've been assigned a 309 * range on the "Low-density serial ports" major). 310 */ 311#define MPSC_MAJOR 204 312#define MPSC_MINOR_START 44 313#define MPSC_DRIVER_NAME "MPSC" 314#define MPSC_DEV_NAME "ttyMM" 315#define MPSC_VERSION "1.00" 316 317static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS]; 318static struct mpsc_shared_regs mpsc_shared_regs; 319static struct uart_driver mpsc_reg; 320 321static void mpsc_start_rx(struct mpsc_port_info *pi); 322static void mpsc_free_ring_mem(struct mpsc_port_info *pi); 323static void mpsc_release_port(struct uart_port *port); 324/* 325 ****************************************************************************** 326 * 327 * Baud Rate Generator Routines (BRG) 328 * 329 ****************************************************************************** 330 */ 331static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src) 332{ 333 u32 v; 334 335 v = (pi->mirror_regs) ? 
pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 336 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18); 337 338 if (pi->brg_can_tune) 339 v &= ~(1 << 25); 340 341 if (pi->mirror_regs) 342 pi->BRG_BCR_m = v; 343 writel(v, pi->brg_base + BRG_BCR); 344 345 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000, 346 pi->brg_base + BRG_BTR); 347} 348 349static void mpsc_brg_enable(struct mpsc_port_info *pi) 350{ 351 u32 v; 352 353 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 354 v |= (1 << 16); 355 356 if (pi->mirror_regs) 357 pi->BRG_BCR_m = v; 358 writel(v, pi->brg_base + BRG_BCR); 359} 360 361static void mpsc_brg_disable(struct mpsc_port_info *pi) 362{ 363 u32 v; 364 365 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 366 v &= ~(1 << 16); 367 368 if (pi->mirror_regs) 369 pi->BRG_BCR_m = v; 370 writel(v, pi->brg_base + BRG_BCR); 371} 372 373/* 374 * To set the baud, we adjust the CDV field in the BRG_BCR reg. 375 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1. 376 * However, the input clock is divided by 16 in the MPSC b/c of how 377 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our 378 * calculation by 16 to account for that. So the real calculation 379 * that accounts for the way the mpsc is set up is: 380 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1. 381 */ 382static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud) 383{ 384 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1; 385 u32 v; 386 387 mpsc_brg_disable(pi); 388 v = (pi->mirror_regs) ? 
pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 389 v = (v & 0xffff0000) | (cdv & 0xffff); 390 391 if (pi->mirror_regs) 392 pi->BRG_BCR_m = v; 393 writel(v, pi->brg_base + BRG_BCR); 394 mpsc_brg_enable(pi); 395} 396 397/* 398 ****************************************************************************** 399 * 400 * Serial DMA Routines (SDMA) 401 * 402 ****************************************************************************** 403 */ 404 405static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size) 406{ 407 u32 v; 408 409 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n", 410 pi->port.line, burst_size); 411 412 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */ 413 414 if (burst_size < 2) 415 v = 0x0; /* 1 64-bit word */ 416 else if (burst_size < 4) 417 v = 0x1; /* 2 64-bit words */ 418 else if (burst_size < 8) 419 v = 0x2; /* 4 64-bit words */ 420 else 421 v = 0x3; /* 8 64-bit words */ 422 423 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12), 424 pi->sdma_base + SDMA_SDC); 425} 426 427static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size) 428{ 429 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line, 430 burst_size); 431 432 writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f, 433 pi->sdma_base + SDMA_SDC); 434 mpsc_sdma_burstsize(pi, burst_size); 435} 436 437static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask) 438{ 439 u32 old, v; 440 441 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask); 442 443 old = v = (pi->mirror_regs) ? 
pi->shared_regs->SDMA_INTR_MASK_m : 444 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 445 446 mask &= 0xf; 447 if (pi->port.line) 448 mask <<= 8; 449 v &= ~mask; 450 451 if (pi->mirror_regs) 452 pi->shared_regs->SDMA_INTR_MASK_m = v; 453 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 454 455 if (pi->port.line) 456 old >>= 8; 457 return old & 0xf; 458} 459 460static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask) 461{ 462 u32 v; 463 464 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask); 465 466 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m 467 : readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 468 469 mask &= 0xf; 470 if (pi->port.line) 471 mask <<= 8; 472 v |= mask; 473 474 if (pi->mirror_regs) 475 pi->shared_regs->SDMA_INTR_MASK_m = v; 476 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 477} 478 479static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi) 480{ 481 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line); 482 483 if (pi->mirror_regs) 484 pi->shared_regs->SDMA_INTR_CAUSE_m = 0; 485 writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE 486 + pi->port.line); 487} 488 489static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi, 490 struct mpsc_rx_desc *rxre_p) 491{ 492 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n", 493 pi->port.line, (u32)rxre_p); 494 495 writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP); 496} 497 498static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi, 499 struct mpsc_tx_desc *txre_p) 500{ 501 writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP); 502 writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP); 503} 504 505static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val) 506{ 507 u32 v; 508 509 v = readl(pi->sdma_base + SDMA_SDCM); 510 if (val) 511 v |= val; 512 else 513 v = 0; 514 wmb(); 515 writel(v, pi->sdma_base + SDMA_SDCM); 516 wmb(); 517} 518 519static uint mpsc_sdma_tx_active(struct mpsc_port_info 
*pi) 520{ 521 return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD; 522} 523 524static void mpsc_sdma_start_tx(struct mpsc_port_info *pi) 525{ 526 struct mpsc_tx_desc *txre, *txre_p; 527 528 /* If tx isn't running & there's a desc ready to go, start it */ 529 if (!mpsc_sdma_tx_active(pi)) { 530 txre = (struct mpsc_tx_desc *)(pi->txr 531 + (pi->txr_tail * MPSC_TXRE_SIZE)); 532 dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE, 533 DMA_FROM_DEVICE); 534#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 535 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 536 invalidate_dcache_range((ulong)txre, 537 (ulong)txre + MPSC_TXRE_SIZE); 538#endif 539 540 if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) { 541 txre_p = (struct mpsc_tx_desc *) 542 (pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE)); 543 544 mpsc_sdma_set_tx_ring(pi, txre_p); 545 mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD); 546 } 547 } 548} 549 550static void mpsc_sdma_stop(struct mpsc_port_info *pi) 551{ 552 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line); 553 554 /* Abort any SDMA transfers */ 555 mpsc_sdma_cmd(pi, 0); 556 mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT); 557 558 /* Clear the SDMA current and first TX and RX pointers */ 559 mpsc_sdma_set_tx_ring(pi, NULL); 560 mpsc_sdma_set_rx_ring(pi, NULL); 561 562 /* Disable interrupts */ 563 mpsc_sdma_intr_mask(pi, 0xf); 564 mpsc_sdma_intr_ack(pi); 565} 566 567/* 568 ****************************************************************************** 569 * 570 * Multi-Protocol Serial Controller Routines (MPSC) 571 * 572 ****************************************************************************** 573 */ 574 575static void mpsc_hw_init(struct mpsc_port_info *pi) 576{ 577 u32 v; 578 579 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line); 580 581 /* Set up clock routing */ 582 if (pi->mirror_regs) { 583 v = pi->shared_regs->MPSC_MRR_m; 584 v &= ~0x1c7; 585 pi->shared_regs->MPSC_MRR_m = v; 586 writel(v, 
pi->shared_regs->mpsc_routing_base + MPSC_MRR); 587 588 v = pi->shared_regs->MPSC_RCRR_m; 589 v = (v & ~0xf0f) | 0x100; 590 pi->shared_regs->MPSC_RCRR_m = v; 591 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR); 592 593 v = pi->shared_regs->MPSC_TCRR_m; 594 v = (v & ~0xf0f) | 0x100; 595 pi->shared_regs->MPSC_TCRR_m = v; 596 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR); 597 } else { 598 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR); 599 v &= ~0x1c7; 600 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR); 601 602 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR); 603 v = (v & ~0xf0f) | 0x100; 604 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR); 605 606 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR); 607 v = (v & ~0xf0f) | 0x100; 608 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR); 609 } 610 611 /* Put MPSC in UART mode & enabel Tx/Rx egines */ 612 writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL); 613 614 /* No preamble, 16x divider, low-latency, */ 615 writel(0x04400400, pi->mpsc_base + MPSC_MMCRH); 616 mpsc_set_baudrate(pi, pi->default_baud); 617 618 if (pi->mirror_regs) { 619 pi->MPSC_CHR_1_m = 0; 620 pi->MPSC_CHR_2_m = 0; 621 } 622 writel(0, pi->mpsc_base + MPSC_CHR_1); 623 writel(0, pi->mpsc_base + MPSC_CHR_2); 624 writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3); 625 writel(0, pi->mpsc_base + MPSC_CHR_4); 626 writel(0, pi->mpsc_base + MPSC_CHR_5); 627 writel(0, pi->mpsc_base + MPSC_CHR_6); 628 writel(0, pi->mpsc_base + MPSC_CHR_7); 629 writel(0, pi->mpsc_base + MPSC_CHR_8); 630 writel(0, pi->mpsc_base + MPSC_CHR_9); 631 writel(0, pi->mpsc_base + MPSC_CHR_10); 632} 633 634static void mpsc_enter_hunt(struct mpsc_port_info *pi) 635{ 636 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line); 637 638 if (pi->mirror_regs) { 639 writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH, 640 pi->mpsc_base + MPSC_CHR_2); 641 /* Erratum prevents reading CHR_2 so just delay for a while */ 642 
udelay(100); 643 } else { 644 writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH, 645 pi->mpsc_base + MPSC_CHR_2); 646 647 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH) 648 udelay(10); 649 } 650} 651 652static void mpsc_freeze(struct mpsc_port_info *pi) 653{ 654 u32 v; 655 656 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line); 657 658 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 659 readl(pi->mpsc_base + MPSC_MPCR); 660 v |= MPSC_MPCR_FRZ; 661 662 if (pi->mirror_regs) 663 pi->MPSC_MPCR_m = v; 664 writel(v, pi->mpsc_base + MPSC_MPCR); 665} 666 667static void mpsc_unfreeze(struct mpsc_port_info *pi) 668{ 669 u32 v; 670 671 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 672 readl(pi->mpsc_base + MPSC_MPCR); 673 v &= ~MPSC_MPCR_FRZ; 674 675 if (pi->mirror_regs) 676 pi->MPSC_MPCR_m = v; 677 writel(v, pi->mpsc_base + MPSC_MPCR); 678 679 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line); 680} 681 682static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len) 683{ 684 u32 v; 685 686 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len); 687 688 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 689 readl(pi->mpsc_base + MPSC_MPCR); 690 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12); 691 692 if (pi->mirror_regs) 693 pi->MPSC_MPCR_m = v; 694 writel(v, pi->mpsc_base + MPSC_MPCR); 695} 696 697static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len) 698{ 699 u32 v; 700 701 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n", 702 pi->port.line, len); 703 704 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 705 readl(pi->mpsc_base + MPSC_MPCR); 706 707 v = (v & ~(1 << 14)) | ((len & 0x1) << 14); 708 709 if (pi->mirror_regs) 710 pi->MPSC_MPCR_m = v; 711 writel(v, pi->mpsc_base + MPSC_MPCR); 712} 713 714static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p) 715{ 716 u32 v; 717 718 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p); 719 720 v = (pi->mirror_regs) ? 
pi->MPSC_CHR_2_m : 721 readl(pi->mpsc_base + MPSC_CHR_2); 722 723 p &= 0x3; 724 v = (v & ~0xc000c) | (p << 18) | (p << 2); 725 726 if (pi->mirror_regs) 727 pi->MPSC_CHR_2_m = v; 728 writel(v, pi->mpsc_base + MPSC_CHR_2); 729} 730 731/* 732 ****************************************************************************** 733 * 734 * Driver Init Routines 735 * 736 ****************************************************************************** 737 */ 738 739static void mpsc_init_hw(struct mpsc_port_info *pi) 740{ 741 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line); 742 743 mpsc_brg_init(pi, pi->brg_clk_src); 744 mpsc_brg_enable(pi); 745 mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */ 746 mpsc_sdma_stop(pi); 747 mpsc_hw_init(pi); 748} 749 750static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi) 751{ 752 int rc = 0; 753 754 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n", 755 pi->port.line); 756 757 if (!pi->dma_region) { 758 if (!dma_supported(pi->port.dev, 0xffffffff)) { 759 printk(KERN_ERR "MPSC: Inadequate DMA support\n"); 760 rc = -ENXIO; 761 } else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev, 762 MPSC_DMA_ALLOC_SIZE, 763 &pi->dma_region_p, GFP_KERNEL)) 764 == NULL) { 765 printk(KERN_ERR "MPSC: Can't alloc Desc region\n"); 766 rc = -ENOMEM; 767 } 768 } 769 770 return rc; 771} 772 773static void mpsc_free_ring_mem(struct mpsc_port_info *pi) 774{ 775 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line); 776 777 if (pi->dma_region) { 778 dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE, 779 pi->dma_region, pi->dma_region_p); 780 pi->dma_region = NULL; 781 pi->dma_region_p = (dma_addr_t)NULL; 782 } 783} 784 785static void mpsc_init_rings(struct mpsc_port_info *pi) 786{ 787 struct mpsc_rx_desc *rxre; 788 struct mpsc_tx_desc *txre; 789 dma_addr_t dp, dp_p; 790 u8 *bp, *bp_p; 791 int i; 792 793 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line); 794 795 BUG_ON(pi->dma_region 
== NULL); 796 797 memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE); 798 799 /* 800 * Descriptors & buffers are multiples of cacheline size and must be 801 * cacheline aligned. 802 */ 803 dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment()); 804 dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment()); 805 806 /* 807 * Partition dma region into rx ring descriptor, rx buffers, 808 * tx ring descriptors, and tx buffers. 809 */ 810 pi->rxr = dp; 811 pi->rxr_p = dp_p; 812 dp += MPSC_RXR_SIZE; 813 dp_p += MPSC_RXR_SIZE; 814 815 pi->rxb = (u8 *)dp; 816 pi->rxb_p = (u8 *)dp_p; 817 dp += MPSC_RXB_SIZE; 818 dp_p += MPSC_RXB_SIZE; 819 820 pi->rxr_posn = 0; 821 822 pi->txr = dp; 823 pi->txr_p = dp_p; 824 dp += MPSC_TXR_SIZE; 825 dp_p += MPSC_TXR_SIZE; 826 827 pi->txb = (u8 *)dp; 828 pi->txb_p = (u8 *)dp_p; 829 830 pi->txr_head = 0; 831 pi->txr_tail = 0; 832 833 /* Init rx ring descriptors */ 834 dp = pi->rxr; 835 dp_p = pi->rxr_p; 836 bp = pi->rxb; 837 bp_p = pi->rxb_p; 838 839 for (i = 0; i < MPSC_RXR_ENTRIES; i++) { 840 rxre = (struct mpsc_rx_desc *)dp; 841 842 rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE); 843 rxre->bytecnt = cpu_to_be16(0); 844 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O 845 | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F 846 | SDMA_DESC_CMDSTAT_L); 847 rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE); 848 rxre->buf_ptr = cpu_to_be32(bp_p); 849 850 dp += MPSC_RXRE_SIZE; 851 dp_p += MPSC_RXRE_SIZE; 852 bp += MPSC_RXBE_SIZE; 853 bp_p += MPSC_RXBE_SIZE; 854 } 855 rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */ 856 857 /* Init tx ring descriptors */ 858 dp = pi->txr; 859 dp_p = pi->txr_p; 860 bp = pi->txb; 861 bp_p = pi->txb_p; 862 863 for (i = 0; i < MPSC_TXR_ENTRIES; i++) { 864 txre = (struct mpsc_tx_desc *)dp; 865 866 txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE); 867 txre->buf_ptr = cpu_to_be32(bp_p); 868 869 dp += MPSC_TXRE_SIZE; 870 dp_p += MPSC_TXRE_SIZE; 871 bp += MPSC_TXBE_SIZE; 872 bp_p += MPSC_TXBE_SIZE; 873 } 874 
txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */ 875 876 dma_cache_sync(pi->port.dev, (void *)pi->dma_region, 877 MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL); 878#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 879 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 880 flush_dcache_range((ulong)pi->dma_region, 881 (ulong)pi->dma_region 882 + MPSC_DMA_ALLOC_SIZE); 883#endif 884 885 return; 886} 887 888static void mpsc_uninit_rings(struct mpsc_port_info *pi) 889{ 890 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line); 891 892 BUG_ON(pi->dma_region == NULL); 893 894 pi->rxr = 0; 895 pi->rxr_p = 0; 896 pi->rxb = NULL; 897 pi->rxb_p = NULL; 898 pi->rxr_posn = 0; 899 900 pi->txr = 0; 901 pi->txr_p = 0; 902 pi->txb = NULL; 903 pi->txb_p = NULL; 904 pi->txr_head = 0; 905 pi->txr_tail = 0; 906} 907 908static int mpsc_make_ready(struct mpsc_port_info *pi) 909{ 910 int rc; 911 912 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line); 913 914 if (!pi->ready) { 915 mpsc_init_hw(pi); 916 if ((rc = mpsc_alloc_ring_mem(pi))) 917 return rc; 918 mpsc_init_rings(pi); 919 pi->ready = 1; 920 } 921 922 return 0; 923} 924 925#ifdef CONFIG_CONSOLE_POLL 926static int serial_polled; 927#endif 928 929/* 930 ****************************************************************************** 931 * 932 * Interrupt Handling Routines 933 * 934 ****************************************************************************** 935 */ 936 937static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags) 938{ 939 struct mpsc_rx_desc *rxre; 940 struct tty_port *port = &pi->port.state->port; 941 u32 cmdstat, bytes_in, i; 942 int rc = 0; 943 u8 *bp; 944 char flag = TTY_NORMAL; 945 946 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line); 947 948 rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE)); 949 950 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, 951 DMA_FROM_DEVICE); 952#if 
defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 953 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 954 invalidate_dcache_range((ulong)rxre, 955 (ulong)rxre + MPSC_RXRE_SIZE); 956#endif 957 958 /* 959 * Loop through Rx descriptors handling ones that have been completed. 960 */ 961 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) 962 & SDMA_DESC_CMDSTAT_O)) { 963 bytes_in = be16_to_cpu(rxre->bytecnt); 964#ifdef CONFIG_CONSOLE_POLL 965 if (unlikely(serial_polled)) { 966 serial_polled = 0; 967 return 0; 968 } 969#endif 970 /* Following use of tty struct directly is deprecated */ 971 if (tty_buffer_request_room(port, bytes_in) < bytes_in) { 972 if (port->low_latency) { 973 spin_unlock_irqrestore(&pi->port.lock, *flags); 974 tty_flip_buffer_push(port); 975 spin_lock_irqsave(&pi->port.lock, *flags); 976 } 977 /* 978 * If this failed then we will throw away the bytes 979 * but must do so to clear interrupts. 980 */ 981 } 982 983 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); 984 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE, 985 DMA_FROM_DEVICE); 986#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 987 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 988 invalidate_dcache_range((ulong)bp, 989 (ulong)bp + MPSC_RXBE_SIZE); 990#endif 991 992 /* 993 * Other than for parity error, the manual provides little 994 * info on what data will be in a frame flagged by any of 995 * these errors. For parity error, it is the last byte in 996 * the buffer that had the error. As for the rest, I guess 997 * we'll assume there is no data in the buffer. 998 * If there is...it gets lost. 
999 */ 1000 if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR 1001 | SDMA_DESC_CMDSTAT_FR 1002 | SDMA_DESC_CMDSTAT_OR))) { 1003 1004 pi->port.icount.rx++; 1005 1006 if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */ 1007 pi->port.icount.brk++; 1008 1009 if (uart_handle_break(&pi->port)) 1010 goto next_frame; 1011 } else if (cmdstat & SDMA_DESC_CMDSTAT_FR) { 1012 pi->port.icount.frame++; 1013 } else if (cmdstat & SDMA_DESC_CMDSTAT_OR) { 1014 pi->port.icount.overrun++; 1015 } 1016 1017 cmdstat &= pi->port.read_status_mask; 1018 1019 if (cmdstat & SDMA_DESC_CMDSTAT_BR) 1020 flag = TTY_BREAK; 1021 else if (cmdstat & SDMA_DESC_CMDSTAT_FR) 1022 flag = TTY_FRAME; 1023 else if (cmdstat & SDMA_DESC_CMDSTAT_OR) 1024 flag = TTY_OVERRUN; 1025 else if (cmdstat & SDMA_DESC_CMDSTAT_PE) 1026 flag = TTY_PARITY; 1027 } 1028 1029 if (uart_handle_sysrq_char(&pi->port, *bp)) { 1030 bp++; 1031 bytes_in--; 1032#ifdef CONFIG_CONSOLE_POLL 1033 if (unlikely(serial_polled)) { 1034 serial_polled = 0; 1035 return 0; 1036 } 1037#endif 1038 goto next_frame; 1039 } 1040 1041 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR 1042 | SDMA_DESC_CMDSTAT_FR 1043 | SDMA_DESC_CMDSTAT_OR))) 1044 && !(cmdstat & pi->port.ignore_status_mask)) { 1045 tty_insert_flip_char(port, *bp, flag); 1046 } else { 1047 for (i=0; i<bytes_in; i++) 1048 tty_insert_flip_char(port, *bp++, TTY_NORMAL); 1049 1050 pi->port.icount.rx += bytes_in; 1051 } 1052 1053next_frame: 1054 rxre->bytecnt = cpu_to_be16(0); 1055 wmb(); 1056 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O 1057 | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F 1058 | SDMA_DESC_CMDSTAT_L); 1059 wmb(); 1060 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, 1061 DMA_BIDIRECTIONAL); 1062#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1063 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1064 flush_dcache_range((ulong)rxre, 1065 (ulong)rxre + MPSC_RXRE_SIZE); 1066#endif 1067 1068 /* Advance to next descriptor */ 1069 pi->rxr_posn = (pi->rxr_posn + 1) & 
(MPSC_RXR_ENTRIES - 1); 1070 rxre = (struct mpsc_rx_desc *) 1071 (pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE)); 1072 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, 1073 DMA_FROM_DEVICE); 1074#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1075 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1076 invalidate_dcache_range((ulong)rxre, 1077 (ulong)rxre + MPSC_RXRE_SIZE); 1078#endif 1079 rc = 1; 1080 } 1081 1082 /* Restart rx engine, if its stopped */ 1083 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0) 1084 mpsc_start_rx(pi); 1085 1086 spin_unlock_irqrestore(&pi->port.lock, *flags); 1087 tty_flip_buffer_push(port); 1088 spin_lock_irqsave(&pi->port.lock, *flags); 1089 return rc; 1090} 1091 1092static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr) 1093{ 1094 struct mpsc_tx_desc *txre; 1095 1096 txre = (struct mpsc_tx_desc *)(pi->txr 1097 + (pi->txr_head * MPSC_TXRE_SIZE)); 1098 1099 txre->bytecnt = cpu_to_be16(count); 1100 txre->shadow = txre->bytecnt; 1101 wmb(); /* ensure cmdstat is last field updated */ 1102 txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F 1103 | SDMA_DESC_CMDSTAT_L 1104 | ((intr) ? 
SDMA_DESC_CMDSTAT_EI : 0)); 1105 wmb(); 1106 dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE, 1107 DMA_BIDIRECTIONAL); 1108#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1109 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1110 flush_dcache_range((ulong)txre, 1111 (ulong)txre + MPSC_TXRE_SIZE); 1112#endif 1113} 1114 1115static void mpsc_copy_tx_data(struct mpsc_port_info *pi) 1116{ 1117 struct circ_buf *xmit = &pi->port.state->xmit; 1118 u8 *bp; 1119 u32 i; 1120 1121 /* Make sure the desc ring isn't full */ 1122 while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES) 1123 < (MPSC_TXR_ENTRIES - 1)) { 1124 if (pi->port.x_char) { 1125 /* 1126 * Ideally, we should use the TCS field in 1127 * CHR_1 to put the x_char out immediately but 1128 * errata prevents us from being able to read 1129 * CHR_2 to know that its safe to write to 1130 * CHR_1. Instead, just put it in-band with 1131 * all the other Tx data. 1132 */ 1133 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); 1134 *bp = pi->port.x_char; 1135 pi->port.x_char = 0; 1136 i = 1; 1137 } else if (!uart_circ_empty(xmit) 1138 && !uart_tx_stopped(&pi->port)) { 1139 i = min((u32)MPSC_TXBE_SIZE, 1140 (u32)uart_circ_chars_pending(xmit)); 1141 i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail, 1142 UART_XMIT_SIZE)); 1143 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); 1144 memcpy(bp, &xmit->buf[xmit->tail], i); 1145 xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1); 1146 1147 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1148 uart_write_wakeup(&pi->port); 1149 } else { /* All tx data copied into ring bufs */ 1150 return; 1151 } 1152 1153 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, 1154 DMA_BIDIRECTIONAL); 1155#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1156 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1157 flush_dcache_range((ulong)bp, 1158 (ulong)bp + MPSC_TXBE_SIZE); 1159#endif 1160 mpsc_setup_tx_desc(pi, i, 1); 1161 1162 /* Advance to next 
descriptor */ 1163 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1); 1164 } 1165} 1166 1167static int mpsc_tx_intr(struct mpsc_port_info *pi) 1168{ 1169 struct mpsc_tx_desc *txre; 1170 int rc = 0; 1171 unsigned long iflags; 1172 1173 spin_lock_irqsave(&pi->tx_lock, iflags); 1174 1175 if (!mpsc_sdma_tx_active(pi)) { 1176 txre = (struct mpsc_tx_desc *)(pi->txr 1177 + (pi->txr_tail * MPSC_TXRE_SIZE)); 1178 1179 dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE, 1180 DMA_FROM_DEVICE); 1181#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1182 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1183 invalidate_dcache_range((ulong)txre, 1184 (ulong)txre + MPSC_TXRE_SIZE); 1185#endif 1186 1187 while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) { 1188 rc = 1; 1189 pi->port.icount.tx += be16_to_cpu(txre->bytecnt); 1190 pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1); 1191 1192 /* If no more data to tx, fall out of loop */ 1193 if (pi->txr_head == pi->txr_tail) 1194 break; 1195 1196 txre = (struct mpsc_tx_desc *)(pi->txr 1197 + (pi->txr_tail * MPSC_TXRE_SIZE)); 1198 dma_cache_sync(pi->port.dev, (void *)txre, 1199 MPSC_TXRE_SIZE, DMA_FROM_DEVICE); 1200#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1201 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1202 invalidate_dcache_range((ulong)txre, 1203 (ulong)txre + MPSC_TXRE_SIZE); 1204#endif 1205 } 1206 1207 mpsc_copy_tx_data(pi); 1208 mpsc_sdma_start_tx(pi); /* start next desc if ready */ 1209 } 1210 1211 spin_unlock_irqrestore(&pi->tx_lock, iflags); 1212 return rc; 1213} 1214 1215/* 1216 * This is the driver's interrupt handler. To avoid a race, we first clear 1217 * the interrupt, then handle any completed Rx/Tx descriptors. When done 1218 * handling those descriptors, we restart the Rx/Tx engines if they're stopped. 
 */
/*
 * Top-level SDMA interrupt handler (may be shared between the two MPSC
 * ports when their irqs are equal -- see mpsc_startup()).  Ack the cause
 * bits first, then service completed Rx and Tx descriptors; report
 * IRQ_HANDLED only if either side actually did work.
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;	/* registered in mpsc_startup() */
	ulong iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	/* NOTE: mpsc_rx_intr() takes &iflags because it drops and retakes
	 * port.lock around tty_flip_buffer_push() in its tail. */
	if (mpsc_rx_intr(pi, &iflags))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}

/*
 ******************************************************************************
 *
 * serial_core.c Interface routines
 *
 ******************************************************************************
 */
/* tx_empty hook: TIOCSER_TEMT when the SDMA Tx engine has gone idle. */
static uint mpsc_tx_empty(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	ulong iflags;
	uint rc;

	spin_lock_irqsave(&pi->port.lock, iflags);
	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	return rc;
}

static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}

/*
 * get_mctrl hook: read CTS/CD status from MPSC_CHR_10, or from its local
 * mirror when a chip erratum prevents reading the real register
 * (pi->mirror_regs -- see note 1 in the file header).
 */
static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 mflags, status;

	status = (pi->mirror_regs) ?
pi->MPSC_CHR_10_m
			: readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)	/* bit 0 of CHR_10 status: CTS */
		mflags |= TIOCM_CTS;
	if (status & 0x2)	/* bit 1 of CHR_10 status: carrier detect */
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}

/* stop_tx hook: freeze the MPSC so no further data is transmitted. */
static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}

/*
 * start_tx hook: unfreeze the MPSC, copy pending circ-buf data into the
 * Tx descriptor ring and kick the SDMA Tx engine.  Serialized against the
 * interrupt path and the console by pi->tx_lock.
 */
static void mpsc_start_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	mpsc_unfreeze(pi);
	mpsc_copy_tx_data(pi);
	mpsc_sdma_start_tx(pi);

	spin_unlock_irqrestore(&pi->tx_lock, iflags);

	pr_debug("mpsc_start_tx[%d]\n", port->line);
}

/*
 * (Re)start the Rx side: put the MPSC into hunt mode and enable the SDMA
 * Rx DMA (ERD).  Only done when receiving is enabled (pi->rcv_data,
 * controlled by CREAD in mpsc_set_termios()).
 */
static void mpsc_start_rx(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);

	if (pi->rcv_data) {
		mpsc_enter_hunt(pi);
		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
	}
}

/*
 * stop_rx hook: issue a receive-abort (CHR_2_RA) to the MPSC, then abort
 * the SDMA Rx engine.  With mirror_regs set, CHR_2 cannot be read back
 * (erratum), so completion is approximated with a fixed delay.
 */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
	} else {
		writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
				pi->mpsc_base + MPSC_CHR_2);

		/* Wait for the controller to clear the abort bit */
		while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
			udelay(10);
	}

	mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
}

/* break_ctl hook: assert (ctl != 0) or clear a break condition via CHR_1. */
static void mpsc_break_ctl(struct uart_port *port, int ctl)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info,
 port);
	ulong flags;
	u32 v;

	/* 0x00ff0000 written to CHR_1 forces the break condition */
	v = ctl ? 0x00ff0000 : 0;

	spin_lock_irqsave(&pi->port.lock, flags);
	if (pi->mirror_regs)
		pi->MPSC_CHR_1_m = v;	/* keep mirror in sync (read erratum) */
	writel(v, pi->mpsc_base + MPSC_CHR_1);
	spin_unlock_irqrestore(&pi->port.lock, flags);
}

/*
 * startup hook: allocate/init the descriptor rings and buffers via
 * mpsc_make_ready(), install the SDMA interrupt handler, unmask the four
 * SDMA interrupt causes and hand the Rx ring to the SDMA.
 *
 * NOTE(review): a request_irq() failure is only logged -- rc remains 0, so
 * the port is reported as started without an IRQ, and mpsc_shutdown() will
 * later free_irq() an IRQ that was never obtained.  Confirm whether this
 * best-effort behavior is intended.
 */
static int mpsc_startup(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 flag = 0;
	int rc;

	pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
		port->line, pi->port.irq);

	if ((rc = mpsc_make_ready(pi)) == 0) {
		/* Setup IRQ handler */
		mpsc_sdma_intr_ack(pi);

		/* If irq's are shared, need to set flag */
		if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
			flag = IRQF_SHARED;

		if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
				"mpsc-sdma", pi))
			printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
					pi->port.irq);

		mpsc_sdma_intr_unmask(pi, 0xf);
		mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
					+ (pi->rxr_posn * MPSC_RXRE_SIZE)));
	}

	return rc;
}

/* shutdown hook: stop the SDMA engines and release the interrupt. */
static void mpsc_shutdown(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);

	mpsc_sdma_stop(pi);
	free_irq(pi->port.irq, pi);
}

/*
 * set_termios hook: program character length, stop bits, parity and baud
 * rate, and rebuild the read/ignore status masks consumed by the Rx
 * descriptor handler.  CREAD toggles the receiver on/off.
 */
static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	u32 baud;
	ulong flags;
	u32 chr_bits, stop_bits, par;

	pi->c_iflag = termios->c_iflag;
	pi->c_cflag = termios->c_cflag;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		chr_bits = MPSC_MPCR_CL_5;
		break;
	case CS6:
		chr_bits = MPSC_MPCR_CL_6;
		break;
	case CS7:
		chr_bits =
MPSC_MPCR_CL_7;
		break;
	case CS8:
	default:
		chr_bits = MPSC_MPCR_CL_8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		stop_bits = MPSC_MPCR_SBL_2;
	else
		stop_bits = MPSC_MPCR_SBL_1;

	/* Parity: even by default, PARODD selects odd, CMSPAR mark/space */
	par = MPSC_CHR_2_PAR_EVEN;
	if (termios->c_cflag & PARENB)
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_ODD;
#ifdef CMSPAR
	if (termios->c_cflag & CMSPAR) {
		if (termios->c_cflag & PARODD)
			par = MPSC_CHR_2_PAR_MARK;
		else
			par = MPSC_CHR_2_PAR_SPACE;
	}
#endif

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);

	spin_lock_irqsave(&pi->port.lock, flags);

	uart_update_timeout(port, termios->c_cflag, baud);

	mpsc_set_char_length(pi, chr_bits);
	mpsc_set_stop_bit_length(pi, stop_bits);
	mpsc_set_parity(pi, par);
	mpsc_set_baudrate(pi, baud);

	/* Characters/events to read */
	pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;

	if (termios->c_iflag & INPCK)
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;

	/* Characters/events to ignore */
	pi->port.ignore_status_mask = 0;

	if (termios->c_iflag & IGNPAR)
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
			| SDMA_DESC_CMDSTAT_FR;

	if (termios->c_iflag & IGNBRK) {
		pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;

		/* Overruns are also ignored only when parity is ignored too */
		if (termios->c_iflag & IGNPAR)
			pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
	}

	/* CREAD enables/disables the receiver (tracked in pi->rcv_data) */
	if ((termios->c_cflag & CREAD)) {
		if (!pi->rcv_data) {
			pi->rcv_data = 1;
			mpsc_start_rx(pi);
		}
	} else if (pi->rcv_data) {
		mpsc_stop_rx(port);
		pi->rcv_data = 0;
	}

	spin_unlock_irqrestore(&pi->port.lock, flags);
}

/* type hook: report the driver name shown for this port. */
static const char *mpsc_type(struct uart_port *port)
{
	pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
	return MPSC_DRIVER_NAME;
}

/* request_port hook: nothing to claim beyond what probe already mapped. */
static int mpsc_request_port(struct uart_port *port)
{
	/* Should make chip/platform specific call */
	return 0;
}

/* release_port hook: tear down the descriptor rings and their memory. */
static void mpsc_release_port(struct uart_port *port)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);

	if (pi->ready) {
		mpsc_uninit_rings(pi);
		mpsc_free_ring_mem(pi);
		pi->ready = 0;
	}
}

static void mpsc_config_port(struct uart_port *port, int flags)
{
}

/*
 * verify_port hook: validate user-supplied serial_struct settings against
 * this port's fixed configuration; returns -EINVAL on any mismatch.
 */
static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct mpsc_port_info *pi =
		container_of(port, struct mpsc_port_info, port);
	int rc = 0;

	pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
		rc = -EINVAL;
	else if (pi->port.irq != ser->irq)
		rc = -EINVAL;
	else if (ser->io_type != SERIAL_IO_MEM)
		rc = -EINVAL;
	else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
		rc = -EINVAL;
	else if ((void *)pi->port.mapbase != ser->iomem_base)
		rc = -EINVAL;
	else if (pi->port.iobase != ser->port)
		rc = -EINVAL;
	else if (ser->hub6 != 0)
		rc = -EINVAL;

	return rc;
}
#ifdef CONFIG_CONSOLE_POLL
/* Serial polling routines for writing and reading from the uart while
 * in an interrupt or debug context.
1552 */ 1553 1554static char poll_buf[2048]; 1555static int poll_ptr; 1556static int poll_cnt; 1557static void mpsc_put_poll_char(struct uart_port *port, 1558 unsigned char c); 1559 1560static int mpsc_get_poll_char(struct uart_port *port) 1561{ 1562 struct mpsc_port_info *pi = 1563 container_of(port, struct mpsc_port_info, port); 1564 struct mpsc_rx_desc *rxre; 1565 u32 cmdstat, bytes_in, i; 1566 u8 *bp; 1567 1568 if (!serial_polled) 1569 serial_polled = 1; 1570 1571 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line); 1572 1573 if (poll_cnt) { 1574 poll_cnt--; 1575 return poll_buf[poll_ptr++]; 1576 } 1577 poll_ptr = 0; 1578 poll_cnt = 0; 1579 1580 while (poll_cnt == 0) { 1581 rxre = (struct mpsc_rx_desc *)(pi->rxr + 1582 (pi->rxr_posn*MPSC_RXRE_SIZE)); 1583 dma_cache_sync(pi->port.dev, (void *)rxre, 1584 MPSC_RXRE_SIZE, DMA_FROM_DEVICE); 1585#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1586 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1587 invalidate_dcache_range((ulong)rxre, 1588 (ulong)rxre + MPSC_RXRE_SIZE); 1589#endif 1590 /* 1591 * Loop through Rx descriptors handling ones that have 1592 * been completed. 
1593 */ 1594 while (poll_cnt == 0 && 1595 !((cmdstat = be32_to_cpu(rxre->cmdstat)) & 1596 SDMA_DESC_CMDSTAT_O)){ 1597 bytes_in = be16_to_cpu(rxre->bytecnt); 1598 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); 1599 dma_cache_sync(pi->port.dev, (void *) bp, 1600 MPSC_RXBE_SIZE, DMA_FROM_DEVICE); 1601#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1602 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1603 invalidate_dcache_range((ulong)bp, 1604 (ulong)bp + MPSC_RXBE_SIZE); 1605#endif 1606 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR | 1607 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) && 1608 !(cmdstat & pi->port.ignore_status_mask)) { 1609 poll_buf[poll_cnt] = *bp; 1610 poll_cnt++; 1611 } else { 1612 for (i = 0; i < bytes_in; i++) { 1613 poll_buf[poll_cnt] = *bp++; 1614 poll_cnt++; 1615 } 1616 pi->port.icount.rx += bytes_in; 1617 } 1618 rxre->bytecnt = cpu_to_be16(0); 1619 wmb(); 1620 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | 1621 SDMA_DESC_CMDSTAT_EI | 1622 SDMA_DESC_CMDSTAT_F | 1623 SDMA_DESC_CMDSTAT_L); 1624 wmb(); 1625 dma_cache_sync(pi->port.dev, (void *)rxre, 1626 MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL); 1627#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1628 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1629 flush_dcache_range((ulong)rxre, 1630 (ulong)rxre + MPSC_RXRE_SIZE); 1631#endif 1632 1633 /* Advance to next descriptor */ 1634 pi->rxr_posn = (pi->rxr_posn + 1) & 1635 (MPSC_RXR_ENTRIES - 1); 1636 rxre = (struct mpsc_rx_desc *)(pi->rxr + 1637 (pi->rxr_posn * MPSC_RXRE_SIZE)); 1638 dma_cache_sync(pi->port.dev, (void *)rxre, 1639 MPSC_RXRE_SIZE, DMA_FROM_DEVICE); 1640#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1641 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1642 invalidate_dcache_range((ulong)rxre, 1643 (ulong)rxre + MPSC_RXRE_SIZE); 1644#endif 1645 } 1646 1647 /* Restart rx engine, if its stopped */ 1648 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0) 1649 mpsc_start_rx(pi); 
1650 } 1651 if (poll_cnt) { 1652 poll_cnt--; 1653 return poll_buf[poll_ptr++]; 1654 } 1655 1656 return 0; 1657} 1658 1659 1660static void mpsc_put_poll_char(struct uart_port *port, 1661 unsigned char c) 1662{ 1663 struct mpsc_port_info *pi = 1664 container_of(port, struct mpsc_port_info, port); 1665 u32 data; 1666 1667 data = readl(pi->mpsc_base + MPSC_MPCR); 1668 writeb(c, pi->mpsc_base + MPSC_CHR_1); 1669 mb(); 1670 data = readl(pi->mpsc_base + MPSC_CHR_2); 1671 data |= MPSC_CHR_2_TTCS; 1672 writel(data, pi->mpsc_base + MPSC_CHR_2); 1673 mb(); 1674 1675 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS); 1676} 1677#endif 1678 1679static struct uart_ops mpsc_pops = { 1680 .tx_empty = mpsc_tx_empty, 1681 .set_mctrl = mpsc_set_mctrl, 1682 .get_mctrl = mpsc_get_mctrl, 1683 .stop_tx = mpsc_stop_tx, 1684 .start_tx = mpsc_start_tx, 1685 .stop_rx = mpsc_stop_rx, 1686 .break_ctl = mpsc_break_ctl, 1687 .startup = mpsc_startup, 1688 .shutdown = mpsc_shutdown, 1689 .set_termios = mpsc_set_termios, 1690 .type = mpsc_type, 1691 .release_port = mpsc_release_port, 1692 .request_port = mpsc_request_port, 1693 .config_port = mpsc_config_port, 1694 .verify_port = mpsc_verify_port, 1695#ifdef CONFIG_CONSOLE_POLL 1696 .poll_get_char = mpsc_get_poll_char, 1697 .poll_put_char = mpsc_put_poll_char, 1698#endif 1699}; 1700 1701/* 1702 ****************************************************************************** 1703 * 1704 * Console Interface Routines 1705 * 1706 ****************************************************************************** 1707 */ 1708 1709#ifdef CONFIG_SERIAL_MPSC_CONSOLE 1710static void mpsc_console_write(struct console *co, const char *s, uint count) 1711{ 1712 struct mpsc_port_info *pi = &mpsc_ports[co->index]; 1713 u8 *bp, *dp, add_cr = 0; 1714 int i; 1715 unsigned long iflags; 1716 1717 spin_lock_irqsave(&pi->tx_lock, iflags); 1718 1719 while (pi->txr_head != pi->txr_tail) { 1720 while (mpsc_sdma_tx_active(pi)) 1721 udelay(100); 1722 
mpsc_sdma_intr_ack(pi); 1723 mpsc_tx_intr(pi); 1724 } 1725 1726 while (mpsc_sdma_tx_active(pi)) 1727 udelay(100); 1728 1729 while (count > 0) { 1730 bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); 1731 1732 for (i = 0; i < MPSC_TXBE_SIZE; i++) { 1733 if (count == 0) 1734 break; 1735 1736 if (add_cr) { 1737 *(dp++) = '\r'; 1738 add_cr = 0; 1739 } else { 1740 *(dp++) = *s; 1741 1742 if (*(s++) == '\n') { /* add '\r' after '\n' */ 1743 add_cr = 1; 1744 count++; 1745 } 1746 } 1747 1748 count--; 1749 } 1750 1751 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, 1752 DMA_BIDIRECTIONAL); 1753#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1754 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1755 flush_dcache_range((ulong)bp, 1756 (ulong)bp + MPSC_TXBE_SIZE); 1757#endif 1758 mpsc_setup_tx_desc(pi, i, 0); 1759 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1); 1760 mpsc_sdma_start_tx(pi); 1761 1762 while (mpsc_sdma_tx_active(pi)) 1763 udelay(100); 1764 1765 pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1); 1766 } 1767 1768 spin_unlock_irqrestore(&pi->tx_lock, iflags); 1769} 1770 1771static int __init mpsc_console_setup(struct console *co, char *options) 1772{ 1773 struct mpsc_port_info *pi; 1774 int baud, bits, parity, flow; 1775 1776 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options); 1777 1778 if (co->index >= MPSC_NUM_CTLRS) 1779 co->index = 0; 1780 1781 pi = &mpsc_ports[co->index]; 1782 1783 baud = pi->default_baud; 1784 bits = pi->default_bits; 1785 parity = pi->default_parity; 1786 flow = pi->default_flow; 1787 1788 if (!pi->port.ops) 1789 return -ENODEV; 1790 1791 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */ 1792 1793 if (options) 1794 uart_parse_options(options, &baud, &parity, &bits, &flow); 1795 1796 return uart_set_options(&pi->port, co, baud, parity, bits, flow); 1797} 1798 1799static struct console mpsc_console = { 1800 .name = MPSC_DEV_NAME, 1801 .write = 
mpsc_console_write, 1802 .device = uart_console_device, 1803 .setup = mpsc_console_setup, 1804 .flags = CON_PRINTBUFFER, 1805 .index = -1, 1806 .data = &mpsc_reg, 1807}; 1808 1809static int __init mpsc_late_console_init(void) 1810{ 1811 pr_debug("mpsc_late_console_init: Enter\n"); 1812 1813 if (!(mpsc_console.flags & CON_ENABLED)) 1814 register_console(&mpsc_console); 1815 return 0; 1816} 1817 1818late_initcall(mpsc_late_console_init); 1819 1820#define MPSC_CONSOLE &mpsc_console 1821#else 1822#define MPSC_CONSOLE NULL 1823#endif 1824/* 1825 ****************************************************************************** 1826 * 1827 * Dummy Platform Driver to extract & map shared register regions 1828 * 1829 ****************************************************************************** 1830 */ 1831static void mpsc_resource_err(char *s) 1832{ 1833 printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s); 1834} 1835 1836static int mpsc_shared_map_regs(struct platform_device *pd) 1837{ 1838 struct resource *r; 1839 1840 if ((r = platform_get_resource(pd, IORESOURCE_MEM, 1841 MPSC_ROUTING_BASE_ORDER)) 1842 && request_mem_region(r->start, 1843 MPSC_ROUTING_REG_BLOCK_SIZE, 1844 "mpsc_routing_regs")) { 1845 mpsc_shared_regs.mpsc_routing_base = ioremap(r->start, 1846 MPSC_ROUTING_REG_BLOCK_SIZE); 1847 mpsc_shared_regs.mpsc_routing_base_p = r->start; 1848 } else { 1849 mpsc_resource_err("MPSC routing base"); 1850 return -ENOMEM; 1851 } 1852 1853 if ((r = platform_get_resource(pd, IORESOURCE_MEM, 1854 MPSC_SDMA_INTR_BASE_ORDER)) 1855 && request_mem_region(r->start, 1856 MPSC_SDMA_INTR_REG_BLOCK_SIZE, 1857 "sdma_intr_regs")) { 1858 mpsc_shared_regs.sdma_intr_base = ioremap(r->start, 1859 MPSC_SDMA_INTR_REG_BLOCK_SIZE); 1860 mpsc_shared_regs.sdma_intr_base_p = r->start; 1861 } else { 1862 iounmap(mpsc_shared_regs.mpsc_routing_base); 1863 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p, 1864 MPSC_ROUTING_REG_BLOCK_SIZE); 1865 mpsc_resource_err("SDMA intr 
base"); 1866 return -ENOMEM; 1867 } 1868 1869 return 0; 1870} 1871 1872static void mpsc_shared_unmap_regs(void) 1873{ 1874 if (!mpsc_shared_regs.mpsc_routing_base) { 1875 iounmap(mpsc_shared_regs.mpsc_routing_base); 1876 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p, 1877 MPSC_ROUTING_REG_BLOCK_SIZE); 1878 } 1879 if (!mpsc_shared_regs.sdma_intr_base) { 1880 iounmap(mpsc_shared_regs.sdma_intr_base); 1881 release_mem_region(mpsc_shared_regs.sdma_intr_base_p, 1882 MPSC_SDMA_INTR_REG_BLOCK_SIZE); 1883 } 1884 1885 mpsc_shared_regs.mpsc_routing_base = NULL; 1886 mpsc_shared_regs.sdma_intr_base = NULL; 1887 1888 mpsc_shared_regs.mpsc_routing_base_p = 0; 1889 mpsc_shared_regs.sdma_intr_base_p = 0; 1890} 1891 1892static int mpsc_shared_drv_probe(struct platform_device *dev) 1893{ 1894 struct mpsc_shared_pdata *pdata; 1895 int rc = -ENODEV; 1896 1897 if (dev->id == 0) { 1898 if (!(rc = mpsc_shared_map_regs(dev))) { 1899 pdata = (struct mpsc_shared_pdata *) 1900 dev_get_platdata(&dev->dev); 1901 1902 mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val; 1903 mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val; 1904 mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val; 1905 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 1906 pdata->intr_cause_val; 1907 mpsc_shared_regs.SDMA_INTR_MASK_m = 1908 pdata->intr_mask_val; 1909 1910 rc = 0; 1911 } 1912 } 1913 1914 return rc; 1915} 1916 1917static int mpsc_shared_drv_remove(struct platform_device *dev) 1918{ 1919 int rc = -ENODEV; 1920 1921 if (dev->id == 0) { 1922 mpsc_shared_unmap_regs(); 1923 mpsc_shared_regs.MPSC_MRR_m = 0; 1924 mpsc_shared_regs.MPSC_RCRR_m = 0; 1925 mpsc_shared_regs.MPSC_TCRR_m = 0; 1926 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0; 1927 mpsc_shared_regs.SDMA_INTR_MASK_m = 0; 1928 rc = 0; 1929 } 1930 1931 return rc; 1932} 1933 1934static struct platform_driver mpsc_shared_driver = { 1935 .probe = mpsc_shared_drv_probe, 1936 .remove = mpsc_shared_drv_remove, 1937 .driver = { 1938 .name = MPSC_SHARED_NAME, 1939 }, 1940}; 1941 1942/* 1943 
****************************************************************************** 1944 * 1945 * Driver Interface Routines 1946 * 1947 ****************************************************************************** 1948 */ 1949static struct uart_driver mpsc_reg = { 1950 .owner = THIS_MODULE, 1951 .driver_name = MPSC_DRIVER_NAME, 1952 .dev_name = MPSC_DEV_NAME, 1953 .major = MPSC_MAJOR, 1954 .minor = MPSC_MINOR_START, 1955 .nr = MPSC_NUM_CTLRS, 1956 .cons = MPSC_CONSOLE, 1957}; 1958 1959static int mpsc_drv_map_regs(struct mpsc_port_info *pi, 1960 struct platform_device *pd) 1961{ 1962 struct resource *r; 1963 1964 if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER)) 1965 && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE, 1966 "mpsc_regs")) { 1967 pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE); 1968 pi->mpsc_base_p = r->start; 1969 } else { 1970 mpsc_resource_err("MPSC base"); 1971 goto err; 1972 } 1973 1974 if ((r = platform_get_resource(pd, IORESOURCE_MEM, 1975 MPSC_SDMA_BASE_ORDER)) 1976 && request_mem_region(r->start, 1977 MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) { 1978 pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE); 1979 pi->sdma_base_p = r->start; 1980 } else { 1981 mpsc_resource_err("SDMA base"); 1982 if (pi->mpsc_base) { 1983 iounmap(pi->mpsc_base); 1984 pi->mpsc_base = NULL; 1985 } 1986 goto err; 1987 } 1988 1989 if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER)) 1990 && request_mem_region(r->start, 1991 MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) { 1992 pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE); 1993 pi->brg_base_p = r->start; 1994 } else { 1995 mpsc_resource_err("BRG base"); 1996 if (pi->mpsc_base) { 1997 iounmap(pi->mpsc_base); 1998 pi->mpsc_base = NULL; 1999 } 2000 if (pi->sdma_base) { 2001 iounmap(pi->sdma_base); 2002 pi->sdma_base = NULL; 2003 } 2004 goto err; 2005 } 2006 return 0; 2007 2008err: 2009 return -ENOMEM; 2010} 2011 2012static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi) 
2013{ 2014 if (!pi->mpsc_base) { 2015 iounmap(pi->mpsc_base); 2016 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE); 2017 } 2018 if (!pi->sdma_base) { 2019 iounmap(pi->sdma_base); 2020 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE); 2021 } 2022 if (!pi->brg_base) { 2023 iounmap(pi->brg_base); 2024 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE); 2025 } 2026 2027 pi->mpsc_base = NULL; 2028 pi->sdma_base = NULL; 2029 pi->brg_base = NULL; 2030 2031 pi->mpsc_base_p = 0; 2032 pi->sdma_base_p = 0; 2033 pi->brg_base_p = 0; 2034} 2035 2036static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi, 2037 struct platform_device *pd, int num) 2038{ 2039 struct mpsc_pdata *pdata; 2040 2041 pdata = dev_get_platdata(&pd->dev); 2042 2043 pi->port.uartclk = pdata->brg_clk_freq; 2044 pi->port.iotype = UPIO_MEM; 2045 pi->port.line = num; 2046 pi->port.type = PORT_MPSC; 2047 pi->port.fifosize = MPSC_TXBE_SIZE; 2048 pi->port.membase = pi->mpsc_base; 2049 pi->port.mapbase = (ulong)pi->mpsc_base; 2050 pi->port.ops = &mpsc_pops; 2051 2052 pi->mirror_regs = pdata->mirror_regs; 2053 pi->cache_mgmt = pdata->cache_mgmt; 2054 pi->brg_can_tune = pdata->brg_can_tune; 2055 pi->brg_clk_src = pdata->brg_clk_src; 2056 pi->mpsc_max_idle = pdata->max_idle; 2057 pi->default_baud = pdata->default_baud; 2058 pi->default_bits = pdata->default_bits; 2059 pi->default_parity = pdata->default_parity; 2060 pi->default_flow = pdata->default_flow; 2061 2062 /* Initial values of mirrored regs */ 2063 pi->MPSC_CHR_1_m = pdata->chr_1_val; 2064 pi->MPSC_CHR_2_m = pdata->chr_2_val; 2065 pi->MPSC_CHR_10_m = pdata->chr_10_val; 2066 pi->MPSC_MPCR_m = pdata->mpcr_val; 2067 pi->BRG_BCR_m = pdata->bcr_val; 2068 2069 pi->shared_regs = &mpsc_shared_regs; 2070 2071 pi->port.irq = platform_get_irq(pd, 0); 2072} 2073 2074static int mpsc_drv_probe(struct platform_device *dev) 2075{ 2076 struct mpsc_port_info *pi; 2077 int rc = -ENODEV; 2078 2079 pr_debug("mpsc_drv_probe: Adding MPSC 
%d\n", dev->id); 2080 2081 if (dev->id < MPSC_NUM_CTLRS) { 2082 pi = &mpsc_ports[dev->id]; 2083 2084 if (!(rc = mpsc_drv_map_regs(pi, dev))) { 2085 mpsc_drv_get_platform_data(pi, dev, dev->id); 2086 pi->port.dev = &dev->dev; 2087 2088 if (!(rc = mpsc_make_ready(pi))) { 2089 spin_lock_init(&pi->tx_lock); 2090 if (!(rc = uart_add_one_port(&mpsc_reg, 2091 &pi->port))) { 2092 rc = 0; 2093 } else { 2094 mpsc_release_port((struct uart_port *) 2095 pi); 2096 mpsc_drv_unmap_regs(pi); 2097 } 2098 } else { 2099 mpsc_drv_unmap_regs(pi); 2100 } 2101 } 2102 } 2103 2104 return rc; 2105} 2106 2107static int mpsc_drv_remove(struct platform_device *dev) 2108{ 2109 pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id); 2110 2111 if (dev->id < MPSC_NUM_CTLRS) { 2112 uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port); 2113 mpsc_release_port((struct uart_port *) 2114 &mpsc_ports[dev->id].port); 2115 mpsc_drv_unmap_regs(&mpsc_ports[dev->id]); 2116 return 0; 2117 } else { 2118 return -ENODEV; 2119 } 2120} 2121 2122static struct platform_driver mpsc_driver = { 2123 .probe = mpsc_drv_probe, 2124 .remove = mpsc_drv_remove, 2125 .driver = { 2126 .name = MPSC_CTLR_NAME, 2127 }, 2128}; 2129 2130static int __init mpsc_drv_init(void) 2131{ 2132 int rc; 2133 2134 printk(KERN_INFO "Serial: MPSC driver\n"); 2135 2136 memset(mpsc_ports, 0, sizeof(mpsc_ports)); 2137 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs)); 2138 2139 if (!(rc = uart_register_driver(&mpsc_reg))) { 2140 if (!(rc = platform_driver_register(&mpsc_shared_driver))) { 2141 if ((rc = platform_driver_register(&mpsc_driver))) { 2142 platform_driver_unregister(&mpsc_shared_driver); 2143 uart_unregister_driver(&mpsc_reg); 2144 } 2145 } else { 2146 uart_unregister_driver(&mpsc_reg); 2147 } 2148 } 2149 2150 return rc; 2151} 2152 2153static void __exit mpsc_drv_exit(void) 2154{ 2155 platform_driver_unregister(&mpsc_driver); 2156 platform_driver_unregister(&mpsc_shared_driver); 2157 uart_unregister_driver(&mpsc_reg); 2158 
	/* Scrub driver state so a re-load starts from a clean slate */
	memset(mpsc_ports, 0, sizeof(mpsc_ports));
	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
}

module_init(mpsc_drv_init);
module_exit(mpsc_drv_exit);

MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
MODULE_VERSION(MPSC_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
MODULE_ALIAS("platform:" MPSC_CTLR_NAME);