root/drivers/scsi/sun_esp.c

DEFINITIONS

This source file includes the following definitions:
  1. esp_sbus_setup_dma
  2. esp_sbus_map_regs
  3. esp_sbus_map_command_block
  4. esp_sbus_register_irq
  5. esp_get_scsi_id
  6. esp_get_differential
  7. esp_get_clock_params
  8. esp_get_bursts
  9. esp_sbus_get_props
  10. sbus_esp_write8
  11. sbus_esp_read8
  12. sbus_esp_irq_pending
  13. sbus_esp_reset_dma
  14. sbus_esp_dma_drain
  15. sbus_esp_dma_invalidate
  16. sbus_esp_send_dma_cmd
  17. sbus_esp_dma_error
  18. esp_sbus_probe_one
  19. esp_sbus_probe
  20. esp_sbus_remove
  21. sunesp_init
  22. sunesp_exit

// SPDX-License-Identifier: GPL-2.0-only
/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME         "sun_esp"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_VERSION             "1.100"
#define DRV_MODULE_RELDATE      "August 27, 2008"

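/* Accessors for the DVMA companion's registers.  These assume that
 * esp->dma_regs has already been mapped by esp_sbus_setup_dma()
 * below.  Most callers do a read-modify-write of DMA_CSR, e.g.:
 *
 *	u32 csr = dma_read32(DMA_CSR);
 *	dma_write32(csr | DMA_INT_ENAB, DMA_CSR);
 */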
#define dma_read32(REG) \
        sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
        sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
        dvmarev0,
        dvmaesc1,
        dvmarev1,
        dvmarev2,
        dvmarev3,
        dvmarevplus,
        dvmahme
};

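/* Map the DVMA companion's registers and identify which revision we
 * are talking to from the device-ID field of its CSR.  The revision
 * drives the reset, drain and invalidate handling further down.
 */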
static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
        esp->dma = dma_of;

        esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
                                   resource_size(&dma_of->resource[0]),
                                   "espdma");
        if (!esp->dma_regs)
                return -ENOMEM;

        switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
        case DMA_VERS0:
                esp->dmarev = dvmarev0;
                break;
        case DMA_ESCV1:
                esp->dmarev = dvmaesc1;
                break;
        case DMA_VERS1:
                esp->dmarev = dvmarev1;
                break;
        case DMA_VERS2:
                esp->dmarev = dvmarev2;
                break;
        case DMA_VERHME:
                esp->dmarev = dvmahme;
                break;
        case DMA_VERSPLUS:
                esp->dmarev = dvmarevplus;
                break;
        }

        return 0;
}

static int esp_sbus_map_regs(struct esp *esp, int hme)
{
        struct platform_device *op = to_platform_device(esp->dev);
        struct resource *res;

        /* On HME, two reg sets exist, first is DVMA,
         * second is ESP registers.
         */
        if (hme)
                res = &op->resource[1];
        else
                res = &op->resource[0];

        esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
        if (!esp->regs)
                return -ENOMEM;

        return 0;
}

static int esp_sbus_map_command_block(struct esp *esp)
{
        esp->command_block = dma_alloc_coherent(esp->dev, 16,
                                                &esp->command_block_dma,
                                                GFP_KERNEL);
        if (!esp->command_block)
                return -ENOMEM;
        return 0;
}

static int esp_sbus_register_irq(struct esp *esp)
{
        struct Scsi_Host *host = esp->host;
        struct platform_device *op = to_platform_device(esp->dev);

        host->irq = op->archdata.irqs[0];
        return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

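/* The initiator ID can live in several places depending on the PROM:
 * try "initiator-id" on the ESP node first, then "scsi-initiator-id"
 * on the ESP node, then "scsi-initiator-id" on the DMA node, and
 * finally fall back to the conventional host ID of 7.
 */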
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
        struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *dp;

        dp = op->dev.of_node;
        esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
        if (esp->scsi_id != 0xff)
                goto done;

        esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
        if (esp->scsi_id != 0xff)
                goto done;

        esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
                                             "scsi-initiator-id", 7);

done:
        esp->host->this_id = esp->scsi_id;
        esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void esp_get_differential(struct esp *esp)
{
        struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *dp;

        dp = op->dev.of_node;
        if (of_find_property(dp, "differential", NULL))
                esp->flags |= ESP_FLAG_DIFFERENTIAL;
        else
                esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void esp_get_clock_params(struct esp *esp)
{
        struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *bus_dp, *dp;
        int fmhz;

        dp = op->dev.of_node;
        bus_dp = dp->parent;

        fmhz = of_getintprop_default(dp, "clock-frequency", 0);
        if (fmhz == 0)
                fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

        esp->cfreq = fmhz;
}

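/* The usable burst sizes are the intersection of the "burst-sizes"
 * properties on the ESP node, the DMA node and the DMA node's parent
 * bus.  If the combined mask looks bogus (all ones) or lacks 16-byte
 * or 32-byte bursts, fall back to the conservative default mask of
 * (DMA_BURST32 - 1).
 */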
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
        struct device_node *dma_dp = dma_of->dev.of_node;
        struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *dp;
        u8 bursts, val;

        dp = op->dev.of_node;
        bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
        val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
        if (val != 0xff)
                bursts &= val;

        val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
        if (val != 0xff)
                bursts &= val;

        if (bursts == 0xff ||
            (bursts & DMA_BURST16) == 0 ||
            (bursts & DMA_BURST32) == 0)
                bursts = (DMA_BURST32 - 1);

        esp->bursts = bursts;
}

static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
        esp_get_scsi_id(esp, espdma);
        esp_get_differential(esp);
        esp_get_clock_params(esp);
        esp_get_bursts(esp, espdma);
}

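/* ESP register accessors.  The core esp_scsi layer passes register
 * indices; on SBUS each byte-wide ESP register sits four bytes apart,
 * hence the "reg * 4UL" scaling when forming the address.
 */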
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
        sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
        return sbus_readb(esp->regs + (reg * 4UL));
}

static int sbus_esp_irq_pending(struct esp *esp)
{
        if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
                return 1;
        return 0;
}

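/* Put the DVMA into a known state.  Non-HME revisions get a SCSI
 * reset pulse through DMA_CSR first, followed by per-revision CSR
 * fixups (clock select, burst size, and the ESC1-specific enables).
 * HME/FAS366 parts get a full FAS366 reset and have the cached
 * prev_hme_dmacsr value rebuilt from scratch.  Interrupts are
 * re-enabled at the end.
 */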
static void sbus_esp_reset_dma(struct esp *esp)
{
        int can_do_burst16, can_do_burst32, can_do_burst64;
        int can_do_sbus64, lim;
        struct platform_device *op = to_platform_device(esp->dev);
        u32 val;

        can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
        can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
        can_do_burst64 = 0;
        can_do_sbus64 = 0;
        if (sbus_can_dma_64bit())
                can_do_sbus64 = 1;
        if (sbus_can_burst64())
                can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

        /* Put the DVMA into a known state. */
        if (esp->dmarev != dvmahme) {
                val = dma_read32(DMA_CSR);
                dma_write32(val | DMA_RST_SCSI, DMA_CSR);
                dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
        }
        switch (esp->dmarev) {
        case dvmahme:
                dma_write32(DMA_RESET_FAS366, DMA_CSR);
                dma_write32(DMA_RST_SCSI, DMA_CSR);

                esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
                                        DMA_SCSI_DISAB | DMA_INT_ENAB);

                esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
                                          DMA_BRST_SZ);

                if (can_do_burst64)
                        esp->prev_hme_dmacsr |= DMA_BRST64;
                else if (can_do_burst32)
                        esp->prev_hme_dmacsr |= DMA_BRST32;

                if (can_do_sbus64) {
                        esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
                        sbus_set_sbus64(&op->dev, esp->bursts);
                }

                lim = 1000;
                while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
                        if (--lim == 0) {
                                printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
                                       "will not clear!\n",
                                       esp->host->unique_id);
                                break;
                        }
                        udelay(1);
                }

                dma_write32(0, DMA_CSR);
                dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

                dma_write32(0, DMA_ADDR);
                break;

        case dvmarev2:
                if (esp->rev != ESP100) {
                        val = dma_read32(DMA_CSR);
                        dma_write32(val | DMA_3CLKS, DMA_CSR);
                }
                break;

        case dvmarev3:
                val = dma_read32(DMA_CSR);
                val &= ~DMA_3CLKS;
                val |= DMA_2CLKS;
                if (can_do_burst32) {
                        val &= ~DMA_BRST_SZ;
                        val |= DMA_BRST32;
                }
                dma_write32(val, DMA_CSR);
                break;

        case dvmaesc1:
                val = dma_read32(DMA_CSR);
                val |= DMA_ADD_ENABLE;
                val &= ~DMA_BCNT_ENAB;
                if (!can_do_burst32 && can_do_burst16) {
                        val |= DMA_ESC_BURST;
                } else {
                        val &= ~(DMA_ESC_BURST);
                }
                dma_write32(val, DMA_CSR);
                break;

        default:
                break;
        }

        /* Enable interrupts.  */
        val = dma_read32(DMA_CSR);
        dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

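/* Wait for the DVMA FIFO to drain back to memory after a transfer.
 * HME parts never need this.  Rev3 and ESC1 are simply polled, while
 * the other revisions first have DMA_FIFO_STDRAIN set to force the
 * drain before polling.
 */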
static void sbus_esp_dma_drain(struct esp *esp)
{
        u32 csr;
        int lim;

        if (esp->dmarev == dvmahme)
                return;

        csr = dma_read32(DMA_CSR);
        if (!(csr & DMA_FIFO_ISDRAIN))
                return;

        if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
                dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

        lim = 1000;
        while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
                if (--lim == 0) {
                        printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
                               esp->host->unique_id);
                        break;
                }
                udelay(1);
        }
}

static void sbus_esp_dma_invalidate(struct esp *esp)
{
        if (esp->dmarev == dvmahme) {
                dma_write32(DMA_RST_SCSI, DMA_CSR);

                esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
                                         (DMA_PARITY_OFF | DMA_2CLKS |
                                          DMA_SCSI_DISAB | DMA_INT_ENAB)) &
                                        ~(DMA_ST_WRITE | DMA_ENABLE));

                dma_write32(0, DMA_CSR);
                dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

                /* This is necessary to avoid having the SCSI channel
                 * engine lock up on us.
                 */
                dma_write32(0, DMA_ADDR);
        } else {
                u32 val;
                int lim;

                lim = 1000;
                while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
                        if (--lim == 0) {
                                printk(KERN_ALERT PFX "esp%d: DMA will not "
                                       "invalidate!\n", esp->host->unique_id);
                                break;
                        }
                        udelay(1);
                }

                val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
                val |= DMA_FIFO_INV;
                dma_write32(val, DMA_CSR);
                val &= ~DMA_FIFO_INV;
                dma_write32(val, DMA_CSR);
        }
}

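/* Load the transfer count into the ESP and program the DVMA with the
 * buffer address, count and direction, then issue the (DMA) command.
 * On FASHME the extended count registers (FAS_RLO/FAS_RHI) are used
 * and the ESP command is issued before the DVMA is enabled; on the
 * older revisions the DVMA is set up first.  ESC1 additionally has
 * its DMA byte count padded out to the next page boundary.
 */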
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
                                  u32 dma_count, int write, u8 cmd)
{
        u32 csr;

        BUG_ON(!(cmd & ESP_CMD_DMA));

        sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
        if (esp->rev == FASHME) {
                sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
                sbus_esp_write8(esp, 0, FAS_RHI);

                scsi_esp_cmd(esp, cmd);

                csr = esp->prev_hme_dmacsr;
                csr |= DMA_SCSI_DISAB | DMA_ENABLE;
                if (write)
                        csr |= DMA_ST_WRITE;
                else
                        csr &= ~DMA_ST_WRITE;
                esp->prev_hme_dmacsr = csr;

                dma_write32(dma_count, DMA_COUNT);
                dma_write32(addr, DMA_ADDR);
                dma_write32(csr, DMA_CSR);
        } else {
                csr = dma_read32(DMA_CSR);
                csr |= DMA_ENABLE;
                if (write)
                        csr |= DMA_ST_WRITE;
                else
                        csr &= ~DMA_ST_WRITE;
                dma_write32(csr, DMA_CSR);
                if (esp->dmarev == dvmaesc1) {
                        u32 end = PAGE_ALIGN(addr + dma_count + 16U);
                        dma_write32(end - addr, DMA_COUNT);
                }
                dma_write32(addr, DMA_ADDR);

                scsi_esp_cmd(esp, cmd);
        }
}

static int sbus_esp_dma_error(struct esp *esp)
{
        u32 csr = dma_read32(DMA_CSR);

        if (csr & DMA_HNDL_ERROR)
                return 1;

        return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
        .esp_write8     =       sbus_esp_write8,
        .esp_read8      =       sbus_esp_read8,
        .irq_pending    =       sbus_esp_irq_pending,
        .reset_dma      =       sbus_esp_reset_dma,
        .dma_drain      =       sbus_esp_dma_drain,
        .dma_invalidate =       sbus_esp_dma_invalidate,
        .send_dma_cmd   =       sbus_esp_send_dma_cmd,
        .dma_error      =       sbus_esp_dma_error,
};

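/* Common probe path for both plain ESP and HME/FAS variants: allocate
 * the Scsi_Host, map the DVMA and ESP registers, set up the command
 * block and IRQ, pull the PROM properties, and hand the host over to
 * the core esp_scsi layer via scsi_esp_register().  Each failure
 * point unwinds exactly what has been set up so far.
 */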
static int esp_sbus_probe_one(struct platform_device *op,
                              struct platform_device *espdma, int hme)
{
        struct scsi_host_template *tpnt = &scsi_esp_template;
        struct Scsi_Host *host;
        struct esp *esp;
        int err;

        host = scsi_host_alloc(tpnt, sizeof(struct esp));

        err = -ENOMEM;
        if (!host)
                goto fail;

        host->max_id = (hme ? 16 : 8);
        esp = shost_priv(host);

        esp->host = host;
        esp->dev = &op->dev;
        esp->ops = &sbus_esp_ops;

        if (hme)
                esp->flags |= ESP_FLAG_WIDE_CAPABLE;

        err = esp_sbus_setup_dma(esp, espdma);
        if (err < 0)
                goto fail_unlink;

        err = esp_sbus_map_regs(esp, hme);
        if (err < 0)
                goto fail_unlink;

        err = esp_sbus_map_command_block(esp);
        if (err < 0)
                goto fail_unmap_regs;

        err = esp_sbus_register_irq(esp);
        if (err < 0)
                goto fail_unmap_command_block;

        esp_sbus_get_props(esp, espdma);

        /* Before we try to touch the ESP chip, ESC1 dma can
         * come up with the reset bit set, so make sure that
         * is clear first.
         */
        if (esp->dmarev == dvmaesc1) {
                u32 val = dma_read32(DMA_CSR);

                dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
        }

        dev_set_drvdata(&op->dev, esp);

        err = scsi_esp_register(esp);
        if (err)
                goto fail_free_irq;

        return 0;

fail_free_irq:
        free_irq(host->irq, esp);
fail_unmap_command_block:
        dma_free_coherent(&op->dev, 16,
                          esp->command_block,
                          esp->command_block_dma);
fail_unmap_regs:
        of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
        scsi_host_put(host);
fail:
        return err;
}

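/* Figure out which flavour of hardware we are looking at.  A plain
 * ESP/FAS sits under an "espdma" or "dma" parent node which provides
 * the DVMA registers; on the HME "SUNW,fas" variant the DVMA lives in
 * the device's own first reg set (see esp_sbus_map_regs()), so the
 * ESP node itself doubles as the DMA device.
 */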
static int esp_sbus_probe(struct platform_device *op)
{
        struct device_node *dma_node = NULL;
        struct device_node *dp = op->dev.of_node;
        struct platform_device *dma_of = NULL;
        int hme = 0;
        int ret;

        if (of_node_name_eq(dp->parent, "espdma") ||
            of_node_name_eq(dp->parent, "dma"))
                dma_node = dp->parent;
        else if (of_node_name_eq(dp, "SUNW,fas")) {
                dma_node = op->dev.of_node;
                hme = 1;
        }
        if (dma_node)
                dma_of = of_find_device_by_node(dma_node);
        if (!dma_of)
                return -ENODEV;

        ret = esp_sbus_probe_one(op, dma_of, hme);
        if (ret)
                put_device(&dma_of->dev);

        return ret;
}

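/* Tear down in roughly the reverse order of probe: unregister from
 * the core, quiesce DVMA interrupts, release the IRQ, free the
 * command block, unmap both register ranges, and drop the reference
 * taken on the DMA device by of_find_device_by_node() at probe time.
 */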
static int esp_sbus_remove(struct platform_device *op)
{
        struct esp *esp = dev_get_drvdata(&op->dev);
        struct platform_device *dma_of = esp->dma;
        unsigned int irq = esp->host->irq;
        bool is_hme;
        u32 val;

        scsi_esp_unregister(esp);

        /* Disable interrupts.  */
        val = dma_read32(DMA_CSR);
        dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

        free_irq(irq, esp);

        is_hme = (esp->dmarev == dvmahme);

        dma_free_coherent(&op->dev, 16,
                          esp->command_block,
                          esp->command_block_dma);
        of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
                   SBUS_ESP_REG_SIZE);
        of_iounmap(&dma_of->resource[0], esp->dma_regs,
                   resource_size(&dma_of->resource[0]));

        scsi_host_put(esp->host);

        dev_set_drvdata(&op->dev, NULL);

        put_device(&dma_of->dev);

        return 0;
}

static const struct of_device_id esp_match[] = {
        {
                .name = "SUNW,esp",
        },
        {
                .name = "SUNW,fas",
        },
        {
                .name = "esp",
        },
        {},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
        .driver = {
                .name = "esp",
                .of_match_table = esp_match,
        },
        .probe          = esp_sbus_probe,
        .remove         = esp_sbus_remove,
};

static int __init sunesp_init(void)
{
        return platform_driver_register(&esp_sbus_driver);
}

static void __exit sunesp_exit(void)
{
        platform_driver_unregister(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);
