/*
 * Intel Haswell SST DSP driver
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../haswell/sst-haswell-ipc.h"

#include <trace/events/hswadsp.h>

#define SST_HSW_FW_SIGNATURE_SIZE	4
#define SST_HSW_FW_SIGN			"$SST"
#define SST_HSW_FW_LIB_SIGN		"$LIB"

#define SST_WPT_SHIM_OFFSET	0xFB000
#define SST_LP_SHIM_OFFSET	0xE7000
#define SST_WPT_IRAM_OFFSET	0xA0000
#define SST_LP_IRAM_OFFSET	0x80000
#define SST_WPT_DSP_DRAM_OFFSET	0x400000
#define SST_WPT_DSP_IRAM_OFFSET	0x00000
#define SST_LPT_DSP_DRAM_OFFSET	0x400000
#define SST_LPT_DSP_IRAM_OFFSET	0x00000

#define SST_SHIM_PM_REG		0x84

#define SST_HSW_IRAM	1
#define SST_HSW_DRAM	2
#define SST_HSW_REGS	3

struct dma_block_info {
	__le32 type;		/* IRAM/DRAM */
	__le32 size;		/* Bytes */
	__le32 ram_offset;	/* Offset in I/DRAM */
	__le32 rsvd;		/* Reserved field */
} __attribute__((packed));

struct fw_module_info {
	__le32 persistent_size;
	__le32 scratch_size;
} __attribute__((packed));

struct fw_header {
	unsigned char signature[SST_HSW_FW_SIGNATURE_SIZE]; /* FW signature */
	__le32 file_size;	/* size of fw minus this header */
	__le32 modules;		/* # of modules */
	__le32 file_format;	/* version of header format */
	__le32 reserved[4];
} __attribute__((packed));

struct fw_module_header {
	unsigned char signature[SST_HSW_FW_SIGNATURE_SIZE]; /* module signature */
	__le32 mod_size;	/* size of module */
	__le32 blocks;		/* # of blocks */
	__le16 padding;
	__le16 type;		/* codec type, pp lib */
	__le32 entry_point;
	struct fw_module_info info;
} __attribute__((packed));

static void hsw_free(struct sst_dsp *sst);
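
/*
 * Firmware image layout as assumed by the parsers below (summarised from
 * the parsing code itself, not from a separate specification):
 *
 *   struct fw_header                  - "$SST" signature, file size, #modules
 *     struct fw_module_header [0]     - signature, type, entry point, #blocks
 *       struct dma_block_info [0..n]  - IRAM/DRAM target, size, ram offset,
 *                                       immediately followed by block data
 *     struct fw_module_header [1]
 *       ...
 *
 * Every header is followed directly by its payload, so the parsers walk
 * the image by advancing sizeof(header) + payload size at each step.
 */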
static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
	struct fw_module_header *module)
{
	struct dma_block_info *block;
	struct sst_module *mod;
	struct sst_module_template template;
	int count, ret;
	void __iomem *ram;

	/* TODO: allowed module types need to be configurable */
	if (module->type != SST_HSW_MODULE_BASE_FW
		&& module->type != SST_HSW_MODULE_PCM_SYSTEM
		&& module->type != SST_HSW_MODULE_PCM
		&& module->type != SST_HSW_MODULE_PCM_REFERENCE
		&& module->type != SST_HSW_MODULE_PCM_CAPTURE
		&& module->type != SST_HSW_MODULE_WAVES
		&& module->type != SST_HSW_MODULE_LPAL)
		return 0;

	dev_dbg(dsp->dev, "new module sign 0x%s size 0x%x blocks 0x%x type 0x%x\n",
		module->signature, module->mod_size,
		module->blocks, module->type);
	dev_dbg(dsp->dev, " entrypoint 0x%x\n", module->entry_point);
	dev_dbg(dsp->dev, " persistent 0x%x scratch 0x%x\n",
		module->info.persistent_size, module->info.scratch_size);

	memset(&template, 0, sizeof(template));
	template.id = module->type;
	template.entry = module->entry_point - 4;
	template.persistent_size = module->info.persistent_size;
	template.scratch_size = module->info.scratch_size;

	mod = sst_module_new(fw, &template, NULL);
	if (mod == NULL)
		return -ENOMEM;

	block = (void *)module + sizeof(*module);

	for (count = 0; count < module->blocks; count++) {

		if (block->size <= 0) {
			dev_err(dsp->dev,
				"error: block %d size invalid\n", count);
			sst_module_free(mod);
			return -EINVAL;
		}

		switch (block->type) {
		case SST_HSW_IRAM:
			ram = dsp->addr.lpe;
			mod->offset =
				block->ram_offset + dsp->addr.iram_offset;
			mod->type = SST_MEM_IRAM;
			break;
		case SST_HSW_DRAM:
		case SST_HSW_REGS:
			ram = dsp->addr.lpe;
			mod->offset = block->ram_offset;
			mod->type = SST_MEM_DRAM;
			break;
		default:
			dev_err(dsp->dev, "error: bad type 0x%x for block 0x%x\n",
				block->type, count);
			sst_module_free(mod);
			return -EINVAL;
		}

		mod->size = block->size;
		mod->data = (void *)block + sizeof(*block);
		mod->data_offset = mod->data - fw->dma_buf;

		dev_dbg(dsp->dev, "module block %d type 0x%x size 0x%x ==> ram %p offset 0x%x\n",
			count, mod->type, block->size, ram,
			block->ram_offset);

		ret = sst_module_alloc_blocks(mod);
		if (ret < 0) {
			dev_err(dsp->dev, "error: could not allocate blocks for module %d\n",
				count);
			sst_module_free(mod);
			return ret;
		}

		block = (void *)block + sizeof(*block) + block->size;
	}
	mod->state = SST_MODULE_STATE_LOADED;

	return 0;
}

static int hsw_parse_fw_image(struct sst_fw *sst_fw)
{
	struct fw_header *header;
	struct fw_module_header *module;
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret, count;

	/* Read the header information from the data pointer */
	header = (struct fw_header *)sst_fw->dma_buf;

	/* verify FW */
	if ((strncmp(header->signature, SST_HSW_FW_SIGN,
		SST_HSW_FW_SIGNATURE_SIZE) != 0) ||
		(sst_fw->size != header->file_size + sizeof(*header))) {
		dev_err(dsp->dev, "error: invalid fw signature or file size mismatch\n");
		return -EINVAL;
	}

	dev_dbg(dsp->dev, "header size=0x%x modules=0x%x fmt=0x%x size=%zu\n",
		header->file_size, header->modules,
		header->file_format, sizeof(*header));

	/* parse each module */
	module = (void *)sst_fw->dma_buf + sizeof(*header);
	for (count = 0; count < header->modules; count++) {

		/* module */
		ret = hsw_parse_module(dsp, sst_fw, module);
		if (ret < 0) {
			dev_err(dsp->dev, "error: invalid module %d\n", count);
			return ret;
		}
		module = (void *)module + sizeof(*module) + module->mod_size;
	}

	return 0;
}
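
/*
 * Primary IRQ handler: check the ISRX source bits, mask the matching
 * DONE/BUSY interrupt in IMRX and wake the threaded handler (registered
 * elsewhere, presumably by the Haswell IPC code) to do the actual message
 * processing.
 */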
static irqreturn_t hsw_irq(int irq, void *context)
{
	struct sst_dsp *sst = (struct sst_dsp *) context;
	u32 isr;
	int ret = IRQ_NONE;

	spin_lock(&sst->spinlock);

	/* Interrupt arrived, check src */
	isr = sst_dsp_shim_read_unlocked(sst, SST_ISRX);
	if (isr & SST_ISRX_DONE) {
		trace_sst_irq_done(isr,
			sst_dsp_shim_read_unlocked(sst, SST_IMRX));

		/* Mask Done interrupt before return */
		sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
			SST_IMRX_DONE, SST_IMRX_DONE);
		ret = IRQ_WAKE_THREAD;
	}

	if (isr & SST_ISRX_BUSY) {
		trace_sst_irq_busy(isr,
			sst_dsp_shim_read_unlocked(sst, SST_IMRX));

		/* Mask Busy interrupt before return */
		sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
			SST_IMRX_BUSY, SST_IMRX_BUSY);
		ret = IRQ_WAKE_THREAD;
	}

	spin_unlock(&sst->spinlock);
	return ret;
}

static void hsw_set_dsp_D3(struct sst_dsp *sst)
{
	u32 val;
	u32 reg;

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	/* enable power gating and switch off DRAM & IRAM blocks */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	val |= SST_VDRTCL0_DSRAMPGE_MASK |
		SST_VDRTCL0_ISRAMPGE_MASK;
	val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* switch off audio PLL */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val |= SST_VDRTCL2_APLLSE_MASK;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	/* disable MCLK (CLKCTL.SMOS = 0) */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
		SST_CLKCTL_MASK, 0);

	/* Set D3 state, delay 50 us */
	val = readl(sst->addr.pci_cfg + SST_PMCS);
	val |= SST_PMCS_PS_MASK;
	writel(val, sst->addr.pci_cfg + SST_PMCS);
	udelay(50);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);
}

static void hsw_reset(struct sst_dsp *sst)
{
	/* put DSP into reset and stall */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
		SST_CSR_RST | SST_CSR_STALL,
		SST_CSR_RST | SST_CSR_STALL);

	/* keep in reset for 10 ms */
	mdelay(10);

	/* take DSP out of reset and keep stalled for FW loading */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
		SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
}
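
/*
 * Power the ADSP up to D0. Rough sequence, as implemented below: disable
 * core clock gating, clear the power state in PMCS and wait for the shim
 * to become available, program CSR/CLKCTL clocking, reset and stall the
 * core, re-enable clock gating, switch the audio PLL on, power gate all
 * SRAM blocks except DSRAM[0] (kept powered for FW dump) and unmask the
 * IPC interrupts.
 */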
static int hsw_set_dsp_D0(struct sst_dsp *sst)
{
	int tries = 10;
	u32 reg, fw_dump_bit;

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	/* Disable D3PG (VDRTCTL0.D3PGD = 1) */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	reg |= SST_VDRTCL0_D3PGD;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* Set D0 state */
	reg = readl(sst->addr.pci_cfg + SST_PMCS);
	reg &= ~SST_PMCS_PS_MASK;
	writel(reg, sst->addr.pci_cfg + SST_PMCS);

	/* check that ADSP shim is enabled */
	while (tries--) {
		reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
		if (reg == 0)
			goto finish;

		msleep(1);
	}

	return -ENODEV;

finish:
	/* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
		SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);

	/* stall DSP core, set clk to 192/96 MHz */
	sst_dsp_shim_update_bits_unlocked(sst,
		SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
		SST_CSR_STALL | SST_CSR_DCS(4));

	/* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
		SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
		SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);

	/* Stall and reset core, set CSR */
	hsw_reset(sst);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);

	/* switch on audio PLL */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg &= ~SST_VDRTCL2_APLLSE_MASK;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	/*
	 * Set default power gating control: enable power gating for all
	 * SRAM blocks, i.e. the blocks cannot be accessed; each block
	 * must be enabled before it is accessed.
	 */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
	/* for D0, always enable the block (DSRAM[0]) used for FW dump */
	fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
	writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* disable DMA finish function for SSP0 & SSP1 */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
		SST_CSR2_SDFD_SSP1);

	/* set on-demand mode on engine 0,1 for all channels */
	sst_dsp_shim_update_bits(sst, SST_HMDC,
			SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH,
			SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH);

	/* Enable Interrupt from both sides */
	sst_dsp_shim_update_bits(sst, SST_IMRX, (SST_IMRX_BUSY | SST_IMRX_DONE),
		0x0);
	sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
		SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);

	/* clear IPC registers */
	sst_dsp_shim_write(sst, SST_IPCX, 0x0);
	sst_dsp_shim_write(sst, SST_IPCD, 0x0);
	sst_dsp_shim_write(sst, 0x80, 0x6);
	sst_dsp_shim_write(sst, 0xe0, 0x300a);

	return 0;
}

static void hsw_boot(struct sst_dsp *sst)
{
	/* set opportunistic mode on engine 0,1 for all channels */
	sst_dsp_shim_update_bits(sst, SST_HMDC,
			SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH, 0);

	/* set DSP to RUN */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_STALL, 0x0);
}

static void hsw_stall(struct sst_dsp *sst)
{
	/* stall DSP */
	sst_dsp_shim_update_bits(sst, SST_CSR,
		SST_CSR_24MHZ_LPCS | SST_CSR_STALL,
		SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
}

static void hsw_sleep(struct sst_dsp *sst)
{
	dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");

	/* put DSP into reset and stall */
	sst_dsp_shim_update_bits(sst, SST_CSR,
		SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
		SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);

	hsw_set_dsp_D3(sst);
	dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
}

static int hsw_wake(struct sst_dsp *sst)
{
	int ret;

	dev_dbg(sst->dev, "HSW_PM dsp runtime resume\n");

	ret = hsw_set_dsp_D0(sst);
	if (ret < 0)
		return ret;

	dev_dbg(sst->dev, "HSW_PM dsp runtime resume exit\n");

	return 0;
}

struct sst_adsp_memregion {
	u32 start;
	u32 end;
	int blocks;
	enum sst_mem_type type;
};

/* Lynx Point ADSP mem regions */
static const struct sst_adsp_memregion lp_region[] = {
	{0x00000, 0x40000, 8, SST_MEM_DRAM}, /* D-SRAM0 - 8 * 32kB */
	{0x40000, 0x80000, 8, SST_MEM_DRAM}, /* D-SRAM1 - 8 * 32kB */
	{0x80000, 0xE0000, 12, SST_MEM_IRAM}, /* I-SRAM - 12 * 32kB */
};

/* Wildcat Point ADSP mem regions */
static const struct sst_adsp_memregion wpt_region[] = {
	{0x00000, 0xA0000, 20, SST_MEM_DRAM}, /* D-SRAM0,D-SRAM1,D-SRAM2 - 20 * 32kB */
	{0xA0000, 0xF0000, 10, SST_MEM_IRAM}, /* I-SRAM - 10 * 32kB */
};
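
/*
 * Map the ADSP resources described by the platform data: the LPE memory
 * window (DRAM, IRAM and shim) and the PCI config registers used for power
 * management (VDRTCTL*, PMCS). The shim address is the LPE base plus the
 * per-device shim offset chosen in hsw_init().
 */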
static int hsw_acpi_resource_map(struct sst_dsp *sst, struct sst_pdata *pdata)
{
	/* ADSP DRAM & IRAM */
	sst->addr.lpe_base = pdata->lpe_base;
	sst->addr.lpe = ioremap(pdata->lpe_base, pdata->lpe_size);
	if (!sst->addr.lpe)
		return -ENODEV;

	/* ADSP PCI MMIO config space */
	sst->addr.pci_cfg = ioremap(pdata->pcicfg_base, pdata->pcicfg_size);
	if (!sst->addr.pci_cfg) {
		iounmap(sst->addr.lpe);
		return -ENODEV;
	}

	/* SST Shim */
	sst->addr.shim = sst->addr.lpe + sst->addr.shim_offset;
	return 0;
}

struct sst_sram_shift {
	u32 dev_id;	/* SST Device IDs */
	u32 iram_shift;
	u32 dram_shift;
};

static const struct sst_sram_shift sram_shift[] = {
	{SST_DEV_ID_LYNX_POINT, 6, 16},		/* lp */
	{SST_DEV_ID_WILDCAT_POINT, 2, 12},	/* wpt */
};

static u32 hsw_block_get_bit(struct sst_mem_block *block)
{
	u32 bit = 0, shift = 0, index;
	struct sst_dsp *sst = block->dsp;

	for (index = 0; index < ARRAY_SIZE(sram_shift); index++) {
		if (sram_shift[index].dev_id == sst->id)
			break;
	}

	if (index < ARRAY_SIZE(sram_shift)) {
		switch (block->type) {
		case SST_MEM_DRAM:
			shift = sram_shift[index].dram_shift;
			break;
		case SST_MEM_IRAM:
			shift = sram_shift[index].iram_shift;
			break;
		default:
			shift = 0;
		}
	} else
		shift = 0;

	bit = 1 << (block->index + shift);

	return bit;
}

/* dummy read of an SRAM block */
static void sst_mem_block_dummy_read(struct sst_mem_block *block)
{
	u32 size;
	u8 tmp_buf[4];
	struct sst_dsp *sst = block->dsp;

	size = block->size > 4 ? 4 : block->size;
	memcpy_fromio(tmp_buf, sst->addr.lpe + block->offset, size);
}
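
/*
 * SRAM block power gating: each 32kB block has a power gate enable bit in
 * VDRTCTL0, computed by hsw_block_get_bit() from the block index and the
 * per-device IRAM/DRAM shift. Clearing the bit powers the block up, setting
 * it powers the block down; core clock gating is temporarily disabled
 * around the update and a dummy read precedes the first write.
 */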
/* enable 32kB memory block - locks held by caller */
static int hsw_block_enable(struct sst_mem_block *block)
{
	struct sst_dsp *sst = block->dsp;
	u32 bit, val;

	if (block->users++ > 0)
		return 0;

	dev_dbg(block->dsp->dev, " enabled block %d:%d at offset 0x%x\n",
		block->type, block->index, block->offset);

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val &= ~SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	bit = hsw_block_get_bit(block);
	writel(val & ~bit, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* wait 18 DSP clock ticks */
	udelay(10);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val |= SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);

	/*
	 * Add a dummy read before the SRAM block is written, otherwise
	 * the write may sometimes drop bytes.
	 */
	sst_mem_block_dummy_read(block);
	return 0;
}

/* disable 32kB memory block - locks held by caller */
static int hsw_block_disable(struct sst_mem_block *block)
{
	struct sst_dsp *sst = block->dsp;
	u32 bit, val;

	if (--block->users > 0)
		return 0;

	dev_dbg(block->dsp->dev, " disabled block %d:%d at offset 0x%x\n",
		block->type, block->index, block->offset);

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val &= ~SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	bit = hsw_block_get_bit(block);
	/* don't disable DSRAM[0], keep it always enabled for FW dump */
	if (bit != (1 << SST_VDRTCL0_DSRAMPGE_SHIFT))
		writel(val | bit, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* wait 18 DSP clock ticks */
	udelay(10);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val |= SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);

	return 0;
}

static struct sst_block_ops sst_hsw_ops = {
	.enable = hsw_block_enable,
	.disable = hsw_block_disable,
};
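
/*
 * One-time DSP init: pick the Lynx Point or Wildcat Point memory layout,
 * map the MMIO resources, bring the DSP to D0 and register the 32kB SRAM
 * blocks with the common block allocator so that firmware modules can be
 * placed into them.
 */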
static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
{
	const struct sst_adsp_memregion *region;
	struct device *dev;
	int ret = -ENODEV, i, j, region_count;
	u32 offset, size, fw_dump_bit;

	dev = sst->dma_dev;

	switch (sst->id) {
	case SST_DEV_ID_LYNX_POINT:
		region = lp_region;
		region_count = ARRAY_SIZE(lp_region);
		sst->addr.iram_offset = SST_LP_IRAM_OFFSET;
		sst->addr.dsp_iram_offset = SST_LPT_DSP_IRAM_OFFSET;
		sst->addr.dsp_dram_offset = SST_LPT_DSP_DRAM_OFFSET;
		sst->addr.shim_offset = SST_LP_SHIM_OFFSET;
		break;
	case SST_DEV_ID_WILDCAT_POINT:
		region = wpt_region;
		region_count = ARRAY_SIZE(wpt_region);
		sst->addr.iram_offset = SST_WPT_IRAM_OFFSET;
		sst->addr.dsp_iram_offset = SST_WPT_DSP_IRAM_OFFSET;
		sst->addr.dsp_dram_offset = SST_WPT_DSP_DRAM_OFFSET;
		sst->addr.shim_offset = SST_WPT_SHIM_OFFSET;
		break;
	default:
		dev_err(dev, "error: failed to get mem resources\n");
		return ret;
	}

	ret = hsw_acpi_resource_map(sst, pdata);
	if (ret < 0) {
		dev_err(dev, "error: failed to map resources\n");
		return ret;
	}

	/* enable the DSP SHIM */
	ret = hsw_set_dsp_D0(sst);
	if (ret < 0) {
		dev_err(dev, "error: failed to set DSP D0 and reset SHIM\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (ret)
		return ret;

	/* register DSP memory blocks - ideally we should get this from ACPI */
	for (i = 0; i < region_count; i++) {
		offset = region[i].start;
		size = (region[i].end - region[i].start) / region[i].blocks;

		/* register individual memory blocks */
		for (j = 0; j < region[i].blocks; j++) {
			sst_mem_block_register(sst, offset, size,
				region[i].type, &sst_hsw_ops, j, sst);
			offset += size;
		}
	}

	/* always enable the block (DSRAM[0]) used for FW dump */
	fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
	/*
	 * Set default power gating control: enable power gating for all
	 * SRAM blocks, i.e. the blocks cannot be accessed; each block
	 * must be enabled before it is accessed.
	 */
	writel(0xffffffff & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);

	return 0;
}

static void hsw_free(struct sst_dsp *sst)
{
	sst_mem_block_unregister_all(sst);
	iounmap(sst->addr.lpe);
	iounmap(sst->addr.pci_cfg);
}

struct sst_ops haswell_ops = {
	.reset = hsw_reset,
	.boot = hsw_boot,
	.stall = hsw_stall,
	.wake = hsw_wake,
	.sleep = hsw_sleep,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.write64 = sst_shim32_write64,
	.read64 = sst_shim32_read64,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.irq_handler = hsw_irq,
	.init = hsw_init,
	.free = hsw_free,
	.parse_fw = hsw_parse_fw_image,
};