/*
 * Intel Haswell SST DSP driver
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../haswell/sst-haswell-ipc.h"

#include <trace/events/hswadsp.h>

#define SST_HSW_FW_SIGNATURE_SIZE 4
#define SST_HSW_FW_SIGN "$SST"
#define SST_HSW_FW_LIB_SIGN "$LIB"

#define SST_WPT_SHIM_OFFSET 0xFB000
#define SST_LP_SHIM_OFFSET 0xE7000
#define SST_WPT_IRAM_OFFSET 0xA0000
#define SST_LP_IRAM_OFFSET 0x80000
#define SST_WPT_DSP_DRAM_OFFSET 0x400000
#define SST_WPT_DSP_IRAM_OFFSET 0x00000
#define SST_LPT_DSP_DRAM_OFFSET 0x400000
#define SST_LPT_DSP_IRAM_OFFSET 0x00000

#define SST_SHIM_PM_REG 0x84

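/* memory block types used in the firmware image's dma_block_info.type field */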
#define SST_HSW_IRAM 1
#define SST_HSW_DRAM 2
#define SST_HSW_REGS 3

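/*
 * Firmware image layout: a fw_header is followed by one fw_module_header per
 * module; each module header is followed by its DMA block descriptors, and
 * each descriptor is immediately followed by the block data it describes.
 */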
struct dma_block_info {
        __le32 type;            /* IRAM/DRAM */
        __le32 size;            /* Bytes */
        __le32 ram_offset;      /* Offset in I/DRAM */
        __le32 rsvd;            /* Reserved field */
} __attribute__((packed));

struct fw_module_info {
        __le32 persistent_size;
        __le32 scratch_size;
} __attribute__((packed));

struct fw_header {
        unsigned char signature[SST_HSW_FW_SIGNATURE_SIZE]; /* FW signature */
        __le32 file_size;       /* size of fw minus this header */
        __le32 modules;         /* # of modules */
        __le32 file_format;     /* version of header format */
        __le32 reserved[4];
} __attribute__((packed));

struct fw_module_header {
        unsigned char signature[SST_HSW_FW_SIGNATURE_SIZE]; /* module signature */
        __le32 mod_size;        /* size of module */
        __le32 blocks;          /* # of blocks */
        __le16 padding;
        __le16 type;            /* codec type, pp lib */
        __le32 entry_point;
        struct fw_module_info info;
} __attribute__((packed));

static void hsw_free(struct sst_dsp *sst);

static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
        struct fw_module_header *module)
{
        struct dma_block_info *block;
        struct sst_module *mod;
        struct sst_module_template template;
        int count, ret;
        void __iomem *ram;
        int type = le16_to_cpu(module->type);
        int entry_point = le32_to_cpu(module->entry_point);

        /* TODO: allowed module types need to be configurable */
        if (type != SST_HSW_MODULE_BASE_FW &&
            type != SST_HSW_MODULE_PCM_SYSTEM &&
            type != SST_HSW_MODULE_PCM &&
            type != SST_HSW_MODULE_PCM_REFERENCE &&
            type != SST_HSW_MODULE_PCM_CAPTURE &&
            type != SST_HSW_MODULE_WAVES &&
            type != SST_HSW_MODULE_LPAL)
                return 0;

        dev_dbg(dsp->dev, "new module sign 0x%s size 0x%x blocks 0x%x type 0x%x\n",
                module->signature, module->mod_size,
                module->blocks, type);
        dev_dbg(dsp->dev, " entrypoint 0x%x\n", entry_point);
        dev_dbg(dsp->dev, " persistent 0x%x scratch 0x%x\n",
                module->info.persistent_size, module->info.scratch_size);

        memset(&template, 0, sizeof(template));
        template.id = type;
        template.entry = entry_point - 4;
        template.persistent_size = le32_to_cpu(module->info.persistent_size);
        template.scratch_size = le32_to_cpu(module->info.scratch_size);

        mod = sst_module_new(fw, &template, NULL);
        if (mod == NULL)
                return -ENOMEM;

        block = (void *)module + sizeof(*module);

        for (count = 0; count < le32_to_cpu(module->blocks); count++) {

                if (le32_to_cpu(block->size) <= 0) {
                        dev_err(dsp->dev,
                                "error: block %d size invalid\n", count);
                        sst_module_free(mod);
                        return -EINVAL;
                }

                switch (le32_to_cpu(block->type)) {
                case SST_HSW_IRAM:
                        ram = dsp->addr.lpe;
                        mod->offset = le32_to_cpu(block->ram_offset) +
                                dsp->addr.iram_offset;
                        mod->type = SST_MEM_IRAM;
                        break;
                case SST_HSW_DRAM:
                case SST_HSW_REGS:
                        ram = dsp->addr.lpe;
                        mod->offset = le32_to_cpu(block->ram_offset);
                        mod->type = SST_MEM_DRAM;
                        break;
                default:
                        dev_err(dsp->dev, "error: bad type 0x%x for block 0x%x\n",
                                block->type, count);
                        sst_module_free(mod);
                        return -EINVAL;
                }

                mod->size = le32_to_cpu(block->size);
                mod->data = (void *)block + sizeof(*block);
                mod->data_offset = mod->data - fw->dma_buf;

                dev_dbg(dsp->dev, "module block %d type 0x%x "
                        "size 0x%x ==> ram %p offset 0x%x\n",
                        count, mod->type, block->size, ram,
                        block->ram_offset);

                ret = sst_module_alloc_blocks(mod);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: could not allocate blocks for module %d\n",
                                count);
                        sst_module_free(mod);
                        return ret;
                }

                block = (void *)block + sizeof(*block) +
                        le32_to_cpu(block->size);
        }
        mod->state = SST_MODULE_STATE_LOADED;

        return 0;
}

static int hsw_parse_fw_image(struct sst_fw *sst_fw)
{
        struct fw_header *header;
        struct fw_module_header *module;
        struct sst_dsp *dsp = sst_fw->dsp;
        int ret, count;

        /* Read the header information from the data pointer */
        header = (struct fw_header *)sst_fw->dma_buf;

        /* verify FW */
        if ((strncmp(header->signature, SST_HSW_FW_SIGN, 4) != 0) ||
            (sst_fw->size !=
             le32_to_cpu(header->file_size) + sizeof(*header))) {
                dev_err(dsp->dev, "error: invalid fw sign/filesize mismatch\n");
                return -EINVAL;
        }

        dev_dbg(dsp->dev, "header size=0x%x modules=0x%x fmt=0x%x size=%zu\n",
                header->file_size, header->modules,
                header->file_format, sizeof(*header));

        /* parse each module */
        module = (void *)sst_fw->dma_buf + sizeof(*header);
        for (count = 0; count < le32_to_cpu(header->modules); count++) {

                /* module */
                ret = hsw_parse_module(dsp, sst_fw, module);
                if (ret < 0) {
                        dev_err(dsp->dev, "error: invalid module %d\n", count);
                        return ret;
                }
                module = (void *)module + sizeof(*module) +
                        le32_to_cpu(module->mod_size);
        }

        return 0;
}

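/* top-half IRQ handler: mask whichever IPC sources fired and wake the IPC thread */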
static irqreturn_t hsw_irq(int irq, void *context)
{
        struct sst_dsp *sst = (struct sst_dsp *) context;
        u32 isr;
        int ret = IRQ_NONE;

        spin_lock(&sst->spinlock);

        /* Interrupt arrived, check src */
        isr = sst_dsp_shim_read_unlocked(sst, SST_ISRX);
        if (isr & SST_ISRX_DONE) {
                trace_sst_irq_done(isr,
                        sst_dsp_shim_read_unlocked(sst, SST_IMRX));

                /* Mask Done interrupt before return */
                sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
                        SST_IMRX_DONE, SST_IMRX_DONE);
                ret = IRQ_WAKE_THREAD;
        }

        if (isr & SST_ISRX_BUSY) {
                trace_sst_irq_busy(isr,
                        sst_dsp_shim_read_unlocked(sst, SST_IMRX));

                /* Mask Busy interrupt before return */
                sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
                        SST_IMRX_BUSY, SST_IMRX_BUSY);
                ret = IRQ_WAKE_THREAD;
        }

        spin_unlock(&sst->spinlock);
        return ret;
}

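/* power gate the SRAM blocks, stop the audio PLL and MCLK, then enter D3 */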
static void hsw_set_dsp_D3(struct sst_dsp *sst)
{
        u32 val;
        u32 reg;

        /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

        /* enable power gating and switch off DRAM & IRAM blocks */
        val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
        val |= SST_VDRTCL0_DSRAMPGE_MASK |
                SST_VDRTCL0_ISRAMPGE_MASK;
        val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
        writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);

        /* switch off audio PLL */
        val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        val |= SST_VDRTCL2_APLLSE_MASK;
        writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

        /* disable MCLK (clkctl.smos = 0) */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
                SST_CLKCTL_MASK, 0);

        /* Set D3 state, delay 50 us */
        val = readl(sst->addr.pci_cfg + SST_PMCS);
        val |= SST_PMCS_PS_MASK;
        writel(val, sst->addr.pci_cfg + SST_PMCS);
        udelay(50);

        /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

        udelay(50);
}

static void hsw_reset(struct sst_dsp *sst)
{
        /* put DSP into reset and stall */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
                SST_CSR_RST | SST_CSR_STALL,
                SST_CSR_RST | SST_CSR_STALL);

        /* keep in reset for 10ms */
        mdelay(10);

        /* take DSP out of reset and keep stalled for FW loading */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
                SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
}

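/*
 * Bring the DSP up to D0: ungate the clocks, program the SSP/MCLK clocking,
 * power gating and interrupt masks, and leave the core stalled, ready for
 * firmware load.
 */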
static int hsw_set_dsp_D0(struct sst_dsp *sst)
{
        int tries = 10;
        u32 reg, fw_dump_bit;

        /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

        /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
        reg |= SST_VDRTCL0_D3PGD;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);

        /* Set D0 state */
        reg = readl(sst->addr.pci_cfg + SST_PMCS);
        reg &= ~SST_PMCS_PS_MASK;
        writel(reg, sst->addr.pci_cfg + SST_PMCS);

        /* check that ADSP shim is enabled */
        while (tries--) {
                reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
                if (reg == 0)
                        goto finish;

                msleep(1);
        }

        return -ENODEV;

finish:
        /* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
                SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);

        /* stall DSP core, set clk to 192/96MHz */
        sst_dsp_shim_update_bits_unlocked(sst,
                SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
                SST_CSR_STALL | SST_CSR_DCS(4));

        /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
                SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
                SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);

        /* Stall and reset core, set CSR */
        hsw_reset(sst);

        /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

        udelay(50);

        /* switch on audio PLL */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        reg &= ~SST_VDRTCL2_APLLSE_MASK;
        writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

        /*
         * Set the default power gating control: power gate all SRAM blocks,
         * i.e. they cannot be accessed until each block is enabled again.
         */
        reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
        reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
        /* for D0, always enable the block (DSRAM[0]) used for FW dump */
        fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
        writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);

        /* disable DMA finish function for SSP0 & SSP1 */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
                SST_CSR2_SDFD_SSP1);

        /* set on-demand mode on engine 0,1 for all channels */
        sst_dsp_shim_update_bits(sst, SST_HMDC,
                SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH,
                SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH);

        /* Enable Interrupt from both sides */
        sst_dsp_shim_update_bits(sst, SST_IMRX, (SST_IMRX_BUSY | SST_IMRX_DONE),
                0x0);
        sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
                SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);

        /* clear IPC registers */
        sst_dsp_shim_write(sst, SST_IPCX, 0x0);
        sst_dsp_shim_write(sst, SST_IPCD, 0x0);
        sst_dsp_shim_write(sst, 0x80, 0x6);
        sst_dsp_shim_write(sst, 0xe0, 0x300a);

        return 0;
}

static void hsw_boot(struct sst_dsp *sst)
{
        /* set opportunistic mode on engine 0,1 for all channels */
        sst_dsp_shim_update_bits(sst, SST_HMDC,
                SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH, 0);

        /* set DSP to RUN */
        sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_STALL, 0x0);
}

static void hsw_stall(struct sst_dsp *sst)
{
        /* stall DSP */
        sst_dsp_shim_update_bits(sst, SST_CSR,
                SST_CSR_24MHZ_LPCS | SST_CSR_STALL,
                SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
}

static void hsw_sleep(struct sst_dsp *sst)
{
        dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");

        /* put DSP into reset and stall */
        sst_dsp_shim_update_bits(sst, SST_CSR,
                SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
                SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);

        hsw_set_dsp_D3(sst);
        dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
}

static int hsw_wake(struct sst_dsp *sst)
{
        int ret;

        dev_dbg(sst->dev, "HSW_PM dsp runtime resume\n");

        ret = hsw_set_dsp_D0(sst);
        if (ret < 0)
                return ret;

        dev_dbg(sst->dev, "HSW_PM dsp runtime resume exit\n");

        return 0;
}

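/* a contiguous ADSP SRAM region, divided into equally sized 32kB blocks */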
struct sst_adsp_memregion {
        u32 start;
        u32 end;
        int blocks;
        enum sst_mem_type type;
};

/* lynx point ADSP mem regions */
static const struct sst_adsp_memregion lp_region[] = {
        {0x00000, 0x40000, 8, SST_MEM_DRAM},  /* D-SRAM0 - 8 * 32kB */
        {0x40000, 0x80000, 8, SST_MEM_DRAM},  /* D-SRAM1 - 8 * 32kB */
        {0x80000, 0xE0000, 12, SST_MEM_IRAM}, /* I-SRAM - 12 * 32kB */
};

/* wild cat point ADSP mem regions */
static const struct sst_adsp_memregion wpt_region[] = {
        {0x00000, 0xA0000, 20, SST_MEM_DRAM}, /* D-SRAM0,D-SRAM1,D-SRAM2 - 20 * 32kB */
        {0xA0000, 0xF0000, 10, SST_MEM_IRAM}, /* I-SRAM - 10 * 32kB */
};

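/* map the LPE SRAM/shim and PCI config MMIO windows described by the platform data */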
static int hsw_acpi_resource_map(struct sst_dsp *sst, struct sst_pdata *pdata)
{
        /* ADSP DRAM & IRAM */
        sst->addr.lpe_base = pdata->lpe_base;
        sst->addr.lpe = ioremap(pdata->lpe_base, pdata->lpe_size);
        if (!sst->addr.lpe)
                return -ENODEV;

        /* ADSP PCI MMIO config space */
        sst->addr.pci_cfg = ioremap(pdata->pcicfg_base, pdata->pcicfg_size);
        if (!sst->addr.pci_cfg) {
                iounmap(sst->addr.lpe);
                return -ENODEV;
        }

        /* SST Shim */
        sst->addr.shim = sst->addr.lpe + sst->addr.shim_offset;
        return 0;
}

struct sst_sram_shift {
        u32 dev_id; /* SST Device IDs */
        u32 iram_shift;
        u32 dram_shift;
};

static const struct sst_sram_shift sram_shift[] = {
        {SST_DEV_ID_LYNX_POINT, 6, 16},    /* lp */
        {SST_DEV_ID_WILDCAT_POINT, 2, 12}, /* wpt */
};

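/*
 * Return the VDRTCTL0 power gating bit for a memory block, using the
 * device specific IRAM/DRAM bit offsets from the sram_shift table.
 */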
static u32 hsw_block_get_bit(struct sst_mem_block *block)
{
        u32 bit = 0, shift = 0, index;
        struct sst_dsp *sst = block->dsp;

        for (index = 0; index < ARRAY_SIZE(sram_shift); index++) {
                if (sram_shift[index].dev_id == sst->id)
                        break;
        }

        if (index < ARRAY_SIZE(sram_shift)) {
                switch (block->type) {
                case SST_MEM_DRAM:
                        shift = sram_shift[index].dram_shift;
                        break;
                case SST_MEM_IRAM:
                        shift = sram_shift[index].iram_shift;
                        break;
                default:
                        shift = 0;
                }
        } else
                shift = 0;

        bit = 1 << (block->index + shift);

        return bit;
}

/* dummy read of an SRAM block */
static void sst_mem_block_dummy_read(struct sst_mem_block *block)
{
        u32 size;
        u8 tmp_buf[4];
        struct sst_dsp *sst = block->dsp;

        size = block->size > 4 ? 4 : block->size;
        memcpy_fromio(tmp_buf, sst->addr.lpe + block->offset, size);
}

/* enable 32kB memory block - locks held by caller */
static int hsw_block_enable(struct sst_mem_block *block)
{
        struct sst_dsp *sst = block->dsp;
        u32 bit, val;

        if (block->users++ > 0)
                return 0;

        dev_dbg(block->dsp->dev, " enabled block %d:%d at offset 0x%x\n",
                block->type, block->index, block->offset);

        /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
        val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        val &= ~SST_VDRTCL2_DCLCGE;
        writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

        val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
        bit = hsw_block_get_bit(block);
        writel(val & ~bit, sst->addr.pci_cfg + SST_VDRTCTL0);

        /* wait 18 DSP clock ticks */
        udelay(10);

        /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
        val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        val |= SST_VDRTCL2_DCLCGE;
        writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

        udelay(50);

        /*
         * Do a dummy read before the SRAM block is written, otherwise the
         * write may occasionally drop bytes.
         */
        sst_mem_block_dummy_read(block);
        return 0;
}

/* disable 32kB memory block - locks held by caller */
static int hsw_block_disable(struct sst_mem_block *block)
{
        struct sst_dsp *sst = block->dsp;
        u32 bit, val;

        if (--block->users > 0)
                return 0;

        dev_dbg(block->dsp->dev, " disabled block %d:%d at offset 0x%x\n",
                block->type, block->index, block->offset);

        /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
        val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        val &= ~SST_VDRTCL2_DCLCGE;
        writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

        val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
        bit = hsw_block_get_bit(block);
        /* don't disable DSRAM[0], keep it always enabled for FW dump */
        if (bit != (1 << SST_VDRTCL0_DSRAMPGE_SHIFT))
                writel(val | bit, sst->addr.pci_cfg + SST_VDRTCTL0);

        /* wait 18 DSP clock ticks */
        udelay(10);

        /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
        val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
        val |= SST_VDRTCL2_DCLCGE;
        writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

        udelay(50);

        return 0;
}

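/* block power management callbacks passed to sst_mem_block_register() */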
static const struct sst_block_ops sst_hsw_ops = {
        .enable = hsw_block_enable,
        .disable = hsw_block_disable,
};

static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
{
        const struct sst_adsp_memregion *region;
        struct device *dev;
        int ret = -ENODEV, i, j, region_count;
        u32 offset, size, fw_dump_bit;

        dev = sst->dma_dev;

        switch (sst->id) {
        case SST_DEV_ID_LYNX_POINT:
                region = lp_region;
                region_count = ARRAY_SIZE(lp_region);
                sst->addr.iram_offset = SST_LP_IRAM_OFFSET;
                sst->addr.dsp_iram_offset = SST_LPT_DSP_IRAM_OFFSET;
                sst->addr.dsp_dram_offset = SST_LPT_DSP_DRAM_OFFSET;
                sst->addr.shim_offset = SST_LP_SHIM_OFFSET;
                break;
        case SST_DEV_ID_WILDCAT_POINT:
                region = wpt_region;
                region_count = ARRAY_SIZE(wpt_region);
                sst->addr.iram_offset = SST_WPT_IRAM_OFFSET;
                sst->addr.dsp_iram_offset = SST_WPT_DSP_IRAM_OFFSET;
                sst->addr.dsp_dram_offset = SST_WPT_DSP_DRAM_OFFSET;
                sst->addr.shim_offset = SST_WPT_SHIM_OFFSET;
                break;
        default:
                dev_err(dev, "error: failed to get mem resources\n");
                return ret;
        }

        ret = hsw_acpi_resource_map(sst, pdata);
        if (ret < 0) {
                dev_err(dev, "error: failed to map resources\n");
                return ret;
        }

        /* enable the DSP SHIM */
        ret = hsw_set_dsp_D0(sst);
        if (ret < 0) {
                dev_err(dev, "error: failed to set DSP D0 and reset SHIM\n");
                return ret;
        }

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
        if (ret)
                return ret;

        /* register DSP memory blocks - ideally we should get this from ACPI */
        for (i = 0; i < region_count; i++) {
                offset = region[i].start;
                size = (region[i].end - region[i].start) / region[i].blocks;

                /* register individual memory blocks */
                for (j = 0; j < region[i].blocks; j++) {
                        sst_mem_block_register(sst, offset, size,
                                region[i].type, &sst_hsw_ops, j, sst);
                        offset += size;
                }
        }

        /* always enable the block (DSRAM[0]) used for FW dump */
        fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
        /*
         * Set the default power gating control: power gate all SRAM blocks,
         * i.e. they cannot be accessed until each block is enabled again.
         */
        writel(0xffffffff & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);

        return 0;
}

static void hsw_free(struct sst_dsp *sst)
{
        sst_mem_block_unregister_all(sst);
        iounmap(sst->addr.lpe);
        iounmap(sst->addr.pci_cfg);
}

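/* DSP operations for the Lynx Point and Wildcat Point ADSP devices */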
struct sst_ops haswell_ops = {
        .reset = hsw_reset,
        .boot = hsw_boot,
        .stall = hsw_stall,
        .wake = hsw_wake,
        .sleep = hsw_sleep,
        .write = sst_shim32_write,
        .read = sst_shim32_read,
        .write64 = sst_shim32_write64,
        .read64 = sst_shim32_read64,
        .ram_read = sst_memcpy_fromio_32,
        .ram_write = sst_memcpy_toio_32,
        .irq_handler = hsw_irq,
        .init = hsw_init,
        .free = hsw_free,
        .parse_fw = hsw_parse_fw_image,
};