// SPDX-License-Identifier: GPL-2.0-only
/*
 * bxt-sst.c - DSP library functions for BXT platform
 *
 * Copyright (C) 2015-16 Intel Corp
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>

#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"

#define BXT_BASEFW_TIMEOUT	3000
#define BXT_ROM_INIT_TIMEOUT	70
#define BXT_IPC_PURGE_FW	0x01004000

#define BXT_ROM_INIT		0x5
#define BXT_ADSP_SRAM0_BASE	0x80000

/* Firmware status window */
#define BXT_ADSP_FW_STATUS	BXT_ADSP_SRAM0_BASE
#define BXT_ADSP_ERROR_CODE	(BXT_ADSP_FW_STATUS + 0x4)

#define BXT_ADSP_SRAM1_BASE	0xA0000

#define BXT_INSTANCE_ID		0
#define BXT_BASE_FW_MODULE_ID	0

#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000

/* Delay (in milliseconds) before scheduling D0i3 entry */
#define BXT_D0I3_DELAY 5000

static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
}

static int
bxt_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
{
	struct snd_dma_buffer dmab;
	struct skl_dev *skl = ctx->thread_context;
	struct firmware stripped_fw;
	int ret = 0, i, dma_id, stream_tag;

	/* library indices start from 1 to N. 0 represents base FW */
	for (i = 1; i < lib_count; i++) {
		ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
					BXT_ADSP_FW_BIN_HDR_OFFSET, i);
		if (ret < 0)
			goto load_library_failed;

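		/*
		 * Note: dsp_ops.prepare() is expected to allocate a host DMA
		 * stream plus buffer for the code-load transfer and return a
		 * 1-based stream tag (zero or negative indicates failure).
		 */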
		stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40,
						stripped_fw.size, &dmab);
		if (stream_tag <= 0) {
			dev_err(ctx->dev, "Lib prepare DMA err: %x\n",
					stream_tag);
			ret = stream_tag;
			goto load_library_failed;
		}

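		/* stream tags are 1-based; the DMA channel id sent to FW is 0-based */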
		dma_id = stream_tag - 1;
		memcpy(dmab.area, stripped_fw.data, stripped_fw.size);

		ctx->dsp_ops.trigger(ctx->dev, true, stream_tag);
		ret = skl_sst_ipc_load_library(&skl->ipc, dma_id, i, true);
		if (ret < 0)
			dev_err(ctx->dev, "IPC Load Lib for %s fail: %d\n",
					linfo[i].name, ret);

		ctx->dsp_ops.trigger(ctx->dev, false, stream_tag);
		ctx->dsp_ops.cleanup(ctx->dev, &dmab, stream_tag);
	}

	return ret;

load_library_failed:
	skl_release_library(linfo, lib_count);
	return ret;
}

/*
 * The first boot sequence has some extra steps: core 0 waits for the
 * power status of core 1, so power up core 1 momentarily as well, keep
 * it in reset/stall, and then turn it off.
 */
static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
		const void *fwdata, u32 fwsize)
{
	int stream_tag, ret;

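	/*
	 * Allocate a code-loader DMA stream and stage the stripped base FW
	 * image in its buffer; the actual transfer is triggered later in
	 * sst_transfer_fw_host_dma().
	 */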
	stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
	if (stream_tag <= 0) {
		dev_err(ctx->dev, "Failed to prepare DMA FW loading err: %x\n",
				stream_tag);
		return stream_tag;
	}

	ctx->dsp_ops.stream_tag = stream_tag;
	memcpy(ctx->dmab.area, fwdata, fwsize);

	/* Step 1: Power up core 0 and core1 */
	ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK |
				SKL_DSP_CORE_MASK(1));
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core0/1 power up failed\n");
		goto base_fw_load_failed;
	}

	/* Step 2: Purge FW request */
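	/*
	 * The purge request is written to the primary IPC register with the
	 * BUSY bit set; the (stream_tag - 1) << 9 term presumably encodes the
	 * code-loader DMA channel id in the message payload.
	 */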
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_HIPCI, SKL_ADSP_REG_HIPCI_BUSY |
				(BXT_IPC_PURGE_FW | ((stream_tag - 1) << 9)));

	/* Step 3: Unset core0 reset state & unstall/run core0 */
	ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
	if (ret < 0) {
		dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
		ret = -EIO;
		goto base_fw_load_failed;
	}

	/* Step 4: Wait for DONE Bit */
	ret = sst_dsp_register_poll(ctx, SKL_ADSP_REG_HIPCIE,
					SKL_ADSP_REG_HIPCIE_DONE,
					SKL_ADSP_REG_HIPCIE_DONE,
					BXT_INIT_TIMEOUT, "HIPCIE Done");
	if (ret < 0) {
		dev_err(ctx->dev, "Timeout for Purge Request%d\n", ret);
		goto base_fw_load_failed;
	}

	/* Step 5: power down core1 */
	ret = skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core1 power down failed\n");
		goto base_fw_load_failed;
	}

	/* Step 6: Enable Interrupt */
	skl_ipc_int_enable(ctx);
	skl_ipc_op_int_enable(ctx);

	/* Step 7: Wait for ROM init */
	ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK,
			SKL_FW_INIT, BXT_ROM_INIT_TIMEOUT, "ROM Load");
	if (ret < 0) {
		dev_err(ctx->dev, "Timeout for ROM init, ret:%d\n", ret);
		goto base_fw_load_failed;
	}

	return ret;

base_fw_load_failed:
	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
	skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	return ret;
}

static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
	int ret;

	ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
	ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK,
			BXT_ROM_INIT, BXT_BASEFW_TIMEOUT, "Firmware boot");

	ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);

	return ret;
}

static int bxt_load_base_firmware(struct sst_dsp *ctx)
{
	struct firmware stripped_fw;
	struct skl_dev *skl = ctx->thread_context;
	int ret, i;

	if (ctx->fw == NULL) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
			return ret;
		}
	}

	/* parse UUIDs on first boot */
	if (skl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw, BXT_ADSP_FW_BIN_HDR_OFFSET, 0);
		if (ret < 0)
			goto sst_load_base_firmware_failed;
	}

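	/*
	 * Drop the optional extended manifest header (if present) so that
	 * only the base FW image proper is handed to the code loader.
	 */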
	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;
	skl_dsp_strip_extended_manifest(&stripped_fw);

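	/* ROM init does not always succeed on the first try; retry the prepare sequence */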
	for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
		ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
		if (ret == 0)
			break;
	}

	if (ret < 0) {
		dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
			sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
			sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));

		dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret);
		goto sst_load_base_firmware_failed;
	}

	ret = sst_transfer_fw_host_dma(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Transfer firmware failed %d\n", ret);
		dev_info(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
			sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
			sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));

		skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	} else {
		dev_dbg(ctx->dev, "Firmware download successful\n");
		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev, "DSP boot fail, FW Ready timeout\n");
			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
			ret = -EIO;
		} else {
			ret = 0;
			skl->fw_loaded = true;
		}
	}

	return ret;

sst_load_base_firmware_failed:
	release_firmware(ctx->fw);
	ctx->fw = NULL;
	return ret;
}

/*
 * Decide the D0i3 state that can be targeted based on the usecase
 * ref counts and DSP state
 *
 * Decision Matrix: (X = don't care; state = target state)
 *
 * DSP state != SKL_DSP_RUNNING ; state = no d0i3
 *
 * DSP state == SKL_DSP_RUNNING , the following matrix applies
 * non_d0i3 >0; streaming =X; non_streaming =X; state = no d0i3
 * non_d0i3 =X; streaming =0; non_streaming =0; state = no d0i3
 * non_d0i3 =0; streaming >0; non_streaming =X; state = streaming d0i3
 * non_d0i3 =0; streaming =0; non_streaming >0; state = non-streaming d0i3
 */
static int bxt_d0i3_target_state(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING)
		return SKL_DSP_D0I3_NONE;

	if (d0i3->non_d0i3)
		return SKL_DSP_D0I3_NONE;
	else if (d0i3->streaming)
		return SKL_DSP_D0I3_STREAMING;
	else if (d0i3->non_streaming)
		return SKL_DSP_D0I3_NON_STREAMING;
	else
		return SKL_DSP_D0I3_NONE;
}

static void bxt_set_dsp_D0i3(struct work_struct *work)
{
	int ret;
	struct skl_ipc_d0ix_msg msg;
	struct skl_dev *skl = container_of(work,
			struct skl_dev, d0i3.work.work);
	struct sst_dsp *ctx = skl->dsp;
	struct skl_d0i3_data *d0i3 = &skl->d0i3;
	int target_state;

	dev_dbg(ctx->dev, "In %s:\n", __func__);

	/* D0i3 entry allowed only if core 0 alone is running */
	if (skl_dsp_get_enabled_cores(ctx) != SKL_DSP_CORE0_MASK) {
		dev_warn(ctx->dev,
				"D0i3 allowed when only core0 running:Exit\n");
		return;
	}

	target_state = bxt_d0i3_target_state(ctx);
	if (target_state == SKL_DSP_D0I3_NONE)
		return;

	msg.instance_id = 0;
	msg.module_id = 0;
	msg.wake = 1;
	msg.streaming = 0;
	if (target_state == SKL_DSP_D0I3_STREAMING)
		msg.streaming = 1;

	ret = skl_ipc_set_d0ix(&skl->ipc, &msg);

	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set DSP to D0i3 state\n");
		return;
	}

	/* Set Vendor specific register D0I3C.I3 to enable D0i3 */
	if (skl->update_d0i3c)
		skl->update_d0i3c(skl->dev, true);

	d0i3->state = target_state;
	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3;
}

static int bxt_schedule_dsp_D0i3(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	/* Schedule D0i3 only if the usecase ref counts are appropriate */
	if (bxt_d0i3_target_state(ctx) != SKL_DSP_D0I3_NONE) {

		dev_dbg(ctx->dev, "%s: Schedule D0i3\n", __func__);

		schedule_delayed_work(&d0i3->work,
				msecs_to_jiffies(BXT_D0I3_DELAY));
	}

	return 0;
}

static int bxt_set_dsp_D0i0(struct sst_dsp *ctx)
{
	int ret;
	struct skl_ipc_d0ix_msg msg;
	struct skl_dev *skl = ctx->thread_context;

	dev_dbg(ctx->dev, "In %s:\n", __func__);

	/* First Cancel any pending attempt to put DSP to D0i3 */
	cancel_delayed_work_sync(&skl->d0i3.work);

	/* If the DSP is not currently in D0i3, there is nothing to do */
	if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3)
		return 0;

	dev_dbg(ctx->dev, "Set DSP to D0i0\n");

	msg.instance_id = 0;
	msg.module_id = 0;
	msg.streaming = 0;
	msg.wake = 0;

	if (skl->d0i3.state == SKL_DSP_D0I3_STREAMING)
		msg.streaming = 1;

	/* Clear Vendor specific register D0I3C.I3 to disable D0i3 */
	if (skl->update_d0i3c)
		skl->update_d0i3c(skl->dev, false);

	ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set DSP to D0i0\n");
		return ret;
	}

	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
	skl->d0i3.state = SKL_DSP_D0I3_NONE;

	return 0;
}

static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret;
	struct skl_ipc_dxstate_info dx;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	if (skl->fw_loaded == false) {
		skl->boot_complete = false;
		ret = bxt_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "reload fw failed: %d\n", ret);
			return ret;
		}

		if (skl->lib_count > 1) {
			ret = bxt_load_library(ctx, skl->lib_info,
						skl->lib_count);
			if (ret < 0) {
				dev_err(ctx->dev, "reload libs failed: %d\n", ret);
				return ret;
			}
		}
		skl->cores.state[core_id] = SKL_DSP_RUNNING;
		return ret;
	}

	/* If core 0 is being turned on, turn on core 1 as well */
	if (core_id == SKL_DSP_CORE0_ID)
		ret = skl_dsp_core_power_up(ctx, core_mask |
				SKL_DSP_CORE_MASK(1));
	else
		ret = skl_dsp_core_power_up(ctx, core_mask);

	if (ret < 0)
		goto err;

	if (core_id == SKL_DSP_CORE0_ID) {

		/*
		 * Enable interrupt after SPA is set and before
		 * DSP is unstalled
		 */
		skl_ipc_int_enable(ctx);
		skl_ipc_op_int_enable(ctx);
		skl->boot_complete = false;
	}

	ret = skl_dsp_start_core(ctx, core_mask);
	if (ret < 0)
		goto err;

	if (core_id == SKL_DSP_CORE0_ID) {
		ret = wait_event_timeout(skl->boot_wait,
				skl->boot_complete,
				msecs_to_jiffies(SKL_IPC_BOOT_MSECS));

		/* If core 1 was turned on for booting core 0, turn it off */
		skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
		if (ret == 0) {
			dev_err(ctx->dev, "%s: DSP boot timeout\n", __func__);
			dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
				sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
				sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
			dev_err(ctx->dev, "Failed to set core0 to D0 state\n");
			ret = -EIO;
			goto err;
		}
	}

	/* Tell FW that an additional core is now on */

	if (core_id != SKL_DSP_CORE0_ID) {
		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
				BXT_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "IPC set_dx for core %d fail: %d\n",
					core_id, ret);
			goto err;
		}
	}

	skl->cores.state[core_id] = SKL_DSP_RUNNING;
	return 0;
err:
	if (core_id == SKL_DSP_CORE0_ID)
		core_mask |= SKL_DSP_CORE_MASK(1);
	skl_dsp_disable_core(ctx, core_mask);

	return ret;
}

static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_dev *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	dev_dbg(ctx->dev, "core mask=%x dx_mask=%x\n",
			dx.core_mask, dx.dx_mask);

	ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
				BXT_BASE_FW_MODULE_ID, &dx);
	if (ret < 0) {
		dev_err(ctx->dev,
			"Failed to set DSP to D3:core id = %d;Continue reset\n",
			core_id);
		/*
		 * In case of D3 failure, re-download the firmware, so set
		 * fw_loaded to false.
		 */
		skl->fw_loaded = false;
	}

	if (core_id == SKL_DSP_CORE0_ID) {
		/* disable Interrupt */
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}
	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to disable core: %d\n", ret);
		return ret;
	}
	skl->cores.state[core_id] = SKL_DSP_RESET;
	return 0;
}

static const struct skl_dsp_fw_ops bxt_fw_ops = {
	.set_state_D0 = bxt_set_dsp_D0,
	.set_state_D3 = bxt_set_dsp_D3,
	.set_state_D0i3 = bxt_schedule_dsp_D0i3,
	.set_state_D0i0 = bxt_set_dsp_D0i0,
	.load_fw = bxt_load_base_firmware,
	.get_fw_errcode = bxt_get_errorcode,
	.load_library = bxt_load_library,
};

static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.free = skl_dsp_free,
};

static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};

int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
			const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
			struct skl_dev **dsp)
{
	struct skl_dev *skl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	skl = *dsp;
	sst = skl->dsp;
	sst->fw_ops = bxt_fw_ops;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst->addr.sram0_base = BXT_ADSP_SRAM0_BASE;
	sst->addr.sram1_base = BXT_ADSP_SRAM1_BASE;
	sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
	sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;

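	/*
	 * IPC mailbox windows: one window lives in SRAM0 just past the FW
	 * status/error registers, the other at the start of SRAM1.
	 */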
	sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	ret = skl_ipc_init(dev, skl);
	if (ret) {
		skl_dsp_free(sst);
		return ret;
	}

	/* set the D0i3 check */
	skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0;

	skl->boot_complete = false;
	init_waitqueue_head(&skl->boot_wait);
	INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
	skl->d0i3.state = SKL_DSP_D0I3_NONE;

	return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);

int bxt_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
	int ret;
	struct sst_dsp *sst = skl->dsp;

	ret = sst->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "Load base fw failed: %x\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);

	if (skl->lib_count > 1) {
		ret = sst->fw_ops.load_library(sst, skl->lib_info,
						skl->lib_count);
		if (ret < 0) {
			dev_err(dev, "Load Library failed : %x\n", ret);
			return ret;
		}
	}
	skl->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(bxt_sst_init_fw);

void bxt_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{

	skl_release_library(skl->lib_info, skl->lib_count);
	if (skl->dsp->fw)
		release_firmware(skl->dsp->fw);
	skl_freeup_uuid_list(skl);
	skl_ipc_free(&skl->ipc);
	skl->dsp->ops->free(skl->dsp);
}
EXPORT_SYMBOL_GPL(bxt_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Broxton IPC driver");