// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "registers.h"
#include "trace.h"

#define AVS_ADSPCS_INTERVAL_US	500
#define AVS_ADSPCS_TIMEOUT_US	50000
#define AVS_ADSPCS_DELAY_US	1000

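/*
 * Power the selected cores up or down by programming the SPA bits of
 * ADSPCS and then polling the matching CPA bits until the hardware
 * confirms the power state change.
 */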
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "power", power);

	mask = AVS_ADSPCS_SPA_MASK(core_mask);
	value = power ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
	/* Delay the polling to avoid false positives. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);

	mask = AVS_ADSPCS_CPA_MASK(core_mask);
	value = power ? mask : 0;

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d power %s failed: %d\n",
			core_mask, power ? "on" : "off", ret);

	return ret;
}

int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "reset", reset);

	mask = AVS_ADSPCS_CRST_MASK(core_mask);
	value = reset ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d %s reset failed: %d\n",
			core_mask, reset ? "enter" : "exit", ret);

	return ret;
}

int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "stall", stall);

	mask = AVS_ADSPCS_CSTALL_MASK(core_mask);
	value = stall ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret) {
		dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
			core_mask, stall ? "" : "un", ret);
		return ret;
	}

	/* Give HW time to propagate the change. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);
	return 0;
}

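/* Bring cores fully online: power up, take out of reset, then unstall. */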
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_dsp_op(adev, power, core_mask, true);
	if (ret)
		return ret;

	ret = avs_dsp_op(adev, reset, core_mask, false);
	if (ret)
		return ret;

	return avs_dsp_op(adev, stall, core_mask, false);
}

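/* Mirror of avs_dsp_core_enable(): stall, assert reset, then power down. */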
int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask)
{
	/* No error checks to allow for complete DSP shutdown. */
	avs_dsp_op(adev, stall, core_mask, true);
	avs_dsp_op(adev, reset, core_mask, true);

	return avs_dsp_op(adev, power, core_mask, false);
}

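/*
 * Enable cores on the hardware level and, for any core other than the
 * main one, tell the firmware with a SET_DX request that the core is in
 * use.
 */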
static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask)
{
	u32 mask;
	int ret;

	ret = avs_dsp_core_enable(adev, core_mask);
	if (ret < 0)
		return ret;

	mask = core_mask & ~AVS_MAIN_CORE_MASK;
	if (!mask)
		/*
		 * Without the main core, fw is dead anyway,
		 * so setting D0 for it is futile.
		 */
		return 0;

	ret = avs_ipc_set_dx(adev, mask, true);
	return AVS_IPC_RET(ret);
}

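/* Notify the firmware with SET_DX before powering the cores down. */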
static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_ipc_set_dx(adev, core_mask, false);
	if (ret)
		return AVS_IPC_RET(ret);

	return avs_dsp_core_disable(adev, core_mask);
}

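/*
 * Reference-counted acquisition of a secondary core. The first user
 * disables d0ix and powers the core up; the main core requires no such
 * bookkeeping.
 */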
static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]++;
	if (adev->core_refs[core_id] == 1) {
		/*
		 * For the DSP to achieve d0ix, no cores other than the main
		 * core may be running. A deliberate SET_D0IX IPC failure is
		 * permitted; the d0ix power state will simply no longer be
		 * attempted.
		 */
		ret = avs_dsp_disable_d0ix(adev);
		if (ret && ret != -AVS_EIPC)
			goto err_disable_d0ix;

		ret = avs_dsp_enable(adev, mask);
		if (ret)
			goto err_enable_dsp;
	}

	return 0;

err_enable_dsp:
	avs_dsp_enable_d0ix(adev);
err_disable_d0ix:
	adev->core_refs[core_id]--;
err:
	dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
	return ret;
}

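/*
 * Drop a reference to a secondary core. The last user powers the core
 * down and re-enables d0ix to match avs_dsp_get_core().
 */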
static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]--;
	if (!adev->core_refs[core_id]) {
		ret = avs_dsp_disable(adev, mask);
		if (ret)
			goto err;

		/* Match disable_d0ix in avs_dsp_get_core(). */
		avs_dsp_enable_d0ix(adev);
	}

	return 0;
err:
	dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret);
	return ret;
}

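/*
 * Instantiate a module on the DSP: allocate an instance id, transfer the
 * module's code if this is its first instance, and ask the firmware to
 * initialize the instance. On success, the assigned id is returned
 * through @instance_id.
 */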
int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
			u8 core_id, u8 domain, void *param, u32 param_size,
			u16 *instance_id)
{
	struct avs_module_entry mentry;
	bool was_loaded = false;
	int ret, id;

	id = avs_module_id_alloc(adev, module_id);
	if (id < 0)
		return id;

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	if (ret)
		goto err_mod_entry;

	ret = avs_dsp_get_core(adev, core_id);
	if (ret)
		goto err_mod_entry;

	/* Load code into memory if this is the first instance. */
	if (!id && !avs_module_entry_is_loaded(&mentry)) {
		ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1);
		if (ret) {
			dev_err(adev->dev, "load modules failed: %d\n", ret);
			goto err_mod_entry;
		}
		was_loaded = true;
	}

	ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id,
				    core_id, domain, param, param_size);
	if (ret) {
		ret = AVS_IPC_RET(ret);
		goto err_ipc;
	}

	*instance_id = id;
	return 0;

err_ipc:
	if (was_loaded)
		avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
	avs_dsp_put_core(adev, core_id);
err_mod_entry:
	avs_module_id_free(adev, module_id, id);
	return ret;
}

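/*
 * Undo avs_dsp_init_module(): free the instance id, unload the module's
 * code once its last instance is gone and drop the core reference.
 */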
void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
			   u8 ppl_instance_id, u8 core_id)
{
	struct avs_module_entry mentry;
	int ret;

	/* Modules not owned by any pipeline need to be freed explicitly. */
	if (ppl_instance_id == INVALID_PIPELINE_ID)
		avs_ipc_delete_instance(adev, module_id, instance_id);

	avs_module_id_free(adev, module_id, instance_id);

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	/* Unload occupied memory if this was the last instance. */
	if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) {
		if (avs_is_module_ida_empty(adev, module_id)) {
			ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
			if (ret)
				dev_err(adev->dev, "unload modules failed: %d\n", ret);
		}
	}

	avs_dsp_put_core(adev, core_id);
}

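/*
 * Allocate a pipeline id, bounded by the firmware-reported maximum, and
 * ask the firmware to create the pipeline. The id is released again if
 * the IPC request fails.
 */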
int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
			    bool lp, u16 attributes, u8 *instance_id)
{
	struct avs_fw_cfg *fw_cfg = &adev->fw_cfg;
	int ret, id;

	id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes);
	if (ret) {
		ida_free(&adev->ppl_ida, id);
		return AVS_IPC_RET(ret);
	}

	*instance_id = id;
	return 0;
}

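/* Ask the firmware to delete the pipeline and release its id. */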
int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id)
{
	int ret;

	ret = avs_ipc_delete_pipeline(adev, instance_id);
	if (ret)
		ret = AVS_IPC_RET(ret);

	ida_free(&adev->ppl_ida, instance_id);
	return ret;
}