// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2022 Intel Corporation
 */
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"

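/*
 * iwl_pcie_ctxt_info_dbg_enable - set up the firmware debug destination
 *
 * Fills the hardware monitor (hwm) configuration in the PRPH scratch with
 * the base address and size of the debug monitor buffer, and ORs the
 * matching destination bits into @control_flags so that early debug is
 * enabled when a usable buffer exists.
 */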
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
			      struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
			      u32 *control_flags)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 dbg_flags = 0;

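	/*
	 * Without a valid ini (TLV-based) debug configuration, fall back
	 * to the legacy driver-allocated DRAM monitor buffer.
	 */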
	if (!iwl_trans_dbg_ini_valid(trans)) {
		struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (fw_mon->size) {
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM buffer destination\n");

			dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
			dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
		}

		goto out;
	}

	fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];

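	/* route the monitor to the destination requested by the ini TLVs */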
	switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
	case IWL_FW_INI_LOCATION_SRAM_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying SMEM buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_NPK_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying NPK buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_DRAM_PATH:
		if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
			struct iwl_dram_data *frag =
				&trans->dbg.fw_mon_ini[alloc_id].frags[0];

			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
			dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
			dbg_cfg->hwm_size = cpu_to_le32(frag->size);
			dbg_cfg->debug_token_config =
				cpu_to_le32(trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (debug_token_config=%u)\n",
				     trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
				     alloc_id,
				     trans->dbg.fw_mon_ini[alloc_id].num_frags);
		}
		break;
	default:
		IWL_ERR(trans, "WRT: Invalid buffer destination\n");
	}
out:
	if (dbg_flags)
		*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}

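/*
 * iwl_pcie_ctxt_info_gen3_init - build the gen3 context info and start boot
 *
 * Allocates the DMA-coherent structures the device reads during self boot
 * (PRPH scratch, PRPH info, context info, IML), fills in the addresses of
 * the firmware sections and rings, and then kicks the firmware self load.
 */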
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info_gen3 *ctxt_info_gen3;
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	u32 control_flags = 0;
	int ret;
	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			      trans->cfg->min_txq_size);

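	/*
	 * Encode the RX buffer size in the control flags. 8K and 12K
	 * buffers are advertised as 4K plus an extension flag, which
	 * the firmware only honours if it supports the extended sizes.
	 */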
	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_DEF:
		return -EINVAL;
	case IWL_AMSDU_2K:
		break;
	case IWL_AMSDU_4K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		break;
	case IWL_AMSDU_12K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
		break;
	}

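	/*
	 * The PRPH scratch is shared with the firmware; it carries the
	 * control flags, the RX free-ring address, the debug monitor
	 * configuration and the DRAM addresses of the ucode sections.
	 */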
	/* Allocate prph scratch */
	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
					  &trans_pcie->prph_scratch_dma_addr,
					  GFP_KERNEL);
	if (!prph_scratch)
		return -ENOMEM;

	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

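	/* enable MTR mode and select the 256-byte MTR format */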
	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;

	if (trans->trans_cfg->imr_enabled)
		control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
		cpu_to_le64(trans_pcie->rxq->bd_dma);

	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
				      &control_flags);
	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);

	/* initialize the Step equalizer data */
	prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
	prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
	if (ret)
		goto err_free_prph_scratch;

	/*
	 * Allocate prph information. Currently we don't assign anything
	 * to the prph info; it gets filled in later.
	 *
	 * We also use the second half of this page to give the device some
	 * dummy TR/CR tail pointers - which shouldn't be necessary as we
	 * don't use this, but the hardware still reads/writes there and we
	 * can't let it go do that with a NULL pointer.
	 */
	BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
	prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info) {
		ret = -ENOMEM;
		goto err_free_prph_scratch;
	}

	/* Allocate context info */
	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
					    sizeof(*ctxt_info_gen3),
					    &trans_pcie->ctxt_info_dma_addr,
					    GFP_KERNEL);
	if (!ctxt_info_gen3) {
		ret = -ENOMEM;
		goto err_free_prph_info;
	}

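	/*
	 * The context info is the root structure the device fetches at
	 * boot: it points at the PRPH info/scratch, the command queue
	 * transfer ring (MTR) and the default RX completion ring (MCR),
	 * with the TR/CR tail index arrays living in the prph_info page.
	 */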
	ctxt_info_gen3->prph_info_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr);
	ctxt_info_gen3->prph_scratch_base_addr =
		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
	ctxt_info_gen3->prph_scratch_size =
		cpu_to_le32(sizeof(*prph_scratch));
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
		cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
	ctxt_info_gen3->mcr_base_addr =
		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	ctxt_info_gen3->mtr_size =
		cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
	ctxt_info_gen3->mcr_size =
		cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));

	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
	trans_pcie->prph_info = prph_info;
	trans_pcie->prph_scratch = prph_scratch;

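	/*
	 * The IML (image loader) bootstraps the firmware: copy it into
	 * DMA-able memory so the device can fetch it from the address
	 * programmed below.
	 */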
	/* Allocate IML */
	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
					     &trans_pcie->iml_dma_addr,
					     GFP_KERNEL);
	if (!trans_pcie->iml) {
		ret = -ENOMEM;
		goto err_free_ctxt_info;
	}

	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

	iwl_enable_fw_load_int_ctx_info(trans);

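	/*
	 * Programming the context info and IML addresses and then setting
	 * CSR_AUTO_FUNC_BOOT_ENA makes the device ROM fetch them from host
	 * memory and boot the firmware on its own.
	 */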
	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
		    trans_pcie->ctxt_info_dma_addr);
	iwl_write64(trans, CSR_IML_DATA_ADDR,
		    trans_pcie->iml_dma_addr);
	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);

	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
		    CSR_AUTO_FUNC_BOOT_ENA);

	return 0;

err_free_ctxt_info:
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
			  trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
	dma_free_coherent(trans->dev,
			  sizeof(*prph_scratch),
			  prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	return ret;
}

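/*
 * Free the boot-time context structures. With @alive set, only the IML
 * copy and the firmware section images are released; the structures the
 * running firmware may still reference are kept.
 */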
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->iml) {
		dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
				  trans_pcie->iml_dma_addr);
		trans_pcie->iml_dma_addr = 0;
		trans_pcie->iml = NULL;
	}

	iwl_pcie_ctxt_info_free_fw_img(trans);

	/* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
	if (alive)
		return;

	if (!trans_pcie->ctxt_info_gen3)
		return;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_gen3 = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	/* the hardware uses prph_info for the entire lifetime, free it last */
	dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;
}

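/*
 * Publish the PNVM (platform NVM) data to the firmware: copy it into DRAM
 * once, then point the PRPH scratch at the buffer so the firmware can load
 * it during boot.
 */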
int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
					  const void *data, u32 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	int ret;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	/* only allocate the DRAM if not allocated yet */
	if (!trans->pnvm_loaded) {
		if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
			return -EBUSY;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
						   &trans_pcie->pnvm_dram);
		if (ret < 0) {
			IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
				     ret);
			return ret;
		}
	}

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(trans_pcie->pnvm_dram.physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(trans_pcie->pnvm_dram.size);

	return 0;
}

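/*
 * Same flow as the PNVM: copy the reduce-power table into DRAM once and
 * publish its address/size in the PRPH scratch for the firmware to use.
 */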
int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
						  const void *data, u32 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	int ret;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	/* only allocate the DRAM if not allocated yet */
	if (!trans->reduce_power_loaded) {
		if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
			return -EBUSY;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
						   &trans_pcie->reduce_power_dram);
		if (ret < 0) {
			IWL_DEBUG_FW(trans,
				     "Failed to allocate reduce power DMA %d.\n",
				     ret);
			return ret;
		}
	}

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(trans_pcie->reduce_power_dram.physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(trans_pcie->reduce_power_dram.size);

	return 0;
}