// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

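/* Program the MAC-side channel settings: TX subcarrier indexes for the
 * 20M/40M data paths, the RF bandwidth mode in REG_WMAC_TRXPTCL_CTL and,
 * on 11ac-WCPU chips, the MAC clock and the CCK check bit for 5 GHz.
 */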
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

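/* Pre-power-on configuration: release REG_RSV_CTRL, select LDO/SPS mode on
 * 11n-WCPU chips, and on 11ac parts set up the HCI option, pin mux, and
 * hold BB/RF in reset before the power-on sequence runs.
 */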
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

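/* Poll an 8-bit register until (value & mask) matches the target, waiting
 * up to 50 * RTW_PWR_POLLING_CNT microseconds; returns true on a match.
 */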
static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

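/* Execute one RTW_PWR_CMD_POLLING entry. If the first poll fails on PCIe,
 * toggle BIT_PFM_WOWL in REG_SYS_PW_CTRL and poll once more before giving up.
 */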
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}

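/* Walk a single power-sequence command table and apply every entry whose
 * interface mask and cut mask match this device; stops at RTW_PWR_CMD_END.
 */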
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

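/* Run each command table in the NULL-terminated power-sequence array,
 * selecting entries by HCI type and chip cut version.
 */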
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = RTW_PWR_INTF_PCI_MSK;
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = RTW_PWR_INTF_USB_MSK;
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return -EBUSY;

		idx++;
	} while (1);

	return 0;
}

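/* Switch MAC power on or off. The current power state is inferred from
 * REG_CR (and REG_SYS_STATUS1 on USB); returns -EALREADY if the MAC is
 * already in the requested state, otherwise runs the chip's on/off sequence
 * and updates RTW_FLAG_POWERON.
 */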
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check whether the FW still exists */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
		return -EINVAL;

	if (pwr_on)
		set_bit(RTW_FLAG_POWERON, rtwdev->flags);
	else
		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);

	return 0;
}

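/* Post-power-on system setup for 11ac-WCPU chips: reset the WLAN platform,
 * enable DDMA, set the per-chip sys_func_en bits, and disable boot-from-flash
 * so the driver can download firmware itself.
 */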
static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);

		ret = rtw_mac_pre_system_cfg(rtwdev);
		if (ret)
			goto err;

		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed\n");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

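/* Validate that the firmware image length matches the sizes declared in its
 * header (dmem + imem + optional emem, each followed by a checksum).
 */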
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

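/* Save and reprogram the registers touched during firmware download: the
 * TXDMA queue mapping, REG_CR, the H2C queue CSR, the high/public queue page
 * numbers, and beacon control. Exactly DLFW_RESTORE_REG_NUM entries are saved.
 */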
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to high priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only uses HIQ, so map HIQ to high priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config high priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon-related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

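/* Download one firmware memory region: split it into chunks of at most
 * 0x1000 bytes, push each chunk into the TX buffer via a reserved page,
 * DDMA it to its OCP destination, then verify the accumulated checksum.
 */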
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

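/* Parse the firmware header and download the DMEM, IMEM, and (when present)
 * EMEM sections to the addresses given in the header, with firmware download
 * enabled in REG_MCUFW_CTRL.
 */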
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check whether the IMEM & DMEM checksums are OK */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

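/* Full firmware download for 11ac-WCPU chips: validate the image size, halt
 * the WLAN CPU, back up and reprogram the download registers, DDMA the image
 * sections into place, restore state, restart the CPU, and wait for the
 * firmware-ready indication. LTE-coex register 0x38 is preserved throughout.
 */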
static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");
fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}

static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}

static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

static
int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	int ret;

	ret = _rtw_download_firmware(rtwdev, fw);
	if (ret)
		return ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_fw_set_recover_bt_device(rtwdev);

	return 0;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				    rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				     rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* The priority queue is still not empty, so warn about it.
	 *
	 * Note that if we flush the tx queue under heavy traffic
	 * (e.g. 100 Mbps uplink), some packets could be dropped, and
	 * flushing a full priority queue can take around 2 seconds.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

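/* Flush the hardware priority queues backing the given mac80211 queues; if
 * all queues are requested or no RQPN mapping exists yet, flush every
 * priority queue.
 */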
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

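/* Select the RQPN table matching the HCI type (and USB bulk-out count),
 * program the TXDMA queue-to-priority mapping, and re-enable TRX in REG_CR.
 */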
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
		rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_ARBBW_EN);

	return 0;
}

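/* Compute the TX FIFO page layout: reserve pages for the driver (plus H2C,
 * CPU-instruction, FW TX buffer, and CSI buffer pages on 11ac chips), derive
 * the AC-queue page count, and record the reserved-page addresses.
 */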
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				    RSVD_PG_H2C_EXTRAINFO_NUM +
				    RSVD_PG_H2C_STATICINFO_NUM +
				    RSVD_PG_H2CQ_NUM +
				    RSVD_PG_CPU_INSTRUCTION_NUM +
				    RSVD_PG_FW_TXBUF_NUM +
				    csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}

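/* Initialize the H2C queue in the reserved-page area (11ac WCPU only):
 * program the head/read/tail pointers from rsvd_h2cq_addr and verify that
 * the queue starts out empty.
 */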
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

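/* Top-level MAC init: set up TRX (TXDMA mapping, priority queues, H2C queue),
 * run the chip-specific mac_init callback, enable PHY status reporting, and
 * apply the HCI interface configuration.
 */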
int rtw_mac_init(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}