// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include "mt7615.h"
#include "regs.h"
#include "mac.h"
#include "../trace.h"

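/* Register base offsets for MT7615E, indexed by the MT_*_BASE enums. */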
const u32 mt7615e_reg_map[] = {
	[MT_TOP_CFG_BASE] = 0x01000,
	[MT_HW_BASE] = 0x01000,
	[MT_PCIE_REMAP_2] = 0x02504,
	[MT_ARB_BASE] = 0x20c00,
	[MT_HIF_BASE] = 0x04000,
	[MT_CSR_BASE] = 0x07000,
	[MT_PLE_BASE] = 0x08000,
	[MT_PSE_BASE] = 0x0c000,
	[MT_CFG_BASE] = 0x20200,
	[MT_AGG_BASE] = 0x20a00,
	[MT_TMAC_BASE] = 0x21000,
	[MT_RMAC_BASE] = 0x21200,
	[MT_DMA_BASE] = 0x21800,
	[MT_PF_BASE] = 0x22000,
	[MT_WTBL_BASE_ON] = 0x23000,
	[MT_WTBL_BASE_OFF] = 0x23400,
	[MT_LPON_BASE] = 0x24200,
	[MT_MIB_BASE] = 0x24800,
	[MT_WTBL_BASE_ADDR] = 0x30000,
	[MT_PCIE_REMAP_BASE2] = 0x80000,
	[MT_TOP_MISC_BASE] = 0xc0000,
	[MT_EFUSE_ADDR_BASE] = 0x81070000,
};

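/* Register base offsets for MT7663E; the block layout differs from MT7615E. */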
const u32 mt7663e_reg_map[] = {
	[MT_TOP_CFG_BASE] = 0x01000,
	[MT_HW_BASE] = 0x02000,
	[MT_DMA_SHDL_BASE] = 0x06000,
	[MT_PCIE_REMAP_2] = 0x0700c,
	[MT_ARB_BASE] = 0x20c00,
	[MT_HIF_BASE] = 0x04000,
	[MT_CSR_BASE] = 0x07000,
	[MT_PLE_BASE] = 0x08000,
	[MT_PSE_BASE] = 0x0c000,
	[MT_PP_BASE] = 0x0e000,
	[MT_CFG_BASE] = 0x20000,
	[MT_AGG_BASE] = 0x22000,
	[MT_TMAC_BASE] = 0x24000,
	[MT_RMAC_BASE] = 0x25000,
	[MT_DMA_BASE] = 0x27000,
	[MT_PF_BASE] = 0x28000,
	[MT_WTBL_BASE_ON] = 0x29000,
	[MT_WTBL_BASE_OFF] = 0x29800,
	[MT_LPON_BASE] = 0x2b000,
	[MT_MIB_BASE] = 0x2d000,
	[MT_WTBL_BASE_ADDR] = 0x30000,
	[MT_PCIE_REMAP_BASE2] = 0x90000,
	[MT_TOP_MISC_BASE] = 0xc0000,
	[MT_EFUSE_ADDR_BASE] = 0x78011000,
};

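/* Re-enable the RX done interrupt for queue @q once its NAPI poll completes. */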
static void
mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);

	mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
}

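/*
 * Hard IRQ handler: mask all interrupts and defer the actual
 * processing to the tasklet.
 */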
static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
75 {
76 struct mt7615_dev *dev = dev_instance;
77
78 mt76_wr(dev, MT_INT_MASK_CSR, 0);
79
80 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
81 return IRQ_NONE;
82
83 tasklet_schedule(&dev->irq_tasklet);
84
85 return IRQ_HANDLED;
86 }
87
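/*
 * Deferred interrupt processing: ack the pending sources, keep the RX
 * done and MCU TX completion interrupts masked while NAPI polls them,
 * and queue the reset worker when the MCU reports a command error.
 */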
static void mt7615_irq_tasklet(struct tasklet_struct *t)
{
	struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet);
	u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
	u32 mcu_int;

	mt76_wr(dev, MT_INT_MASK_CSR, 0);

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask |= intr & MT_INT_RX_DONE_ALL;
	if (intr & tx_mcu_mask)
		mask |= tx_mcu_mask;
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);

	if (intr & tx_mcu_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & MT_INT_RX_DONE(0))
		napi_schedule(&dev->mt76.napi[0]);

	if (intr & MT_INT_RX_DONE(1))
		napi_schedule(&dev->mt76.napi[1]);

	if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD)))
		return;

	if (is_mt7663(&dev->mt76)) {
		mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS);
		mcu_int &= MT7663_MCU_CMD_ERROR_MASK;
		mt76_wr(dev, MT_MCU2HOST_INT_STATUS, mcu_int);
	} else {
		mcu_int = mt76_rr(dev, MT_MCU_CMD);
		mcu_int &= MT_MCU_CMD_ERROR_MASK;
	}

	if (!mcu_int)
		return;

	dev->reset_state = mcu_int;
	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

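/*
 * Register offsets below 0x100000 map directly into the MMIO window;
 * anything above is translated through mt7615_reg_map().
 */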
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
{
	if (addr < 0x100000)
		return addr;

	return mt7615_reg_map(dev, addr);
}

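/*
 * Bus op wrappers: translate the register offset first, then delegate
 * to the original bus ops saved in dev->bus_ops.
 */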
static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset)
145 {
146 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
147 u32 addr = __mt7615_reg_addr(dev, offset);
148
149 return dev->bus_ops->rr(mdev, addr);
150 }
151
static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val)
153 {
154 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
155 u32 addr = __mt7615_reg_addr(dev, offset);
156
157 dev->bus_ops->wr(mdev, addr, val);
158 }
159
static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
161 {
162 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
163 u32 addr = __mt7615_reg_addr(dev, offset);
164
165 return dev->bus_ops->rmw(mdev, addr, mask, val);
166 }
167
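/*
 * Shared MMIO probe path: wraps the bus ops with the register
 * translation helpers, sets up IRQ handling and registers the device.
 */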
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
		      int irq, const u32 *map)
{
	static const struct mt76_driver_ops drv_ops = {
		/* txwi_size = txd size + txp size */
		.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_txp_common),
		.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
		.survey_flags = SURVEY_INFO_TIME_TX |
				SURVEY_INFO_TIME_RX |
				SURVEY_INFO_TIME_BSS_RX,
		.token_size = MT7615_TOKEN_SIZE,
		.tx_prepare_skb = mt7615_tx_prepare_skb,
		.tx_complete_skb = mt76_connac_tx_complete_skb,
		.rx_check = mt7615_rx_check,
		.rx_skb = mt7615_queue_rx_skb,
		.rx_poll_complete = mt7615_rx_poll_complete,
		.sta_ps = mt7615_sta_ps,
		.sta_add = mt7615_mac_sta_add,
		.sta_remove = mt7615_mac_sta_remove,
		.update_survey = mt7615_update_channel,
	};
	struct mt76_bus_ops *bus_ops;
	struct ieee80211_ops *ops;
	struct mt7615_dev *dev;
	struct mt76_dev *mdev;
	int ret;

	ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
	if (!mdev)
		return -ENOMEM;

	dev = container_of(mdev, struct mt7615_dev, mt76);
	mt76_mmio_init(&dev->mt76, mem_base);
	tasklet_setup(&dev->irq_tasklet, mt7615_irq_tasklet);

	dev->reg_map = map;
	dev->ops = ops;
	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
		    (mt76_rr(dev, MT_HW_REV) & 0xff);
	dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);

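	/* Wrap the default bus ops so register accesses go through the remap helpers. */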
	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops) {
		ret = -ENOMEM;
		goto err_free_dev;
	}

	bus_ops->rr = mt7615_rr;
	bus_ops->wr = mt7615_wr;
	bus_ops->rmw = mt7615_rmw;
	dev->mt76.bus = bus_ops;

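	/* Mask all interrupts before requesting the (shared) IRQ line. */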
	mt76_wr(dev, MT_INT_MASK_CSR, 0);

	ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto err_free_dev;

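	/* MT7663 additionally needs the PCIe interrupt enable bit set. */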
	if (is_mt7663(mdev))
		mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);

	ret = mt7615_register_device(dev);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	devm_free_irq(pdev, irq, dev);
err_free_dev:
	mt76_free_device(&dev->mt76);

	return ret;
}

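/*
 * Module entry points: register the PCI driver and, when MT7622 WMAC
 * support is built in, the SoC platform driver as well.
 */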
static int __init mt7615_init(void)
251 {
252 int ret;
253
254 ret = pci_register_driver(&mt7615_pci_driver);
255 if (ret)
256 return ret;
257
258 if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
259 ret = platform_driver_register(&mt7622_wmac_driver);
260 if (ret)
261 pci_unregister_driver(&mt7615_pci_driver);
262 }
263
264 return ret;
265 }
266
static void __exit mt7615_exit(void)
{
	if (IS_ENABLED(CONFIG_MT7622_WMAC))
		platform_driver_unregister(&mt7622_wmac_driver);
	pci_unregister_driver(&mt7615_pci_driver);
}

module_init(mt7615_init);
module_exit(mt7615_exit);
MODULE_LICENSE("Dual BSD/GPL");