1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc.
3 *
4 * This file is written based on mt76/usb.c.
5 *
6 * Author: Felix Fietkau <nbd@nbd.name>
7 * Lorenzo Bianconi <lorenzo@kernel.org>
8 * Sean Wang <sean.wang@mediatek.com>
9 */
10
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/mmc/sdio_func.h>
15 #include <linux/sched.h>
16 #include <linux/kthread.h>
17
18 #include "mt76.h"
19 #include "sdio.h"
20
mt76s_read_whisr(struct mt76_dev * dev)21 static u32 mt76s_read_whisr(struct mt76_dev *dev)
22 {
23 return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
24 }
25
mt76s_read_pcr(struct mt76_dev * dev)26 u32 mt76s_read_pcr(struct mt76_dev *dev)
27 {
28 struct mt76_sdio *sdio = &dev->sdio;
29
30 return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
31 }
32 EXPORT_SYMBOL_GPL(mt76s_read_pcr);
33
/* Read a 32-bit device register through the firmware mailbox: write the
 * target address to H2DSM0R, raise the "read" software interrupt, wait for
 * the device to ack it in WHISR, then fetch the value from D2HRM1R.
 * Returns ~0 on any failure.  Used only while the MCU is not yet handling
 * register access (see mt76s_rr()).
 */
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	/* tell the firmware which register we want to read */
	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	/* kick the host-to-device "read" software interrupt */
	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	/* wait (up to 1s) for the device to reflect the request in WHISR */
	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	/* acknowledge the software interrupt */
	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	/* the device echoes the address back; a mismatch means the mailbox
	 * handshake went wrong, so report failure (~0)
	 */
	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	/* the actual register value is delivered in D2HRM1R */
	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}
88
/* Write a 32-bit device register through the firmware mailbox: address in
 * H2DSM0R, value in H2DSM1R, then raise the "write" software interrupt
 * and wait for the device to ack it.  Errors are logged but not
 * propagated to the caller.  Used only while the MCU is not yet handling
 * register access (see mt76s_wr()).
 */
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	/* kick the host-to-device "write" software interrupt */
	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	/* wait (up to 1s) for the device to reflect the request in WHISR */
	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	/* acknowledge the software interrupt */
	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	/* the device echoes the address back on a successful handshake */
	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}
141
/* Bus .rr hook: once the MCU is up, register reads go through the MCU
 * command path; before that, fall back to the mailbox handshake.
 */
u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
	bool mcu_up = test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state);

	return mcu_up ? dev->mcu_ops->mcu_rr(dev, offset)
		      : mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);
150
/* Bus .wr hook: once the MCU is up, register writes go through the MCU
 * command path; before that, fall back to the mailbox handshake.
 */
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	bool mcu_up = test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state);

	if (mcu_up)
		dev->mcu_ops->mcu_wr(dev, offset, val);
	else
		mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);
159
/* Bus .rmw hook: read @offset, clear @mask bits, set @val bits, write the
 * result back and return it.
 */
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	u32 cur = mt76s_rr(dev, offset);

	cur = (cur & ~mask) | val;
	mt76s_wr(dev, offset, cur);

	return cur;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);
168
/* Bus .write_copy hook: copy @len bytes from @data to consecutive 32-bit
 * registers starting at @offset.  Any trailing partial word is ignored.
 */
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len)
{
	const u32 *src = data;
	size_t n = len / sizeof(u32);
	size_t i;

	for (i = 0; i < n; i++)
		mt76s_wr(dev, offset + i * sizeof(u32), src[i]);
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);
181
/* Bus .read_copy hook: copy consecutive 32-bit registers starting at
 * @offset into @data.  Any trailing partial word is ignored.
 */
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	u32 *dst = data;
	size_t n = len / sizeof(u32);
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = mt76s_rr(dev, offset + i * sizeof(u32));
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);
194
/* Bus .wr_rp hook: write a batch of register/value pairs.  @base is
 * unused on this bus.  Always returns 0.
 */
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len)
{
	int i;

	for (i = 0; i < len; i++)
		mt76s_wr(dev, data[i].reg, data[i].value);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);
209
/* Bus .rd_rp hook: read a batch of registers into their paired value
 * slots.  @base is unused on this bus.  Always returns 0.
 */
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len)
{
	int i;

	for (i = 0; i < len; i++)
		data[i].value = mt76s_rr(dev, data[i].reg);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);
223
/* Bring up the SDIO function: enable it, take ownership of the device
 * from the firmware, configure block size and interrupt masks, and
 * install the SDIO irq handler.  On failure the function is disabled
 * again.  @hw_ver selects between the first-generation CONNAC and the
 * CONNAC2 register layouts.  Returns 0 or a negative errno.
 */
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	/* poll (2ms interval, 1s timeout) until the fw hands the bus over */
	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	/* rx1-done interrupts are only wired up on CONNAC2 parts */
	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		/* CONNAC2: read-modify-write WHCR, selecting read-clear
		 * mode and a zero rx aggregation count
		 */
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);
301
mt76s_alloc_rx_queue(struct mt76_dev * dev,enum mt76_rxq_id qid)302 int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
303 {
304 struct mt76_queue *q = &dev->q_rx[qid];
305
306 spin_lock_init(&q->lock);
307 q->entry = devm_kcalloc(dev->dev,
308 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
309 GFP_KERNEL);
310 if (!q->entry)
311 return -ENOMEM;
312
313 q->ndesc = MT_NUM_RX_ENTRIES;
314 q->head = q->tail = 0;
315 q->queued = 0;
316
317 return 0;
318 }
319 EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);
320
mt76s_alloc_tx_queue(struct mt76_dev * dev)321 static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
322 {
323 struct mt76_queue *q;
324
325 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
326 if (!q)
327 return ERR_PTR(-ENOMEM);
328
329 spin_lock_init(&q->lock);
330 q->entry = devm_kcalloc(dev->dev,
331 MT_NUM_TX_ENTRIES, sizeof(*q->entry),
332 GFP_KERNEL);
333 if (!q->entry)
334 return ERR_PTR(-ENOMEM);
335
336 q->ndesc = MT_NUM_TX_ENTRIES;
337
338 return q;
339 }
340
mt76s_alloc_tx(struct mt76_dev * dev)341 int mt76s_alloc_tx(struct mt76_dev *dev)
342 {
343 struct mt76_queue *q;
344 int i;
345
346 for (i = 0; i <= MT_TXQ_PSD; i++) {
347 q = mt76s_alloc_tx_queue(dev);
348 if (IS_ERR(q))
349 return PTR_ERR(q);
350
351 q->qid = i;
352 dev->phy.q_tx[i] = q;
353 }
354
355 q = mt76s_alloc_tx_queue(dev);
356 if (IS_ERR(q))
357 return PTR_ERR(q);
358
359 q->qid = MT_MCUQ_WM;
360 dev->q_mcu[MT_MCUQ_WM] = q;
361
362 return 0;
363 }
364 EXPORT_SYMBOL_GPL(mt76s_alloc_tx);
365
366 static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue * q)367 mt76s_get_next_rx_entry(struct mt76_queue *q)
368 {
369 struct mt76_queue_entry *e = NULL;
370
371 spin_lock_bh(&q->lock);
372 if (q->queued > 0) {
373 e = &q->entry[q->tail];
374 q->tail = (q->tail + 1) % q->ndesc;
375 q->queued--;
376 }
377 spin_unlock_bh(&q->lock);
378
379 return e;
380 }
381
/* Drain rx queue @q, handing each queued skb to the driver's rx handler.
 * Stops as soon as the queue is empty or the device is no longer
 * initialized.  Returns the number of frames delivered.
 * NOTE(review): frames are always reported on MT_RXQ_MAIN even when @q is
 * a different rx queue — presumably intentional for this hardware
 * generation; confirm against the rx_skb implementations.
 */
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	/* index of this queue within dev->q_rx[] */
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}
	/* refill/poll-complete accounting only applies to the main queue */
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}
407
/* rx worker: repeatedly sweep all rx queues and deliver pending frames,
 * looping until a full pass produces nothing.  BHs are disabled and an
 * RCU read lock is held around each pass — presumably to match the
 * softirq/NAPI-like context the mt76 rx path expects; confirm against
 * mt76_rx_poll_complete() requirements.
 */
static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}
428
/* Reap completed entries from tx queue @q.  Entries are consumed from
 * q->tail, stopping at the first one whose "done" flag is still clear.
 * NOTE(review): q->tail is read without q->lock here — presumably this
 * worker is the sole consumer and the done flag is the producer/consumer
 * hand-off; confirm against the txrx worker.  MCU command skbs are freed
 * directly (no status reporting); data frames are completed through
 * mt76_queue_tx_complete().  Returns the number of reaped frames.
 */
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	/* queue fully drained: wake anyone waiting in mt76_tx_wait_empty() */
	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}
460
/* tx completion worker: reap finished entries from the MCU queue and all
 * data tx queues until a full pass completes nothing.  Completed data
 * frames free hardware slots, so the txrx worker is rescheduled to push
 * out more pending traffic.  stat_work is kicked at most once at a time
 * (guarded by MT76_READING_STATS) to poll tx status from the device.
 */
static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		if (ndata_frames > 0)
			resched = true;

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->wq, &dev->sdio.stat_work);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->sdio.txrx_worker);
}
489
mt76s_tx_status_data(struct work_struct * work)490 static void mt76s_tx_status_data(struct work_struct *work)
491 {
492 struct mt76_sdio *sdio;
493 struct mt76_dev *dev;
494 u8 update = 1;
495 u16 count = 0;
496
497 sdio = container_of(work, struct mt76_sdio, stat_work);
498 dev = container_of(sdio, struct mt76_dev, sdio);
499
500 while (true) {
501 if (test_bit(MT76_REMOVED, &dev->phy.state))
502 break;
503
504 if (!dev->drv->tx_status_data(dev, &update))
505 break;
506 count++;
507 }
508
509 if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
510 queue_work(dev->wq, &sdio->stat_work);
511 else
512 clear_bit(MT76_READING_STATS, &dev->phy.state);
513 }
514
/* .tx_queue_skb hook: stage one data/mgmt frame on @q after letting the
 * driver build its tx descriptor.  Returns the ring slot index on success
 * or a negative errno.
 * NOTE(review): q->head/q->queued are updated without taking q->lock —
 * presumably the mt76 core calls this hook with the queue lock already
 * held; confirm against the mt76 tx path.
 */
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	/* detach the skb from any list it may still be threaded on */
	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	/* buf_sz records the pre-descriptor length for completion accounting */
	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	/* publish the entry fields before advancing the producer index */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}
545
/* .tx_queue_skb_raw hook: queue an MCU command frame, padding it to a
 * 4-byte boundary first.  Consumes @skb on failure.  Returns 0, -ENOSPC
 * when the ring is full, or the mt76_skb_adjust_pad() error.
 * NOTE(review): the fullness check runs before q->lock is taken —
 * presumably MCU submissions are serialized by the caller; confirm.
 */
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	/* pad the frame to the next 32-bit boundary for the SDIO transport */
	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	/* buf_sz keeps the unpadded length for completion accounting */
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}
576
mt76s_tx_kick(struct mt76_dev * dev,struct mt76_queue * q)577 static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
578 {
579 struct mt76_sdio *sdio = &dev->sdio;
580
581 mt76_worker_schedule(&sdio->txrx_worker);
582 }
583
/* Queue operations wired into the mt76 core for the SDIO transport. */
static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
589
/* Tear down the SDIO transport.  The workers are stopped first so nothing
 * touches the queues while they are drained; then pending stat work is
 * flushed, outstanding tx status completed, the SDIO irq released, and
 * any skbs still sitting in the rx rings freed.
 */
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);

	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	/* flush=true: force-complete any tx status still pending */
	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	/* free skbs left in the rx rings; the rings themselves are
	 * devm-managed and freed with the device
	 */
	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);
624
mt76s_init(struct mt76_dev * dev,struct sdio_func * func,const struct mt76_bus_ops * bus_ops)625 int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
626 const struct mt76_bus_ops *bus_ops)
627 {
628 struct mt76_sdio *sdio = &dev->sdio;
629 int err;
630
631 err = mt76_worker_setup(dev->hw, &sdio->status_worker,
632 mt76s_status_worker, "sdio-status");
633 if (err)
634 return err;
635
636 err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
637 "sdio-net");
638 if (err)
639 return err;
640
641 sched_set_fifo_low(sdio->status_worker.task);
642 sched_set_fifo_low(sdio->net_worker.task);
643
644 INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
645
646 dev->queue_ops = &sdio_queue_ops;
647 dev->bus = bus_ops;
648 dev->sdio.func = func;
649
650 return 0;
651 }
652 EXPORT_SYMBOL_GPL(mt76s_init);
653
/* Module metadata */
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
657