// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>
 */

/*
	Module: rt2x00mmio
	Abstract: rt2x00 generic mmio device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"

/*
 * Register access.
 */
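
/**
 * rt2x00mmio_regbusy_read - Read an indirect register with busy polling
 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
 * @offset: Register offset.
 * @field: Field holding the busy bit.
 * @reg: Where the final register contents are stored.
 *
 * Polls the register up to REGISTER_BUSY_COUNT times, waiting
 * REGISTER_BUSY_DELAY microseconds between reads. Returns 1 once the
 * busy field reads back zero, and 0 if the device is absent or the
 * polling times out (in which case *reg is set to ~0).
 */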
int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
			    const unsigned int offset,
			    const struct rt2x00_field32 field,
			    u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		*reg = rt2x00mmio_register_read(rt2x00dev, offset);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	printk_once(KERN_ERR "%s() Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		    __func__, offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read);
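
/*
 * Usage sketch (not compiled): a chipset driver typically gates access
 * to an indirect register on its busy bit. The register H2M_MAILBOX_CSR
 * and field H2M_MAILBOX_CSR_OWNER below are illustrative placeholders
 * from a hypothetical chipset header, not definitions in this file.
 */
#if 0
static bool example_mailbox_ready(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/* 1 when the owner bit cleared in time, 0 on timeout (reg == ~0). */
	return rt2x00mmio_regbusy_read(rt2x00dev, H2M_MAILBOX_CSR,
				       H2M_MAILBOX_CSR_OWNER, &reg);
}
#endif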
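
/**
 * rt2x00mmio_rxdone - Handle RX done events
 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
 *
 * Processes at most 15 received frames in one call. Returns true when
 * the budget was exhausted and frames may still be pending, meaning the
 * caller should schedule another run.
 */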
bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_mmio *entry_priv;
	struct skb_frame_desc *skbdesc;
	int max_rx = 16;

	while (--max_rx) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;

		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * DMA is already done, notify rt2x00lib that
		 * it finished successfully.
		 */
		rt2x00lib_dmastart(entry);
		rt2x00lib_dmadone(entry);

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_ATOMIC);
	}

	return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone);
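
/*
 * Usage sketch (not compiled): chipset drivers call rt2x00mmio_rxdone()
 * from their rxdone tasklet and reschedule while the RX budget keeps
 * being exhausted. example_enable_rx_interrupt() is an illustrative
 * placeholder for the driver's IRQ unmask helper.
 */
#if 0
static void example_rxdone_tasklet(struct tasklet_struct *t)
{
	struct rt2x00_dev *rt2x00dev =
		from_tasklet(rt2x00dev, t, rxdone_tasklet);

	if (rt2x00mmio_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		example_enable_rx_interrupt(rt2x00dev);
}
#endif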
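
/**
 * rt2x00mmio_flush_queue - Flush data queue
 * @queue: Data queue to flush.
 * @drop: True to drop all pending frames (ignored here).
 *
 * Sleeps in 50ms steps, for at most 500ms, until the queue is empty.
 */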
void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
{
	unsigned int i;

	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
		msleep(50);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);

/*
 * Device initialization handlers.
 */
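
/*
 * Each queue gets a single coherent allocation of limit * desc_size
 * bytes; entry i owns the desc_size slice at offset i * desc_size, in
 * both the CPU (desc) and bus (desc_dma) address spaces.
 */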
static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue)
{
	struct queue_entry_priv_mmio *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size, &dma,
				  GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}

static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue)
{
	struct queue_entry_priv_mmio *entry_priv =
		queue->entries[0].priv_data;

	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}
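
/**
 * rt2x00mmio_initialize - Allocate queue DMA memory and request the IRQ
 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
 *
 * On failure all queue DMA memory allocated so far is released again,
 * and the error from rt2x00mmio_alloc_queue_dma() or request_irq() is
 * returned.
 */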
int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(rt2x00dev->irq,
			     rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
	if (status) {
		rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n",
			   rt2x00dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00mmio_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_initialize);
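
/**
 * rt2x00mmio_uninitialize - Free the IRQ line and all queue DMA memory
 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
 */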
void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize);
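
/*
 * Usage sketch (not compiled): MMIO chipset drivers can wire these
 * helpers directly into their rt2x00lib_ops; example_interrupt() and
 * the struct name are illustrative placeholders, and the remaining
 * chipset specific callbacks are omitted.
 */
#if 0
static const struct rt2x00lib_ops example_rt2x00_ops = {
	.irq_handler	= example_interrupt,
	.initialize	= rt2x00mmio_initialize,
	.uninitialize	= rt2x00mmio_uninitialize,
	.flush_queue	= rt2x00mmio_flush_queue,
};
#endif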

/*
 * rt2x00mmio module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 mmio library");
MODULE_LICENSE("GPL");