1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020 Linaro Ltd
4 */
5
6 #include <linux/clk.h>
7 #include <linux/device.h>
8 #include <linux/interconnect-provider.h>
9 #include <linux/io.h>
10 #include <linux/module.h>
11 #include <linux/of_device.h>
12 #include <linux/of_platform.h>
13 #include <linux/platform_device.h>
14 #include <linux/regmap.h>
15 #include <linux/slab.h>
16
17 #include "smd-rpm.h"
18 #include "icc-rpm.h"
19
/* BIMC QoS */
/*
 * Per-port BKE (Bandwidth Keeper Engine) register layout: one 0x4000-sized
 * bank per QoS port, starting at 0x300.  Macro arguments are parenthesized
 * so expression arguments (e.g. "port + 1") expand correctly.
 */
#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * (n)))
#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
/* Health registers 0..3, 4 bytes apart, at offset 0x40 in the bank */
#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * (i)))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK 0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK 0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK 0x1

/* NoC QoS */
/* One 0x1000-sized QoS register bank per NoC port */
#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + ((n) * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK 0xc
#define NOC_QOS_PRIORITY_P0_MASK 0x3
#define NOC_QOS_PRIORITY_P1_SHIFT 0x2

#define NOC_QOS_MODEn_ADDR(n) (0xc + ((n) * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
41
qcom_icc_bimc_set_qos_health(struct qcom_icc_provider * qp,struct qcom_icc_qos * qos,int regnum)42 static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
43 struct qcom_icc_qos *qos,
44 int regnum)
45 {
46 u32 val;
47 u32 mask;
48
49 val = qos->prio_level;
50 mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
51
52 val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
53 mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
54
55 /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
56 if (regnum != 3) {
57 val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
58 mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
59 }
60
61 return regmap_update_bits(qp->regmap,
62 qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
63 mask, val);
64 }
65
/*
 * qcom_icc_set_bimc_qos - apply QoS settings for a BIMC port
 * @src: interconnect node whose QoS data should be programmed
 * @max_bw: unused here; kept for a uniform qos-set signature
 *
 * Writes the four health registers (only when not in bypass mode) and
 * then the BKE enable bit.  Returns 0 or a negative error code.
 */
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(src->provider);
	struct qcom_icc_node *qn = src->data;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 bke_en = 0;
	int i, ret;

	if (qn->qos.qos_mode != -1)
		mode = qn->qos.qos_mode;

	/*
	 * The QoS Health parameters only take effect when the port is
	 * NOT in Bypass Mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			ret = qcom_icc_bimc_set_qos_health(qp, &qn->qos, i);
			if (ret)
				return ret;
		}

		/* Set BKE_EN to 1 for Fixed, Regulator or Limiter Mode */
		bke_en = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, bke_en);
}
101
qcom_icc_noc_set_qos_priority(struct qcom_icc_provider * qp,struct qcom_icc_qos * qos)102 static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
103 struct qcom_icc_qos *qos)
104 {
105 u32 val;
106 int rc;
107
108 /* Must be updated one at a time, P1 first, P0 last */
109 val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
110 rc = regmap_update_bits(qp->regmap,
111 qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
112 NOC_QOS_PRIORITY_P1_MASK, val);
113 if (rc)
114 return rc;
115
116 return regmap_update_bits(qp->regmap,
117 qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
118 NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
119 }
120
/*
 * qcom_icc_set_noc_qos - apply QoS settings for a NoC port
 * @src: interconnect node whose QoS data should be programmed
 * @max_bw: unused here; kept for a uniform qos-set signature
 *
 * Nodes without a QoS port (qos_port < 0) are skipped; their vote is
 * aggregated on a parent node.  Returns 0 or a negative error code.
 */
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(src->provider);
	struct qcom_icc_node *qn = src->data;
	u32 mode = NOC_QOS_MODE_BYPASS;
	int ret;

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode != -1)
		mode = qn->qos.qos_mode;

	switch (mode) {
	case NOC_QOS_MODE_FIXED:
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
			qn->name);
		ret = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (ret)
			return ret;
		break;
	case NOC_QOS_MODE_BYPASS:
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
			qn->name);
		break;
	default:
		break;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}
158
/*
 * qcom_icc_qos_set - dispatch QoS programming to the BIMC or NoC handler
 * @node: interconnect node to program
 * @sum_bw: aggregated bandwidth to forward to the handler
 *
 * Returns 0 on success or a negative error code from the handler.
 */
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	return qp->is_bimc_node ? qcom_icc_set_bimc_qos(node, sum_bw) :
				  qcom_icc_set_noc_qos(node, sum_bw);
}
171
/*
 * qcom_icc_rpm_set - send the aggregated bandwidth votes to the RPM
 * @mas_rpm_id: RPM id of the master, or -1 to skip the master vote
 * @slv_rpm_id: RPM id of the slave, or -1 to skip the slave vote
 * @sum_bw: aggregated bandwidth in bytes per second
 *
 * Returns 0 on success or the first error from qcom_icc_rpm_smd_send().
 */
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
	int ret;

	if (mas_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_MASTER_REQ,
					    mas_rpm_id, sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
			       mas_rpm_id, ret);
			return ret;
		}
	}

	if (slv_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_SLAVE_REQ,
					    slv_rpm_id, sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
			       slv_rpm_id, ret);
			return ret;
		}
	}

	return 0;
}
202
qcom_icc_set(struct icc_node * src,struct icc_node * dst)203 static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
204 {
205 struct qcom_icc_provider *qp;
206 struct qcom_icc_node *qn;
207 struct icc_provider *provider;
208 struct icc_node *n;
209 u64 sum_bw;
210 u64 max_peak_bw;
211 u64 rate;
212 u32 agg_avg = 0;
213 u32 agg_peak = 0;
214 int ret, i;
215
216 qn = src->data;
217 provider = src->provider;
218 qp = to_qcom_provider(provider);
219
220 list_for_each_entry(n, &provider->nodes, node_list)
221 provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
222 &agg_avg, &agg_peak);
223
224 sum_bw = icc_units_to_bps(agg_avg);
225 max_peak_bw = icc_units_to_bps(agg_peak);
226
227 if (!qn->qos.ap_owned) {
228 /* send bandwidth request message to the RPM processor */
229 ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
230 if (ret)
231 return ret;
232 } else if (qn->qos.qos_mode != -1) {
233 /* set bandwidth directly from the AP */
234 ret = qcom_icc_qos_set(src, sum_bw);
235 if (ret)
236 return ret;
237 }
238
239 rate = max(sum_bw, max_peak_bw);
240
241 do_div(rate, qn->buswidth);
242
243 if (qn->rate == rate)
244 return 0;
245
246 for (i = 0; i < qp->num_clks; i++) {
247 ret = clk_set_rate(qp->bus_clks[i].clk, rate);
248 if (ret) {
249 pr_err("%s clk_set_rate error: %d\n",
250 qp->bus_clks[i].id, ret);
251 return ret;
252 }
253 }
254
255 qn->rate = rate;
256
257 return 0;
258 }
259
/* Default bus clock names, used when the descriptor lists no clocks */
static const char * const bus_clocks[] = {
	"bus", "bus_a",
};
263
/**
 * qnoc_probe() - common probe for QCOM RPM/SMD interconnect providers
 * @pdev: platform device whose of_match data is a struct qcom_icc_desc
 *
 * Builds a qcom_icc_provider from the match data: resolves the bus clock
 * names, optionally creates an MMIO regmap for QoS programming, enables
 * the clocks, registers the provider and creates an icc node (plus its
 * links) for every descriptor entry.
 *
 * Return: 0 on success, -EPROBE_DEFER while the RPM proxy is not yet
 * available, or a negative errno on failure.
 */
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node **qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	/* Fall back to the default "bus"/"bus_a" pair when no clocks given */
	if (desc->num_clocks) {
		cds = desc->clocks;
		cd_num = desc->num_clocks;
	} else {
		cds = bus_clocks;
		cd_num = ARRAY_SIZE(bus_clocks);
	}

	/* Provider struct carries a trailing clk_bulk_data array */
	qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < cd_num; i++)
		qp->bus_clks[i].id = cds[i];
	qp->num_clks = cd_num;

	qp->is_bimc_node = desc->is_bimc_node;
	qp->qos_offset = desc->qos_offset;

	/* A regmap config means this provider programs QoS registers */
	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		mmio = devm_ioremap_resource(dev, res);

		if (IS_ERR(mmio)) {
			dev_err(dev, "Cannot ioremap interconnect bus resource\n");
			return PTR_ERR(mmio);
		}

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

	ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	provider = &qp->provider;
	INIT_LIST_HEAD(&provider->nodes);
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->aggregate = icc_std_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;

	ret = icc_provider_add(provider);
	if (ret) {
		dev_err(dev, "error adding interconnect provider: %d\n", ret);
		/* clocks are not devm-enabled, so undo the enable by hand */
		clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
		return ret;
	}

	/* Create every node and its outgoing links from the match data */
	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	platform_set_drvdata(pdev, qp);

	return 0;
err:
	/* Drop the nodes created so far, then the clocks and the provider */
	icc_nodes_remove(provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
	icc_provider_del(provider);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);
389
qnoc_remove(struct platform_device * pdev)390 int qnoc_remove(struct platform_device *pdev)
391 {
392 struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
393
394 icc_nodes_remove(&qp->provider);
395 clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
396 return icc_provider_del(&qp->provider);
397 }
398 EXPORT_SYMBOL(qnoc_remove);
399