// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT	0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

#define NOC_QOS_MODE_FIXED_VAL		0x0
#define NOC_QOS_MODE_BYPASS_VAL		0x2

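/* The minimum bus rate to vote for when a provider is marked keep_alive */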
#define ICC_BUS_CLK_MIN_RATE	19200ULL /* kHz */

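/*
 * Program a QNOC master port's QoS: the default request priority and the
 * urgent-forwarding enable bit, both in the per-port MCTL_LOW register.
 */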
static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				QNOC_QOS_MCTL_DFLT_PRIO_MASK,
				qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				  QNOC_QOS_MCTL_URGFWD_EN_MASK,
				  !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

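/*
 * Program one of the four M_BKE_HEALTH_n registers of a BIMC master port:
 * the priority level, the request priority and, except on HEALTH_3, the
 * limit-commands flag.
 */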
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

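/*
 * Apply QoS to a BIMC master port: program all four health registers when a
 * non-bypass mode is requested, then set the BKE enable bit accordingly.
 */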
static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS Priority: the QoS Health parameters are only considered
	 * when we are NOT in Bypass Mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

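/*
 * Program the P1 (request) and P0 (default) priority fields of a NoC QoS
 * port. The hardware requires them to be written one at a time, P1 first.
 */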
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

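/*
 * Apply QoS to a NoC master port: skip ports whose votes are aggregated on
 * the parent node, otherwise program Fixed or Bypass mode as requested.
 */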
static int qcom_icc_set_noc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS_VAL;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name);
		mode = NOC_QOS_MODE_FIXED_VAL;
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name);
		mode = NOC_QOS_MODE_BYPASS_VAL;
	} else {
		/* How did we get here? */
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

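/* Dispatch to the QoS programming routine matching the bus type */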
static int qcom_icc_qos_set(struct icc_node *node)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node);
	default:
		return qcom_icc_set_noc_qos(node);
	}
}

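/*
 * Send the aggregated bandwidth votes for both RPM contexts (active and
 * sleep) over SMD, as master and/or slave requests. AP-owned nodes are not
 * the RPM's business and are skipped.
 */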
static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
{
	int ret, rpm_ctx = 0;
	u64 bw_bps;

	if (qn->qos.ap_owned)
		return 0;

	for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
		bw_bps = icc_units_to_bps(bw[rpm_ctx]);

		if (qn->mas_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_MASTER_REQ,
						    qn->mas_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
				       qn->mas_rpm_id, ret);
				return ret;
			}
		}

		if (qn->slv_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_SLAVE_REQ,
						    qn->slv_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
				       qn->slv_rpm_id, ret);
				return ret;
			}
		}
	}

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clean up stale values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = RPM_ALWAYS_TAG;

	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

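/*
 * Convert a node's aggregated bandwidth into a bus clock rate: spread the
 * average bandwidth across the channels, apply the percentage-based ab/ib
 * coefficients, take the larger of the two rates and divide by the bus width.
 */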
static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx)
{
	u64 agg_avg_rate, agg_peak_rate, agg_rate;

	if (qn->channels)
		agg_avg_rate = div_u64(qn->sum_avg[ctx], qn->channels);
	else
		agg_avg_rate = qn->sum_avg[ctx];

	if (qn->ab_coeff) {
		agg_avg_rate = agg_avg_rate * qn->ab_coeff;
		agg_avg_rate = div_u64(agg_avg_rate, 100);
	}

	if (qn->ib_coeff) {
		agg_peak_rate = qn->max_peak[ctx] * 100;
		agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff);
	} else {
		agg_peak_rate = qn->max_peak[ctx];
	}

	agg_rate = max_t(u64, agg_avg_rate, agg_peak_rate);

	return div_u64(agg_rate, qn->buswidth);
}

/**
 * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_clk_rate: array containing the aggregated clock rates in kHz
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn;
	struct icc_node *node;
	int ctx;

	/*
	 * Iterate nodes on the provider, aggregate bandwidth requests for
	 * every bucket and convert them into bus clock rates.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (ctx = 0; ctx < QCOM_SMD_RPM_STATE_NUM; ctx++) {
			agg_clk_rate[ctx] = max_t(u64, agg_clk_rate[ctx],
						  qcom_icc_calc_rate(qp, qn, ctx));
		}
	}
}

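/*
 * The provider's .set callback: push the bandwidth votes to the RPM, then
 * scale the provider-wide and node-specific bus clocks for both contexts.
 */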
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
	struct icc_provider *provider;
	struct qcom_icc_provider *qp;
	u64 active_rate, sleep_rate;
	int ret;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_clk_rate);
	active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
	sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];

	ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg);
	if (ret)
		return ret;

	if (dst_qn) {
		ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg);
		if (ret)
			return ret;
	}

	/* Some providers don't have a bus clock to scale */
	if (!qp->bus_clk_desc && !qp->bus_clk)
		return 0;

	/*
	 * Downstream checks whether the requested rate is zero, but it makes little sense
	 * to vote for a value that's below the lower threshold, so let's not do so.
	 */
	if (qp->keep_alive)
		active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);

	/* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */
	if (qp->bus_clk) {
		active_rate = max_t(u64, active_rate, sleep_rate);
		/* ARM32 caps the clk_set_rate arg to u32 - nothing we can do about that! */
		active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX);
		return clk_set_rate(qp->bus_clk, active_rate);
	}

	/* RPM only accepts <=INT_MAX rates */
	active_rate = min_t(u64, active_rate, INT_MAX);
	sleep_rate = min_t(u64, sleep_rate, INT_MAX);

	if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	/* Handle the node-specific clock */
	if (!src_qn->bus_clk_desc)
		return 0;

	active_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_ACTIVE_STATE);
	sleep_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_SLEEP_STATE);

	if (active_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	return 0;
}

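/*
 * Common probe routine shared by the SMD RPM interconnect drivers; the
 * per-SoC drivers match on their compatible and call this from their probe.
 */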
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds = NULL;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_intf_clocks) {
		cds = desc->intf_clocks;
		cd_num = desc->num_intf_clocks;
	} else {
		/* 0 intf clocks is perfectly fine */
		cd_num = 0;
	}

	qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
	if (!qp->intf_clks)
		return -ENOMEM;

	if (desc->bus_clk_desc) {
		qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
						GFP_KERNEL);
		if (!qp->bus_clk_desc)
			return -ENOMEM;

		/* Copy the descriptor, don't alias it over the fresh allocation */
		*qp->bus_clk_desc = *desc->bus_clk_desc;
	} else {
		/* Some older SoCs may have a single non-RPM-owned bus clock. */
		qp->bus_clk = devm_clk_get_optional(dev, "bus");
		if (IS_ERR(qp->bus_clk))
			return PTR_ERR(qp->bus_clk);
	}

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	qp->num_intf_clks = cd_num;
	for (i = 0; i < cd_num; i++)
		qp->intf_clks[i].id = cds[i];

	qp->keep_alive = desc->keep_alive;
	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;
	/* Provider-wide defaults for nodes that don't set their own coefficients */
	qp->ab_coeff = desc->ab_coeff;
	qp->ib_coeff = desc->ib_coeff;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio))
			return PTR_ERR(mmio);

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = clk_prepare_enable(qp->bus_clk);
	if (ret)
		return ret;

	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	icc_provider_init(provider);

	/* If this fails, bus accesses will crash the platform! */
	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		if (!qnodes[i]->ab_coeff)
			qnodes[i]->ab_coeff = qp->ab_coeff;

		if (!qnodes[i]->ib_coeff)
			qnodes[i]->ib_coeff = qp->ib_coeff;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			clk_bulk_disable_unprepare(qp->num_intf_clks,
						   qp->intf_clks);
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		/* Set QoS registers (we only need to do it once, generally) */
		if (qnodes[i]->qos.ap_owned &&
		    qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
			ret = qcom_icc_qos_set(node);
			if (ret) {
				clk_bulk_disable_unprepare(qp->num_intf_clks,
							   qp->intf_clks);
				goto err_remove_nodes;
			}
		}

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
err_disable_unprepare_clk:
	clk_disable_unprepare(qp->bus_clk);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

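/* Undo everything qnoc_probe set up that is not device-managed */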
void qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_disable_unprepare(qp->bus_clk);
}
EXPORT_SYMBOL(qnoc_remove);
639 | |