// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDA(mhi_controller_ida);

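/*
 * The string tables below are generated from X-macro lists defined
 * elsewhere in the driver (MHI_EE_LIST, DEV_ST_TRANSITION_LIST, etc.).
 * Each list entry supplies both the enum index and its human-readable
 * name, so the tables stay in sync with the corresponding enums by
 * construction.
 */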
#undef mhi_ee
#undef mhi_ee_end

#define mhi_ee(a, b)		[MHI_EE_##a] = b,
#define mhi_ee_end(a, b)	[MHI_EE_##a] = b,

const char * const mhi_ee_str[MHI_EE_MAX] = {
	MHI_EE_LIST
};

#undef dev_st_trans
#undef dev_st_trans_end

#define dev_st_trans(a, b)	[DEV_ST_TRANSITION_##a] = b,
#define dev_st_trans_end(a, b)	[DEV_ST_TRANSITION_##a] = b,

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	DEV_ST_TRANSITION_LIST
};

#undef ch_state_type
#undef ch_state_type_end

#define ch_state_type(a, b)	[MHI_CH_STATE_TYPE_##a] = b,
#define ch_state_type_end(a, b)	[MHI_CH_STATE_TYPE_##a] = b,

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	MHI_CH_STATE_TYPE_LIST
};

#undef mhi_pm_state
#undef mhi_pm_state_end

#define mhi_pm_state(a, b)	[MHI_PM_STATE_##a] = b,
#define mhi_pm_state_end(a, b)	[MHI_PM_STATE_##a] = b,

static const char * const mhi_pm_state_str[] = {
	MHI_PM_STATE_LIST
};

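/*
 * MHI PM states are bitmask values (one bit per state), so the lookup
 * below derives the string table index from the highest set bit.
 */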
const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			  mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS];
	int i, cnt = 0, ret;

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH\n");
			return ret;
		}
	}

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
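
/*
 * These attributes are attached to every device on the MHI bus via
 * dev_groups; for a controller they typically appear as e.g.
 * /sys/bus/mhi/devices/mhi0/serial_number (path shown for illustration).
 */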

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
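	/*
	 * Over-allocate by (len - 1) bytes so that an address aligned to
	 * 'len' is guaranteed to exist within the buffer. For example, a
	 * 4 KB ring allocated this way spans 8 KB - 1 bytes, which always
	 * contains a 4 KB-aligned window. Note the mask arithmetic below
	 * assumes 'len' is a power of two.
	 */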
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
	 * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
	 * IRQ_NOAUTOEN is not applicable.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
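
/*
 * Controller drivers can override the default (IRQF_SHARED |
 * IRQF_NO_SUSPEND) flags by filling in mhi_cntrl->irq_flags before
 * registration. Illustrative sketch only:
 *
 *	mhi_cntrl->irq_flags = IRQF_SHARED;
 *	ret = mhi_register_controller(mhi_cntrl, &config);
 */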

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

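		/*
		 * The context memory is shared with the device in little
		 * endian, so the bitfields are updated with a read-modify-
		 * write on a CPU-endian copy and converted back.
		 */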
		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
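
/*
 * At this point the device-visible context memory is laid out roughly as
 * follows (all arrays are DMA-coherent; their bus addresses are later
 * programmed into the MMIO registers noted on the right by
 * mhi_init_mmio()):
 *
 *	chan_ctxt[max_chan]       <- CCABAP_HIGHER/LOWER
 *	er_ctxt[total_ev_rings]   <- ECABAP_HIGHER/LOWER
 *	cmd_ctxt[NR_OF_CMD_RINGS] <- CRCBAP_HIGHER/LOWER
 */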

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{0, 0}
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

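	/*
	 * Unlike the TRE ring, buf_ring is host-only bookkeeping (one
	 * struct mhi_buf_info per TRE), so plain vzalloc() is sufficient;
	 * the device never sees this memory.
	 */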
	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be larger
		 * than the transfer ring length because of internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring can hold. RSC channels, for
		 * example, need a local ring longer than the transfer ring.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. If it is not defined, assign the channel
		 * direction as chtype.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
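
/*
 * Illustrative sketch of a controller configuration consumed by
 * parse_config(); the channel and event ring values below are
 * hypothetical, not taken from any real modem:
 *
 *	static const struct mhi_channel_config chans[] = {
 *		{
 *			.num = 0,
 *			.name = "LOOPBACK",
 *			.num_elements = 32,
 *			.event_ring = 0,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 *
 *	static struct mhi_event_config events[] = {
 *		{
 *			.num_elements = 32,
 *			.irq = 1,
 *			.channel = U32_MAX,	// shared event ring
 *			.mode = MHI_DB_BRST_DISABLE,
 *			.data_type = MHI_ER_DATA,
 *		},
 *	};
 *
 *	static const struct mhi_controller_config config = {
 *		.max_channels = 128,
 *		.timeout_ms = 2000,
 *		.num_channels = ARRAY_SIZE(chans),
 *		.ch_cfg = chans,
 *		.num_events = ARRAY_SIZE(events),
 *		.event_cfg = events,
 *	};
 */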

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* Used when setting the BEI (Block Event Interrupt) field of a TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -ERANGE;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -ERANGE;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate an RDDM table for debugging purposes, if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
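
/*
 * Typical controller life cycle, for reference (the power-up and
 * power-down entry points live in pm.c):
 *
 *	mhi_alloc_controller()
 *	mhi_register_controller()
 *	mhi_prepare_for_power_up()
 *	mhi_async_power_up() / mhi_sync_power_up()
 *	...
 *	mhi_power_down()
 *	mhi_unprepare_after_power_down()
 *	mhi_unregister_controller()
 *	mhi_free_controller()
 */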

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);
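
/*
 * Minimal client driver skeleton, for illustration only (all "foo" names
 * are hypothetical; module_mhi_driver() from <linux/mhi.h> wraps the
 * register/unregister pair):
 *
 *	static const struct mhi_device_id foo_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{}
 *	};
 *
 *	static struct mhi_driver foo_driver = {
 *		.id_table = foo_id_table,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.ul_xfer_cb = foo_ul_cb,
 *		.dl_xfer_cb = foo_dl_cb,
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *	module_mhi_driver(foo_driver);
 */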

static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Modem Host Interface");