// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"
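
/*
 * Register accessors for the various host1x apertures: the common region
 * (Tegra234 and later), the hypervisor region (Tegra186 and later), the
 * sync register block and the per-channel register windows.
 */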
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
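
/*
 * Per-SoC capability tables: channel, syncpoint and MLOCK counts, DMA
 * addressing limits and stream ID tables. The OF match table below picks
 * one of these based on the compatible string.
 */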
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC1 */
		.base = 0x1bc0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{
		/* VIC channel */
		.base = 0x17b8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* VIC MMIO */
		.base = 0x1688,
		.offset = 0x34,
		.limit = 0x34
	},
	{
		/* NVDEC channel */
		.base = 0x17c8,
		.offset = 0x30,
		.limit = 0x30,
	},
	{
		/* NVDEC MMIO */
		.base = 0x1698,
		.offset = 0x34,
		.limit = 0x34,
	},
};

static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true,
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow access to all stream IDs to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow access to all classes to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
				to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}
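
/*
 * Attach to the IOMMU if desired and available, then derive the effective
 * DMA mask: without an IOMMU, SoCs lacking the wide GATHER opcode must
 * keep push buffers below the 32-bit boundary.
 */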
static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}
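
/* Undo host1x_iommu_attach(): tear down the IOVA domain and release the group. */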
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}
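
/*
 * The resets are requested in released state so that the runtime PM
 * callbacks below can acquire them on resume and release them again on
 * suspend.
 */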
static int host1x_get_resets(struct host1x *host)
{
	int err;

	host->resets[0].id = "mc";
	host->resets[1].id = "host1x";
	host->nresets = ARRAY_SIZE(host->resets);

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				host->dev, host->nresets, host->resets);
	if (err) {
		dev_err(host->dev, "failed to get reset: %d\n", err);
		return err;
	}

	return 0;
}

static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int err, i;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}
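
	/*
	 * Newer device trees provide one named interrupt per syncpoint IRQ
	 * line ("syncpt0", "syncpt1", ...); probe them until the first
	 * missing name.
	 */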
	for (i = 0; i < ARRAY_SIZE(host->syncpt_irqs); i++) {
		char irq_name[] = "syncptX";

		sprintf(irq_name, "syncpt%d", i);

		err = platform_get_irq_byname_optional(pdev, irq_name);
		if (err == -ENXIO)
			break;
		if (err < 0)
			return err;

		host->syncpt_irqs[i] = err;
	}

	host->num_syncpt_irqs = i;

	/* Device tree without irq names */
	if (i == 0) {
		host->syncpt_irqs[0] = platform_get_irq(pdev, 0);
		if (host->syncpt_irqs[0] < 0)
			return host->syncpt_irqs[0];

		host->num_syncpt_irqs = 1;
	}

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;

	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	err = host1x_intr_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* the driver's code isn't ready yet for the dynamic RPM */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}

static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);

	return 0;
}
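
/*
 * Runtime suspend quiesces the hardware: channels and interrupts are
 * stopped, syncpoint state is saved, the resets are briefly asserted and
 * the clock is gated.
 */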
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop_all(host);
	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	err = reset_control_bulk_assert(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		goto resume_host1x;
	}

	usleep_range(1000, 2000);

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}
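
/*
 * System sleep is layered on top of the runtime PM callbacks via
 * pm_runtime_force_{suspend,resume}().
 */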
static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
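
/*
 * Minimal usage sketch (hypothetical client code, not part of this driver):
 * a client can combine the host1x mask with its own addressing limit before
 * configuring DMA, e.g.:
 *
 *	u64 mask = min_t(u64, host1x_get_dma_mask(host1x), DMA_BIT_MASK(32));
 *	int err = dma_set_mask_and_coherent(client_dev, mask);
 */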

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");