/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
#include "common.xml.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	u32 model;
	u32 revision;
	u32 product_id;
	u32 customer_id;
	u32 eco_id;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Number of Neural Network cores. */
	u32 nn_core_count;

	/* Number of MAD units per Neural Network core. */
	u32 nn_mad_per_core;

	/* Number of Tensor Processing cores. */
	u32 tp_core_count;

	/* Size in bytes of the SRAM inside the NPU. */
	u32 on_chip_sram_size;

	/* Size in bytes of the SRAM across the AXI bus. */
	u32 axi_sram_size;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;

	/* Number of varyings */
	u8 varyings_count;
};
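
/*
 * Illustrative helper, not part of the driver API: an aggregate figure
 * such as the raw NPU multiply-accumulate count per clock can be
 * derived from the identity fields above. A minimal sketch:
 */
static inline u32 etnaviv_example_nn_mac_count(const struct etnaviv_chip_identity *id)
{
	/* nn_core_count NN cores, each with nn_mad_per_core MAD units. */
	return id->nn_core_count * id->nn_mad_per_core;
}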

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
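
/*
 * Illustrative predicate, not part of the driver API: an event slot is
 * in flight once a fence has been attached to it, and the fence is
 * dropped again when the event completes. A hedged sketch:
 */
static inline bool etnaviv_example_event_pending(const struct etnaviv_event *event)
{
	return event->fence != NULL;
}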

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;

#define ETNA_NR_EVENTS 30

enum etnaviv_gpu_state {
	ETNA_GPU_STATE_UNKNOWN = 0,
	ETNA_GPU_STATE_IDENTIFIED,
	ETNA_GPU_STATE_RESET,
	ETNA_GPU_STATE_INITIALIZED,
	ETNA_GPU_STATE_RUNNING,
	ETNA_GPU_STATE_FAULT,
};

struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct mutex sched_lock;
	struct drm_gpu_scheduler sched;
	enum etnaviv_gpu_state state;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	u32 idle_mask;

	/* Fencing support */
	struct xarray user_fences;
	u32 next_user_fence;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;
	u32 hangcheck_fence;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned int fe_waitcycles;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};
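
/*
 * next_fence/completed_fence above form a monotonically increasing u32
 * sequence number space that is allowed to wrap. A minimal sketch of a
 * wraparound-safe comparison (the driver keeps an equivalent helper of
 * this shape; the name here is illustrative):
 */
static inline bool etnaviv_example_fence_completed(u32 completed, u32 fence)
{
	/* Signed difference stays correct across u32 wraparound. */
	return (s32)(completed - fence) >= 0;
}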

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}
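
/*
 * Hedged example of the accessors above: a read-modify-write on an
 * arbitrary register offset. This helper is a sketch for illustration,
 * not part of the driver API.
 */
static inline void gpu_example_set_bits(struct etnaviv_gpu *gpu, u32 reg,
					u32 bits)
{
	gpu_write(gpu, reg, gpu_read(gpu, reg) | bits);
}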

static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{
	/* Power registers in GC300 < 2.0 are offset by 0x100 */
	if (gpu->identity.model == chipModel_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;

	return reg;
}

static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}

static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
}
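
/*
 * Worked example (offset is illustrative): on a GC300 below revision
 * 0x2000, gpu_read_power(gpu, 0x100) is transparently redirected to
 * MMIO offset 0x200 by gpu_fix_power_address(); on every other core
 * it reads offset 0x100 unchanged. Callers always pass the documented
 * register offset and never apply the fixup themselves.
 */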

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
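
/*
 * Typical calling pattern around GPU access (sketch only; error paths
 * trimmed and the surrounding context is hypothetical):
 *
 *	ret = etnaviv_gpu_pm_get_sync(gpu);
 *	if (ret < 0)
 *		return ret;
 *	... touch registers / submit work ...
 *	etnaviv_gpu_pm_put(gpu);
 */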

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */