1 | /* |
2 | * Copyright © 2007 David Airlie |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * David Airlie |
25 | */ |
26 | #include <linux/module.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/pm_runtime.h> |
29 | |
30 | #include <drm/drmP.h> |
31 | #include <drm/drm_crtc.h> |
32 | #include <drm/drm_crtc_helper.h> |
33 | #include <drm/amdgpu_drm.h> |
34 | #include "amdgpu.h" |
35 | #include "cikd.h" |
36 | #include "amdgpu_gem.h" |
37 | |
38 | #include <drm/drm_fb_helper.h> |
39 | |
40 | #include <linux/vga_switcheroo.h> |
41 | |
42 | #include "amdgpu_display.h" |
43 | |
/*
 * Object hierarchy -
 * this contains a helper + an amdgpu framebuffer;
 * the helper contains a pointer to the amdgpu framebuffer base class.
 */
48 | |
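/*
 * fb_open/fb_release keep a runtime PM reference for as long as the fbdev
 * node is open, so the GPU is not runtime-suspended while the console
 * framebuffer is in use.
 */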
49 | static int |
50 | amdgpufb_open(struct fb_info *info, int user) |
51 | { |
52 | struct amdgpu_fbdev *rfbdev = info->par; |
53 | struct amdgpu_device *adev = rfbdev->adev; |
	int ret = pm_runtime_get_sync(adev->ddev->dev);

	if (ret < 0 && ret != -EACCES) {
56 | pm_runtime_mark_last_busy(adev->ddev->dev); |
57 | pm_runtime_put_autosuspend(adev->ddev->dev); |
58 | return ret; |
59 | } |
60 | return 0; |
61 | } |
62 | |
63 | static int |
64 | amdgpufb_release(struct fb_info *info, int user) |
65 | { |
66 | struct amdgpu_fbdev *rfbdev = info->par; |
67 | struct amdgpu_device *adev = rfbdev->adev; |
68 | |
69 | pm_runtime_mark_last_busy(adev->ddev->dev); |
70 | pm_runtime_put_autosuspend(adev->ddev->dev); |
71 | return 0; |
72 | } |
73 | |
74 | static struct fb_ops amdgpufb_ops = { |
75 | .owner = THIS_MODULE, |
76 | DRM_FB_HELPER_DEFAULT_OPS, |
77 | .fb_open = amdgpufb_open, |
78 | .fb_release = amdgpufb_release, |
79 | .fb_fillrect = drm_fb_helper_cfb_fillrect, |
80 | .fb_copyarea = drm_fb_helper_cfb_copyarea, |
81 | .fb_imageblit = drm_fb_helper_cfb_imageblit, |
82 | }; |
83 | |
84 | |
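/*
 * Pad the scanout width so the resulting pitch meets the display engine's
 * alignment requirements: 256-pixel alignment for 8bpp, 128 for 16bpp and
 * 64 for 24/32bpp surfaces. Returns the pitch in bytes.
 */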
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp,
		       bool tiled)
86 | { |
87 | int aligned = width; |
88 | int pitch_mask = 0; |
89 | |
90 | switch (cpp) { |
91 | case 1: |
92 | pitch_mask = 255; |
93 | break; |
94 | case 2: |
95 | pitch_mask = 127; |
96 | break; |
97 | case 3: |
98 | case 4: |
99 | pitch_mask = 63; |
100 | break; |
101 | } |
102 | |
103 | aligned += pitch_mask; |
104 | aligned &= ~pitch_mask; |
105 | return aligned * cpp; |
106 | } |
107 | |
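/* Unmap, unpin and drop the reference on the BO backing the fbdev console. */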
108 | static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) |
109 | { |
110 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); |
111 | int ret; |
112 | |
113 | ret = amdgpu_bo_reserve(abo, true); |
114 | if (likely(ret == 0)) { |
115 | amdgpu_bo_kunmap(abo); |
116 | amdgpu_bo_unpin(abo); |
117 | amdgpu_bo_unreserve(abo); |
118 | } |
119 | drm_gem_object_put_unlocked(gobj); |
120 | } |
121 | |
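/*
 * Allocate a contiguous, CPU-accessible, cleared BO in a scanout-capable
 * domain, pin it and kmap it so fbcon can render into it directly.
 */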
122 | static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, |
123 | struct drm_mode_fb_cmd2 *mode_cmd, |
124 | struct drm_gem_object **gobj_p) |
125 | { |
126 | struct amdgpu_device *adev = rfbdev->adev; |
127 | struct drm_gem_object *gobj = NULL; |
128 | struct amdgpu_bo *abo = NULL; |
129 | bool fb_tiled = false; /* useful for testing */ |
130 | u32 tiling_flags = 0, domain; |
131 | int ret; |
132 | int aligned_size, size; |
133 | int height = mode_cmd->height; |
134 | u32 cpp; |
135 | |
136 | cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0); |
137 | |
138 | /* need to align pitch with crtc limits */ |
139 | mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, |
140 | fb_tiled); |
141 | domain = amdgpu_display_supported_domains(adev); |
142 | |
143 | height = ALIGN(mode_cmd->height, 8); |
144 | size = mode_cmd->pitches[0] * height; |
145 | aligned_size = ALIGN(size, PAGE_SIZE); |
146 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, |
147 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
148 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | |
149 | AMDGPU_GEM_CREATE_VRAM_CLEARED, |
150 | ttm_bo_type_kernel, NULL, &gobj); |
151 | if (ret) { |
		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
153 | return -ENOMEM; |
154 | } |
155 | abo = gem_to_amdgpu_bo(gobj); |
156 | |
157 | if (fb_tiled) |
158 | tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); |
159 | |
160 | ret = amdgpu_bo_reserve(abo, false); |
161 | if (unlikely(ret != 0)) |
162 | goto out_unref; |
163 | |
	if (tiling_flags) {
		ret = amdgpu_bo_set_tiling_flags(abo, tiling_flags);
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}

	ret = amdgpu_bo_pin(abo, domain);
173 | if (ret) { |
174 | amdgpu_bo_unreserve(abo); |
175 | goto out_unref; |
176 | } |
177 | |
178 | ret = amdgpu_ttm_alloc_gart(&abo->tbo); |
179 | if (ret) { |
180 | amdgpu_bo_unreserve(abo); |
		dev_err(adev->dev, "%p bind failed\n", abo);
182 | goto out_unref; |
183 | } |
184 | |
185 | ret = amdgpu_bo_kmap(abo, NULL); |
186 | amdgpu_bo_unreserve(abo); |
	if (ret)
		goto out_unref;
190 | |
191 | *gobj_p = gobj; |
192 | return 0; |
193 | out_unref: |
194 | amdgpufb_destroy_pinned_object(gobj); |
195 | *gobj_p = NULL; |
196 | return ret; |
197 | } |
198 | |
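/*
 * fb_probe callback: back the requested surface with a pinned BO, wrap it in
 * a DRM framebuffer and fill in the fbdev info structure for fbcon.
 */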
199 | static int amdgpufb_create(struct drm_fb_helper *helper, |
200 | struct drm_fb_helper_surface_size *sizes) |
201 | { |
202 | struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; |
203 | struct amdgpu_device *adev = rfbdev->adev; |
204 | struct fb_info *info; |
205 | struct drm_framebuffer *fb = NULL; |
206 | struct drm_mode_fb_cmd2 mode_cmd; |
207 | struct drm_gem_object *gobj = NULL; |
208 | struct amdgpu_bo *abo = NULL; |
209 | int ret; |
210 | unsigned long tmp; |
211 | |
212 | mode_cmd.width = sizes->surface_width; |
213 | mode_cmd.height = sizes->surface_height; |
214 | |
215 | if (sizes->surface_bpp == 24) |
216 | sizes->surface_bpp = 32; |
217 | |
218 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
219 | sizes->surface_depth); |
220 | |
221 | ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); |
222 | if (ret) { |
		DRM_ERROR("failed to create fbcon object %d\n", ret);
224 | return ret; |
225 | } |
226 | |
227 | abo = gem_to_amdgpu_bo(gobj); |
228 | |
229 | /* okay we have an object now allocate the framebuffer */ |
230 | info = drm_fb_helper_alloc_fbi(helper); |
231 | if (IS_ERR(info)) { |
232 | ret = PTR_ERR(info); |
233 | goto out; |
234 | } |
235 | |
236 | info->par = rfbdev; |
237 | info->skip_vt_switch = true; |
238 | |
239 | ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb, |
240 | &mode_cmd, gobj); |
241 | if (ret) { |
		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
243 | goto out; |
244 | } |
245 | |
246 | fb = &rfbdev->rfb.base; |
247 | |
248 | /* setup helper */ |
249 | rfbdev->helper.fb = fb; |
250 | |
	strcpy(info->fix.id, "amdgpudrmfb");
252 | |
253 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); |
254 | |
255 | info->fbops = &amdgpufb_ops; |
256 | |
257 | tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start; |
258 | info->fix.smem_start = adev->gmc.aper_base + tmp; |
259 | info->fix.smem_len = amdgpu_bo_size(abo); |
260 | info->screen_base = amdgpu_bo_kptr(abo); |
261 | info->screen_size = amdgpu_bo_size(abo); |
262 | |
263 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); |
264 | |
265 | /* setup aperture base/size for vesafb takeover */ |
266 | info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; |
267 | info->apertures->ranges[0].size = adev->gmc.aper_size; |
268 | |
269 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
270 | |
271 | if (info->screen_base == NULL) { |
272 | ret = -ENOSPC; |
273 | goto out; |
274 | } |
275 | |
	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
281 | |
282 | vga_switcheroo_client_fb_set(adev->ddev->pdev, info); |
283 | return 0; |
284 | |
285 | out: |
	if (fb && ret) {
		drm_gem_object_put_unlocked(gobj);
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
		/* fb is embedded in rfbdev->rfb, so it must not be kfree()d */
	}
295 | return ret; |
296 | } |
297 | |
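/* Tear down the fbdev emulation: unregister the fbi, free the BO and the fb. */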
298 | static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) |
299 | { |
300 | struct amdgpu_framebuffer *rfb = &rfbdev->rfb; |
301 | |
302 | drm_fb_helper_unregister_fbi(&rfbdev->helper); |
303 | |
304 | if (rfb->base.obj[0]) { |
305 | amdgpufb_destroy_pinned_object(rfb->base.obj[0]); |
306 | rfb->base.obj[0] = NULL; |
307 | drm_framebuffer_unregister_private(&rfb->base); |
308 | drm_framebuffer_cleanup(&rfb->base); |
309 | } |
310 | drm_fb_helper_fini(&rfbdev->helper); |
311 | |
312 | return 0; |
313 | } |
314 | |
315 | static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = { |
316 | .fb_probe = amdgpufb_create, |
317 | }; |
318 | |
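/* Set up fbdev emulation for the device and create the initial console fb. */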
319 | int amdgpu_fbdev_init(struct amdgpu_device *adev) |
320 | { |
321 | struct amdgpu_fbdev *rfbdev; |
322 | int bpp_sel = 32; |
323 | int ret; |
324 | |
325 | /* don't init fbdev on hw without DCE */ |
326 | if (!adev->mode_info.mode_config_initialized) |
327 | return 0; |
328 | |
329 | /* don't init fbdev if there are no connectors */ |
330 | if (list_empty(&adev->ddev->mode_config.connector_list)) |
331 | return 0; |
332 | |
333 | /* select 8 bpp console on low vram cards */ |
334 | if (adev->gmc.real_vram_size <= (32*1024*1024)) |
335 | bpp_sel = 8; |
336 | |
337 | rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); |
338 | if (!rfbdev) |
339 | return -ENOMEM; |
340 | |
341 | rfbdev->adev = adev; |
342 | adev->mode_info.rfbdev = rfbdev; |
343 | |
344 | drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, |
345 | &amdgpu_fb_helper_funcs); |
346 | |
347 | ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, |
348 | AMDGPUFB_CONN_LIMIT); |
349 | if (ret) { |
350 | kfree(rfbdev); |
351 | return ret; |
352 | } |
353 | |
354 | drm_fb_helper_single_add_all_connectors(&rfbdev->helper); |
355 | |
356 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
357 | if (!amdgpu_device_has_dc_support(adev)) |
358 | drm_helper_disable_unused_functions(adev->ddev); |
359 | |
360 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); |
361 | return 0; |
362 | } |
363 | |
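/* Tear down the fbdev emulation set up by amdgpu_fbdev_init(). */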
364 | void amdgpu_fbdev_fini(struct amdgpu_device *adev) |
365 | { |
366 | if (!adev->mode_info.rfbdev) |
367 | return; |
368 | |
369 | amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev); |
370 | kfree(adev->mode_info.rfbdev); |
371 | adev->mode_info.rfbdev = NULL; |
372 | } |
373 | |
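/* Propagate suspend/resume state to the fbdev helper. */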
374 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) |
375 | { |
376 | if (adev->mode_info.rfbdev) |
377 | drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper, |
378 | state); |
379 | } |
380 | |
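/* Return the size in bytes of the BO backing the fbdev framebuffer, if any. */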
381 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev) |
382 | { |
383 | struct amdgpu_bo *robj; |
384 | int size = 0; |
385 | |
386 | if (!adev->mode_info.rfbdev) |
387 | return 0; |
388 | |
389 | robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]); |
390 | size += amdgpu_bo_size(robj); |
391 | return size; |
392 | } |
393 | |
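/* Check whether @robj is the BO currently backing the fbdev framebuffer. */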
394 | bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) |
395 | { |
396 | if (!adev->mode_info.rfbdev) |
397 | return false; |
398 | if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])) |
399 | return true; |
400 | return false; |
401 | } |
402 | |