/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include <drm/drm_fb_helper.h>

#include <linux/vga_switcheroo.h>

/* object hierarchy -
 * this contains the drm_fb_helper and the drm_framebuffer used for the
 * fbdev console; the helper in turn carries a pointer back to that
 * framebuffer once it has been created.
 */
struct radeon_fbdev {
	struct drm_fb_helper helper;
	struct drm_framebuffer fb;
	struct radeon_device *rdev;
};

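/*
 * fb_open/fb_release bracket fbdev users with a runtime PM reference so
 * the GPU stays powered while the console is in use; -EACCES just means
 * runtime PM is disabled for this device and is not treated as an error.
 */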
static int
radeonfb_open(struct fb_info *info, int user)
{
	struct radeon_fbdev *rfbdev = info->par;
	struct radeon_device *rdev = rfbdev->rdev;
	int ret = pm_runtime_get_sync(rdev->ddev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_mark_last_busy(rdev->ddev->dev);
		pm_runtime_put_autosuspend(rdev->ddev->dev);
		return ret;
	}
	return 0;
}

static int
radeonfb_release(struct fb_info *info, int user)
{
	struct radeon_fbdev *rfbdev = info->par;
	struct radeon_device *rdev = rfbdev->rdev;

	pm_runtime_mark_last_busy(rdev->ddev->dev);
	pm_runtime_put_autosuspend(rdev->ddev->dev);
	return 0;
}

static struct fb_ops radeonfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = radeonfb_open,
	.fb_release = radeonfb_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
};


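/*
 * Round a scanline up to the pitch alignment the display hardware needs.
 * AVIVO-class parts and tiled surfaces require a coarser alignment than
 * the older CRTCs, and the mask scales with the bytes per pixel; the
 * result is returned as a pitch in bytes.
 */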
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

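/*
 * Tear down the scanout buffer object: unmap and unpin it under its
 * reservation, then drop the GEM reference that was taken at creation.
 */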
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
	int ret;

	ret = radeon_bo_reserve(rbo, false);
	if (likely(ret == 0)) {
		radeon_bo_kunmap(rbo);
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}
	drm_gem_object_put_unlocked(gobj);
}

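/*
 * Allocate the buffer object that backs the fbdev framebuffer: compute a
 * CRTC-compatible pitch, create a VRAM GEM object, apply any tiling and
 * endian-swap flags, then pin it into VRAM and map it for CPU access so
 * fbcon can draw into it.
 */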
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct radeon_device *rdev = rfbdev->rdev;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 cpp;

	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
						  fb_tiled);

	if (rdev->family >= CHIP_R600)
		height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = radeon_gem_object_create(rdev, aligned_size, 0,
				       RADEON_GEM_DOMAIN_VRAM,
				       0, true, &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer of size %d\n", aligned_size);
		return -ENOMEM;
	}
	rbo = gem_to_radeon_bo(gobj);

	if (fb_tiled)
		tiling_flags = RADEON_TILING_MACRO;

#ifdef __BIG_ENDIAN
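	/*
	 * Framebuffer formats are little-endian; on big-endian hosts ask
	 * the surface logic to byte-swap CPU accesses for 16/32 bpp.
	 */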
	switch (cpp) {
	case 4:
		tiling_flags |= RADEON_TILING_SWAP_32BIT;
		break;
	case 2:
		tiling_flags |= RADEON_TILING_SWAP_16BIT;
		break;
	default:
		break;
	}
#endif

	if (tiling_flags) {
		ret = radeon_bo_set_tiling_flags(rbo,
						 tiling_flags | RADEON_TILING_SURFACE,
						 mode_cmd->pitches[0]);
		if (ret)
			dev_err(rdev->dev, "FB failed to set tiling flags\n");
	}


	ret = radeon_bo_reserve(rbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	/* Only 27 bit offset for legacy CRTC */
	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
				       NULL);
	if (ret) {
		radeon_bo_unreserve(rbo);
		goto out_unref;
	}
	if (fb_tiled)
		radeon_bo_check_tiling(rbo, 0, 0);
	ret = radeon_bo_kmap(rbo, NULL);
	radeon_bo_unreserve(rbo);
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;
out_unref:
	radeonfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}

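/*
 * .fb_probe callback for the fb helper: allocate and pin the scanout BO,
 * wrap it in the embedded drm_framebuffer, and fill in the fb_info that
 * fbcon will use (fixed/variable screen info, aperture and mapping).
 */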
static int radeonfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct radeon_fbdev *rfbdev =
		container_of(helper, struct radeon_fbdev, helper);
	struct radeon_device *rdev = rfbdev->rdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	int ret;
	unsigned long tmp;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* avivo can't scanout real 24bpp */
	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
		sizes->surface_bpp = 32;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon object %d\n", ret);
		return ret;
	}

	rbo = gem_to_radeon_bo(gobj);

	/* we have the pinned object, now allocate the fb_info and framebuffer */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	info->par = rfbdev;

	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
	if (ret) {
		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
		goto out;
	}

	fb = &rfbdev->fb;

	/* setup helper */
	rfbdev->helper.fb = fb;

	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));

	strcpy(info->fix.id, "radeondrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);

	info->fbops = &radeonfb_ops;

	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
	info->fix.smem_start = rdev->mc.aper_base + tmp;
	info->fix.smem_len = radeon_bo_size(rbo);
	info->screen_base = rbo->kptr;
	info->screen_size = radeon_bo_size(rbo);

	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = rdev->mc.aper_size;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
	return 0;

out:
	if (fb && ret) {
		/* fb is embedded in rfbdev, so clean it up but do not kfree() it */
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
		fb->obj[0] = NULL;
	}
	/* on any failure, unpin, unmap and release the scanout BO as well */
	if (ret)
		radeonfb_destroy_pinned_object(gobj);
	return ret;
}

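/*
 * Undo radeonfb_create: unregister the fb_info, release the pinned
 * scanout object and the framebuffer wrapped around it, then tear down
 * the fb helper itself.
 */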
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
	struct drm_framebuffer *fb = &rfbdev->fb;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);

	if (fb->obj[0]) {
		radeonfb_destroy_pinned_object(fb->obj[0]);
		fb->obj[0] = NULL;
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
	}
	drm_fb_helper_fini(&rfbdev->helper);

	return 0;
}

static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.fb_probe = radeonfb_create,
};

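/*
 * Bring up the fbdev emulation: pick a console depth that fits in VRAM
 * (8 bpp on 8MB cards, 16 bpp on RN50 or 32MB cards, 32 bpp otherwise),
 * register the fb helper with all connectors and let it probe an
 * initial configuration.
 */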
int radeon_fbdev_init(struct radeon_device *rdev)
{
	struct radeon_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* don't enable fbdev if no connectors */
	if (list_empty(&rdev->ddev->mode_config.connector_list))
		return 0;

	/* select 8 bpp console on 8MB cards, or 16 bpp on RN50 or 32MB */
	if (rdev->mc.real_vram_size <= (8*1024*1024))
		bpp_sel = 8;
	else if (ASIC_IS_RN50(rdev) ||
		 rdev->mc.real_vram_size <= (32*1024*1024))
		bpp_sel = 16;

	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->rdev = rdev;
	rdev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
			      &radeon_fb_helper_funcs);

	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
				 RADEONFB_CONN_LIMIT);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
	if (ret)
		goto fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(rdev->ddev);

	ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&rfbdev->helper);
free:
	/* don't leave a dangling pointer behind for the other helpers */
	rdev->mode_info.rfbdev = NULL;
	kfree(rfbdev);
	return ret;
}

void radeon_fbdev_fini(struct radeon_device *rdev)
{
	if (!rdev->mode_info.rfbdev)
		return;

	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
	kfree(rdev->mode_info.rfbdev);
	rdev->mode_info.rfbdev = NULL;
}

void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
	if (rdev->mode_info.rfbdev)
		drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state);
}

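/* Report whether the given BO is the one backing the fbdev framebuffer. */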
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
	if (!rdev->mode_info.rfbdev)
		return false;

	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->fb.obj[0]))
		return true;
	return false;
}

void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
	if (rdev->mode_info.rfbdev)
		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}

void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
	if (rdev->mode_info.rfbdev)
		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}