// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.atomic_check = msm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

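/*
 * Hook up the KMS interrupt: run the kms irq_preinstall hook, request the
 * IRQ line, then give the kms a chance to finish setup via the optional
 * irq_postinstall hook, freeing the IRQ again if that fails.
 */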
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}

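/*
 * Vblank enable/disable requests from the drm core can arrive in atomic
 * context, so they are bounced to the driver workqueue and the kms
 * enable_vblank()/disable_vblank() callbacks run from process context.
 */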
struct msm_vblank_work {
	struct work_struct work;
	struct drm_crtc *crtc;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, vbl_work->crtc);
	else
		kms->funcs->disable_vblank(kms, vbl_work->crtc);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					struct drm_crtc *crtc, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc = crtc;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
	return vblank_ctrl_queue_work(priv, crtc, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
	vblank_ctrl_queue_work(priv, crtc, false);
}

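/*
 * Set up the GEM address space used for kms scanout buffers, backed by
 * whichever device (the MDP/DPU itself or its parent MDSS) has the IOMMU
 * attached.  Returns NULL when there is no IOMMU, in which case scanout
 * falls back to physically contiguous buffers.
 */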
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	mmu = msm_iommu_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	if (!mmu) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace)) {
		dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
		mmu->funcs->destroy(mmu);
	}

	return aspace;
}

void msm_drm_kms_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	BUG_ON(!kms);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	drm_kms_helper_poll_fini(ddev);

	msm_disp_snapshot_destroy(ddev);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);
}

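/*
 * Load and initialize the kms: take over the firmware framebuffer, run the
 * driver's kms_init and hw_init hooks, spin up a per-crtc event worker, and
 * wire up vblank, IRQ and display snapshot support.
 */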
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *crtc;
	int ret;

	/* the fw fb could be anywhere in memory */
	ret = drm_aperture_remove_framebuffers(drv);
	if (ret)
		return ret;

	ret = priv->kms_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		priv->kms = NULL;
		return ret;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	kms->dev = ddev;
	ret = kms->funcs->hw_init(kms);
	if (ret) {
		DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
		goto err_msm_uninit;
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	drm_for_each_crtc(crtc, ddev) {
		struct msm_drm_thread *ev_thread;

		/* initialize event thread */
		ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
		ev_thread->dev = ddev;
		ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
		if (IS_ERR(ev_thread->worker)) {
			ret = PTR_ERR(ev_thread->worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			ev_thread->worker = NULL;
			goto err_msm_uninit;
		}

		sched_set_fifo(ev_thread->worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	pm_runtime_get_sync(dev);
	ret = msm_irq_install(ddev, kms->irq);
	pm_runtime_put_sync(dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
		goto err_msm_uninit;
	}

	ret = msm_disp_snapshot_init(ddev);
	if (ret)
		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);

	drm_mode_config_reset(ddev);

	return 0;

err_msm_uninit:
	return ret;
}

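/*
 * System PM hooks: suspend and restore the atomic display state around
 * system sleep via the mode config helpers, skipping devices where the
 * kms never came up.
 */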
int msm_kms_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_kms_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

void msm_kms_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (drm && drm->registered && priv->kms)
		drm_atomic_helper_shutdown(drm);
}
