// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

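/*
 * A display buffer shared with the backend: identified by the cookies
 * used to refer to it and backed by a shared buffer which grants the
 * backend access to its pages.
 */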
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

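/*
 * Helpers to track display buffers on front_info->dbuf_list:
 * buffers are looked up and freed by their dbuf_cookie.
 */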
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					    u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

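/*
 * Get the next free slot on the shared ring and fill in the common
 * request fields; callers hold front_info->io_lock.
 */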
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

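/*
 * Push the prepared request to the backend and kick the event channel;
 * called with front_info->io_lock held. The response is waited for
 * separately with be_stream_wait_io().
 */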
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

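/*
 * Wait for the backend to complete the request or time out; on
 * success return the status reported in the backend's response.
 */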
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

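/* Ask the backend to set the display configuration of a pipeline. */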
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

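/*
 * Allocate a display buffer, share its pages with the backend via a
 * page directory and issue XENDISPL_OP_DBUF_CREATE. If the backend
 * allocates the buffer (be_alloc), map the granted pages on success.
 */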
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, u32 offset,
			      struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.data_ofs = offset;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

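/*
 * Tell the backend to destroy a display buffer and drop the local
 * tracking structure; local resources are freed even if the backend
 * request fails.
 */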
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend-allocated buffer release our references now,
	 * so the backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources, remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

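/* Attach a DRM framebuffer to a previously created display buffer. */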
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

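/*
 * Request a page flip on the connector's own event channel; frame done
 * notifications from the backend arrive via xen_drm_front_on_frame_done().
 */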
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

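/*
 * Free a GEM object: if the device is still present, ask the backend
 * to destroy the display buffer; otherwise only drop the local copy.
 */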
void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we create the GEM handle. This avoids a race:
	 * once a handle is created it is immediately visible to user-space,
	 * which could try to access an object that has no pages yet.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size, 0,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

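/*
 * Called when the last reference to the DRM device is dropped: tear
 * down KMS and, for backend-allocated buffers, tell the backend we
 * are ready to (re)initialize.
 */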
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);

static const struct drm_driver xen_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = xen_drm_drv_release,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.dumb_create = xen_drm_drv_dumb_create,
	.fops = &xen_drm_dev_fops,
	.name = "xendrm-du",
	.desc = "Xen PV DRM Display Unit",
	.date = "20180221",
	.major = 1,
	.minor = 0,
};

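/* Allocate, set up and register the DRM device once the backend has connected. */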
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_dev;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
	drm_dev_put(drm_dev);
fail_dev:
	kfree(drm_info);
	front_info->drm_info = NULL;
fail:
	return ret;
}

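/*
 * Unplug the DRM device and free the per-connection resources; called
 * on backend disconnect and on driver removal.
 */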
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

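/*
 * XenBus state machine: react to backend state changes by
 * (re)initializing, connecting or disconnecting the frontend.
 */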
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so
		 * let it go into the closed state, so we can remove ours too.
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

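/*
 * Set up the DMA mask and per-device state, then signal the backend
 * that we are initialising.
 */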
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static void xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally, when the frontend driver is removed, the backend will
	 * finally go into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
	       --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
}

751 | |
752 | static const struct xenbus_device_id xen_driver_ids[] = { |
753 | { XENDISPL_DRIVER_NAME }, |
754 | { "" } |
755 | }; |
756 | |
757 | static struct xenbus_driver xen_driver = { |
758 | .ids = xen_driver_ids, |
759 | .probe = xen_drv_probe, |
760 | .remove = xen_drv_remove, |
761 | .otherend_changed = displback_changed, |
762 | .not_essential = true, |
763 | }; |
764 | |
765 | static int __init xen_drv_init(void) |
766 | { |
767 | /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */ |
768 | if (XEN_PAGE_SIZE != PAGE_SIZE) { |
769 | DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n" , |
770 | XEN_PAGE_SIZE, PAGE_SIZE); |
771 | return -ENODEV; |
772 | } |
773 | |
774 | if (!xen_domain()) |
775 | return -ENODEV; |
776 | |
777 | if (!xen_has_pv_devices()) |
778 | return -ENODEV; |
779 | |
780 | DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n" ); |
781 | return xenbus_register_frontend(&xen_driver); |
782 | } |
783 | |
784 | static void __exit xen_drv_fini(void) |
785 | { |
786 | DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n" ); |
787 | xenbus_unregister_driver(drv: &xen_driver); |
788 | } |
789 | |
790 | module_init(xen_drv_init); |
791 | module_exit(xen_drv_fini); |
792 | |
793 | MODULE_DESCRIPTION("Xen para-virtualized display device frontend" ); |
794 | MODULE_LICENSE("GPL" ); |
795 | MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME); |
796 | |