// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>
#include <trace/events/sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

static struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

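/*
 * Per active socket data structure: it ties one frontend socket to one
 * kernel struct socket, and holds the grant-mapped data ring, the data
 * event channel IRQ and the atomic flags used to hand work over to the
 * per-socket I/O worker.
 */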
struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

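/*
 * Per passive (listening) socket data structure: reqcopy holds a copy
 * of the pending accept or poll request, protected by copy_lock, and
 * register_work runs the deferred accept on the private workqueue.
 */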
struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

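/*
 * Pull data from the kernel socket into the shared "in" ring. Returns
 * false when nothing can be done (the ring is full or already in an
 * error state), so that the event can be flagged as spurious at EOI
 * time; returns true otherwise.
 */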
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				       flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

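/*
 * Push data queued in the shared "out" ring to the kernel socket with a
 * non-blocking sendmsg. On -EAGAIN or a short write the write and io
 * flags are raised again so the ioworker retries later.
 */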
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	atomic_set(&map->write, 0);

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 2, size);
	}

	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

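/*
 * Main I/O worker, one instance per active socket. It keeps servicing
 * read and write requests while the io count is positive, bails out
 * when a release is pending, and acknowledges the lateeoi event channel
 * once all pending writes have drained.
 */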
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

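/*
 * PVCALLS_SOCKET: only AF_INET SOCK_STREAM sockets are supported. Only
 * the arguments are validated here and a response is queued; the actual
 * struct socket is created later, at connect or bind time.
 */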
static int pvcalls_back_socket(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	trace_sk_data_ready(sock);

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

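/*
 * Set up the backend side of an active connection: map the indexes page
 * and the data ring granted by the frontend, bind the data event
 * channel, start the per-socket workqueue and install the sk callbacks.
 * Consumes sock on every failure path.
 */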
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		sock_release(sock);
		return NULL;
	}

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

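/*
 * PVCALLS_CONNECT: create a kernel TCP socket, connect it to the
 * requested IPv4 address, and wire it up as a new active socket.
 */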
static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map)
		ret = -EFAULT;

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

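/*
 * Tear down one active socket: restore the saved sk callbacks, let the
 * ioworker observe the release flag and drain, then unmap the rings,
 * unbind the event channel and free everything.
 */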
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

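/*
 * Deferred accept, run from the listening socket's workqueue: take the
 * request copied by pvcalls_back_accept(), accept one connection
 * non-blocking, turn it into an active socket and push the response.
 */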
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

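/*
 * data_ready callback of a listening socket: if a PVCALLS_POLL request
 * is outstanding, answer it directly; otherwise kick the accept worker.
 */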
static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	trace_sk_data_ready(sock);

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

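/*
 * PVCALLS_BIND: create the passive socket, bind it, and track it in the
 * per-frontend radix tree keyed by the frontend-chosen id.
 */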
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_ordered_workqueue("pvcalls_wq", 0);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

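/*
 * PVCALLS_LISTEN: move a previously bound passive socket to the
 * listening state.
 */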
static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

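/*
 * PVCALLS_ACCEPT: only one accept or poll may be outstanding per
 * passive socket. The request is copied under copy_lock and the actual
 * accept runs later in __pvcalls_back_accept(); returning -1 tells the
 * command loop that the response will be sent asynchronously.
 */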
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

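/*
 * PVCALLS_POLL: answer immediately if a connection is already queued on
 * the listening socket, otherwise park the request in reqcopy and let
 * pvcalls_pass_sk_data_ready() complete it when one arrives.
 */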
static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

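/*
 * Dispatch one command ring request. A zero return means a response has
 * been queued and should be pushed; a negative return means the
 * response will be produced asynchronously.
 */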
static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

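/*
 * Command ring service loop: copy out each request (the frontend could
 * modify it in place otherwise), handle it, and batch response
 * notifications.
 */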
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

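/*
 * Event channel handler for the command ring, bound with lateeoi: the
 * interrupt is only acknowledged as non-spurious after real work.
 */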
static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

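/*
 * Event channel handler for a data ring: flag pending write work and
 * defer both the processing and the final EOI to the ioworker.
 */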
static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

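/*
 * Connect to a frontend: read "port" and "ring-ref" from the frontend's
 * xenstore directory, map the command ring and bind its event channel,
 * then register the new frontend in the global list.
 */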
static int backend_connect(struct xenbus_device *dev)
{
	int err;
	evtchn_port_t evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/port",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

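/*
 * Disconnect a frontend: release every active and passive socket it
 * still owns, then unmap the command ring and free the per-frontend
 * state.
 */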
static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

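/*
 * Probe: advertise the supported versions, the maximum ring order and
 * "function-calls" mode in xenstore inside a transaction, retrying on
 * -EAGAIN, then move to XenbusStateInitWait.
 */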
static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

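/*
 * Walk the xenbus state machine one step at a time until the backend
 * reaches the requested state, connecting or disconnecting the frontend
 * along the way.
 */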
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static void pvcalls_back_remove(struct xenbus_device *dev)
{
}

static int pvcalls_back_uevent(const struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");