1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Char device for device raw access |
4 | * |
5 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> |
6 | */ |
7 | |
8 | #include <linux/bug.h> |
9 | #include <linux/compat.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/device.h> |
12 | #include <linux/dma-mapping.h> |
13 | #include <linux/err.h> |
14 | #include <linux/errno.h> |
15 | #include <linux/firewire.h> |
16 | #include <linux/firewire-cdev.h> |
17 | #include <linux/idr.h> |
18 | #include <linux/irqflags.h> |
19 | #include <linux/jiffies.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/kref.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/module.h> |
24 | #include <linux/mutex.h> |
25 | #include <linux/poll.h> |
26 | #include <linux/sched.h> /* required for linux/wait.h */ |
27 | #include <linux/slab.h> |
28 | #include <linux/spinlock.h> |
29 | #include <linux/string.h> |
30 | #include <linux/time.h> |
31 | #include <linux/uaccess.h> |
32 | #include <linux/vmalloc.h> |
33 | #include <linux/wait.h> |
34 | #include <linux/workqueue.h> |
35 | |
36 | |
37 | #include "core.h" |
38 | |
39 | /* |
40 | * ABI version history is documented in linux/firewire-cdev.h. |
41 | */ |
#define FW_CDEV_KERNEL_VERSION 6
43 | #define FW_CDEV_VERSION_EVENT_REQUEST2 4 |
44 | #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 |
45 | #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 |
46 | #define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6 |
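/*
 * The client's ABI version is recorded in ioctl_get_info() and compared
 * against the FW_CDEV_VERSION_* constants above to pick, for example,
 * whether asynchronous transaction events carry timestamps and whether
 * the allocate ioctl honors region_end.
 */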
47 | |
48 | struct client { |
49 | u32 version; |
50 | struct fw_device *device; |
51 | |
52 | spinlock_t lock; |
53 | bool in_shutdown; |
54 | struct idr resource_idr; |
55 | struct list_head event_list; |
56 | wait_queue_head_t wait; |
57 | wait_queue_head_t tx_flush_wait; |
58 | u64 bus_reset_closure; |
59 | |
60 | struct fw_iso_context *iso_context; |
61 | u64 iso_closure; |
62 | struct fw_iso_buffer buffer; |
63 | unsigned long vm_start; |
64 | bool buffer_is_mapped; |
65 | |
66 | struct list_head phy_receiver_link; |
67 | u64 phy_receiver_closure; |
68 | |
69 | struct list_head link; |
70 | struct kref kref; |
71 | }; |
72 | |
73 | static inline void client_get(struct client *client) |
74 | { |
	kref_get(&client->kref);
76 | } |
77 | |
78 | static void client_release(struct kref *kref) |
79 | { |
80 | struct client *client = container_of(kref, struct client, kref); |
81 | |
	fw_device_put(client->device);
	kfree(client);
84 | } |
85 | |
86 | static void client_put(struct client *client) |
87 | { |
	kref_put(&client->kref, client_release);
89 | } |
90 | |
91 | struct client_resource; |
92 | typedef void (*client_resource_release_fn_t)(struct client *, |
93 | struct client_resource *); |
94 | struct client_resource { |
95 | client_resource_release_fn_t release; |
96 | int handle; |
97 | }; |
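/*
 * A client_resource is anything that userspace refers to by a handle:
 * handle is the index allocated in client->resource_idr by
 * add_client_resource(), and release() is called when the resource is
 * torn down via release_client_resource().
 */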
98 | |
99 | struct address_handler_resource { |
100 | struct client_resource resource; |
101 | struct fw_address_handler handler; |
102 | __u64 closure; |
103 | struct client *client; |
104 | }; |
105 | |
106 | struct outbound_transaction_resource { |
107 | struct client_resource resource; |
108 | struct fw_transaction transaction; |
109 | }; |
110 | |
111 | struct inbound_transaction_resource { |
112 | struct client_resource resource; |
113 | struct fw_card *card; |
114 | struct fw_request *request; |
115 | bool is_fcp; |
116 | void *data; |
117 | size_t length; |
118 | }; |
119 | |
120 | struct descriptor_resource { |
121 | struct client_resource resource; |
122 | struct fw_descriptor descriptor; |
123 | u32 data[]; |
124 | }; |
125 | |
126 | struct iso_resource { |
127 | struct client_resource resource; |
128 | struct client *client; |
129 | /* Schedule work and access todo only with client->lock held. */ |
130 | struct delayed_work work; |
131 | enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, |
132 | ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; |
133 | int generation; |
134 | u64 channels; |
135 | s32 bandwidth; |
136 | struct iso_resource_event *e_alloc, *e_dealloc; |
137 | }; |
138 | |
139 | static void release_iso_resource(struct client *, struct client_resource *); |
140 | |
141 | static void schedule_iso_resource(struct iso_resource *r, unsigned long delay) |
142 | { |
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
146 | } |
147 | |
148 | static void schedule_if_iso_resource(struct client_resource *resource) |
149 | { |
150 | if (resource->release == release_iso_resource) |
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
153 | } |
154 | |
155 | /* |
156 | * dequeue_event() just kfree()'s the event, so the event has to be |
157 | * the first field in a struct XYZ_event. |
158 | */ |
159 | struct event { |
160 | struct { void *data; size_t size; } v[2]; |
161 | struct list_head link; |
162 | }; |
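/*
 * The two v[] segments let queue_event() describe an event as up to two
 * pieces (e.g. a fixed-size header plus a variable-length payload);
 * dequeue_event() copies them back-to-back into the read() buffer.
 */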
163 | |
164 | struct bus_reset_event { |
165 | struct event event; |
166 | struct fw_cdev_event_bus_reset reset; |
167 | }; |
168 | |
169 | struct outbound_transaction_event { |
170 | struct event event; |
171 | struct client *client; |
172 | struct outbound_transaction_resource r; |
173 | union { |
174 | struct fw_cdev_event_response without_tstamp; |
175 | struct fw_cdev_event_response2 with_tstamp; |
176 | } rsp; |
177 | }; |
178 | |
179 | struct inbound_transaction_event { |
180 | struct event event; |
181 | union { |
182 | struct fw_cdev_event_request request; |
183 | struct fw_cdev_event_request2 request2; |
184 | struct fw_cdev_event_request3 with_tstamp; |
185 | } req; |
186 | }; |
187 | |
188 | struct iso_interrupt_event { |
189 | struct event event; |
190 | struct fw_cdev_event_iso_interrupt interrupt; |
191 | }; |
192 | |
193 | struct iso_interrupt_mc_event { |
194 | struct event event; |
195 | struct fw_cdev_event_iso_interrupt_mc interrupt; |
196 | }; |
197 | |
198 | struct iso_resource_event { |
199 | struct event event; |
200 | struct fw_cdev_event_iso_resource iso_resource; |
201 | }; |
202 | |
203 | struct outbound_phy_packet_event { |
204 | struct event event; |
205 | struct client *client; |
206 | struct fw_packet p; |
207 | union { |
208 | struct fw_cdev_event_phy_packet without_tstamp; |
209 | struct fw_cdev_event_phy_packet2 with_tstamp; |
210 | } phy_packet; |
211 | }; |
212 | |
213 | struct inbound_phy_packet_event { |
214 | struct event event; |
215 | union { |
216 | struct fw_cdev_event_phy_packet without_tstamp; |
217 | struct fw_cdev_event_phy_packet2 with_tstamp; |
218 | } phy_packet; |
219 | }; |
220 | |
221 | #ifdef CONFIG_COMPAT |
222 | static void __user *u64_to_uptr(u64 value) |
223 | { |
224 | if (in_compat_syscall()) |
		return compat_ptr(value);
226 | else |
227 | return (void __user *)(unsigned long)value; |
228 | } |
229 | |
230 | static u64 uptr_to_u64(void __user *ptr) |
231 | { |
232 | if (in_compat_syscall()) |
		return ptr_to_compat(ptr);
234 | else |
235 | return (u64)(unsigned long)ptr; |
236 | } |
237 | #else |
238 | static inline void __user *u64_to_uptr(u64 value) |
239 | { |
240 | return (void __user *)(unsigned long)value; |
241 | } |
242 | |
243 | static inline u64 uptr_to_u64(void __user *ptr) |
244 | { |
245 | return (u64)(unsigned long)ptr; |
246 | } |
247 | #endif /* CONFIG_COMPAT */ |
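/*
 * Pointers cross the cdev ABI as u64 values; for a 32-bit process on a
 * 64-bit kernel they are converted with compat_ptr()/ptr_to_compat()
 * above, otherwise a plain cast suffices.
 */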
248 | |
249 | static int fw_device_op_open(struct inode *inode, struct file *file) |
250 | { |
251 | struct fw_device *device; |
252 | struct client *client; |
253 | |
	device = fw_device_get_by_devt(inode->i_rdev);
255 | if (device == NULL) |
256 | return -ENODEV; |
257 | |
258 | if (fw_device_is_shutdown(device)) { |
259 | fw_device_put(device); |
260 | return -ENODEV; |
261 | } |
262 | |
	client = kzalloc(sizeof(*client), GFP_KERNEL);
264 | if (client == NULL) { |
265 | fw_device_put(device); |
266 | return -ENOMEM; |
267 | } |
268 | |
269 | client->device = device; |
270 | spin_lock_init(&client->lock); |
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);
278 | |
279 | file->private_data = client; |
280 | |
	return nonseekable_open(inode, file);
282 | } |
283 | |
284 | static void queue_event(struct client *client, struct event *event, |
285 | void *data0, size_t size0, void *data1, size_t size1) |
286 | { |
287 | unsigned long flags; |
288 | |
289 | event->v[0].data = data0; |
290 | event->v[0].size = size0; |
291 | event->v[1].data = data1; |
292 | event->v[1].size = size1; |
293 | |
294 | spin_lock_irqsave(&client->lock, flags); |
295 | if (client->in_shutdown) |
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);
300 | |
301 | wake_up_interruptible(&client->wait); |
302 | } |
303 | |
304 | static int dequeue_event(struct client *client, |
305 | char __user *buffer, size_t count) |
306 | { |
307 | struct event *event; |
308 | size_t size, total; |
309 | int i, ret; |
310 | |
311 | ret = wait_event_interruptible(client->wait, |
312 | !list_empty(&client->event_list) || |
313 | fw_device_is_shutdown(client->device)); |
314 | if (ret < 0) |
315 | return ret; |
316 | |
	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
319 | return -ENODEV; |
320 | |
	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);
325 | |
326 | total = 0; |
327 | for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { |
328 | size = min(event->v[i].size, count - total); |
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
330 | ret = -EFAULT; |
331 | goto out; |
332 | } |
333 | total += size; |
334 | } |
335 | ret = total; |
336 | |
337 | out: |
	kfree(event);
339 | |
340 | return ret; |
341 | } |
342 | |
343 | static ssize_t fw_device_op_read(struct file *file, char __user *buffer, |
344 | size_t count, loff_t *offset) |
345 | { |
346 | struct client *client = file->private_data; |
347 | |
348 | return dequeue_event(client, buffer, count); |
349 | } |
350 | |
351 | static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, |
352 | struct client *client) |
353 | { |
354 | struct fw_card *card = client->device->card; |
355 | |
	spin_lock_irq(&card->lock);
357 | |
358 | event->closure = client->bus_reset_closure; |
359 | event->type = FW_CDEV_EVENT_BUS_RESET; |
360 | event->generation = client->device->generation; |
361 | event->node_id = client->device->node_id; |
362 | event->local_node_id = card->local_node->node_id; |
363 | event->bm_node_id = card->bm_node_id; |
364 | event->irm_node_id = card->irm_node->node_id; |
365 | event->root_node_id = card->root_node->node_id; |
366 | |
	spin_unlock_irq(&card->lock);
368 | } |
369 | |
370 | static void for_each_client(struct fw_device *device, |
371 | void (*callback)(struct client *client)) |
372 | { |
373 | struct client *c; |
374 | |
375 | mutex_lock(&device->client_list_mutex); |
376 | list_for_each_entry(c, &device->client_list, link) |
377 | callback(c); |
	mutex_unlock(&device->client_list_mutex);
379 | } |
380 | |
381 | static int schedule_reallocations(int id, void *p, void *data) |
382 | { |
	schedule_if_iso_resource(p);
384 | |
385 | return 0; |
386 | } |
387 | |
388 | static void queue_bus_reset_event(struct client *client) |
389 | { |
390 | struct bus_reset_event *e; |
391 | |
	e = kzalloc(sizeof(*e), GFP_KERNEL);
393 | if (e == NULL) |
394 | return; |
395 | |
	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
404 | } |
405 | |
406 | void fw_device_cdev_update(struct fw_device *device) |
407 | { |
	for_each_client(device, queue_bus_reset_event);
409 | } |
410 | |
411 | static void wake_up_client(struct client *client) |
412 | { |
413 | wake_up_interruptible(&client->wait); |
414 | } |
415 | |
416 | void fw_device_cdev_remove(struct fw_device *device) |
417 | { |
	for_each_client(device, wake_up_client);
419 | } |
420 | |
421 | union ioctl_arg { |
422 | struct fw_cdev_get_info get_info; |
423 | struct fw_cdev_send_request send_request; |
424 | struct fw_cdev_allocate allocate; |
425 | struct fw_cdev_deallocate deallocate; |
426 | struct fw_cdev_send_response send_response; |
427 | struct fw_cdev_initiate_bus_reset initiate_bus_reset; |
428 | struct fw_cdev_add_descriptor add_descriptor; |
429 | struct fw_cdev_remove_descriptor remove_descriptor; |
430 | struct fw_cdev_create_iso_context create_iso_context; |
431 | struct fw_cdev_queue_iso queue_iso; |
432 | struct fw_cdev_start_iso start_iso; |
433 | struct fw_cdev_stop_iso stop_iso; |
434 | struct fw_cdev_get_cycle_timer get_cycle_timer; |
435 | struct fw_cdev_allocate_iso_resource allocate_iso_resource; |
436 | struct fw_cdev_send_stream_packet send_stream_packet; |
437 | struct fw_cdev_get_cycle_timer2 get_cycle_timer2; |
438 | struct fw_cdev_send_phy_packet send_phy_packet; |
439 | struct fw_cdev_receive_phy_packets receive_phy_packets; |
440 | struct fw_cdev_set_iso_channels set_iso_channels; |
441 | struct fw_cdev_flush_iso flush_iso; |
442 | }; |
443 | |
444 | static int ioctl_get_info(struct client *client, union ioctl_arg *arg) |
445 | { |
446 | struct fw_cdev_get_info *a = &arg->get_info; |
447 | struct fw_cdev_event_bus_reset bus_reset; |
448 | unsigned long ret = 0; |
449 | |
450 | client->version = a->version; |
451 | a->version = FW_CDEV_KERNEL_VERSION; |
452 | a->card = client->device->card->index; |
453 | |
	down_read(&fw_device_rwsem);
455 | |
456 | if (a->rom != 0) { |
457 | size_t want = a->rom_length; |
458 | size_t have = client->device->config_rom_length * 4; |
459 | |
		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
462 | } |
463 | a->rom_length = client->device->config_rom_length * 4; |
464 | |
	up_read(&fw_device_rwsem);
466 | |
467 | if (ret != 0) |
468 | return -EFAULT; |
469 | |
470 | mutex_lock(&client->device->client_list_mutex); |
471 | |
472 | client->bus_reset_closure = a->bus_reset_closure; |
473 | if (a->bus_reset != 0) { |
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);
480 | |
	mutex_unlock(&client->device->client_list_mutex);
482 | |
483 | return ret ? -EFAULT : 0; |
484 | } |
485 | |
486 | static int add_client_resource(struct client *client, |
487 | struct client_resource *resource, gfp_t gfp_mask) |
488 | { |
	bool preload = gfpflags_allow_blocking(gfp_mask);
490 | unsigned long flags; |
491 | int ret; |
492 | |
493 | if (preload) |
494 | idr_preload(gfp_mask); |
495 | spin_lock_irqsave(&client->lock, flags); |
496 | |
497 | if (client->in_shutdown) |
498 | ret = -ECANCELED; |
499 | else |
		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
				GFP_NOWAIT);
502 | if (ret >= 0) { |
503 | resource->handle = ret; |
504 | client_get(client); |
505 | schedule_if_iso_resource(resource); |
506 | } |
507 | |
	spin_unlock_irqrestore(&client->lock, flags);
509 | if (preload) |
510 | idr_preload_end(); |
511 | |
512 | return ret < 0 ? ret : 0; |
513 | } |
514 | |
515 | static int release_client_resource(struct client *client, u32 handle, |
516 | client_resource_release_fn_t release, |
517 | struct client_resource **return_resource) |
518 | { |
519 | struct client_resource *resource; |
520 | |
	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);
529 | |
530 | if (!(resource && resource->release == release)) |
531 | return -EINVAL; |
532 | |
533 | if (return_resource) |
534 | *return_resource = resource; |
535 | else |
536 | resource->release(client, resource); |
537 | |
538 | client_put(client); |
539 | |
540 | return 0; |
541 | } |
542 | |
543 | static void release_transaction(struct client *client, |
544 | struct client_resource *resource) |
545 | { |
546 | } |
547 | |
548 | static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp, |
549 | u32 response_tstamp, void *payload, size_t length, void *data) |
550 | { |
551 | struct outbound_transaction_event *e = data; |
552 | struct client *client = e->client; |
553 | unsigned long flags; |
554 | |
555 | spin_lock_irqsave(&client->lock, flags); |
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);
560 | |
561 | switch (e->rsp.without_tstamp.type) { |
562 | case FW_CDEV_EVENT_RESPONSE: |
563 | { |
564 | struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp; |
565 | |
566 | if (length < rsp->length) |
567 | rsp->length = length; |
568 | if (rcode == RCODE_COMPLETE) |
569 | memcpy(rsp->data, payload, rsp->length); |
570 | |
571 | rsp->rcode = rcode; |
572 | |
573 | // In the case that sizeof(*rsp) doesn't align with the position of the |
574 | // data, and the read is short, preserve an extra copy of the data |
575 | // to stay compatible with a pre-2.6.27 bug. Since the bug is harmless |
576 | // for short reads and some apps depended on it, this is both safe |
577 | // and prudent for compatibility. |
578 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) |
			queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
		else
			queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
582 | |
583 | break; |
584 | } |
585 | case FW_CDEV_EVENT_RESPONSE2: |
586 | { |
587 | struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp; |
588 | |
589 | if (length < rsp->length) |
590 | rsp->length = length; |
591 | if (rcode == RCODE_COMPLETE) |
592 | memcpy(rsp->data, payload, rsp->length); |
593 | |
594 | rsp->rcode = rcode; |
595 | rsp->request_tstamp = request_tstamp; |
596 | rsp->response_tstamp = response_tstamp; |
597 | |
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);

		break;
	}
	default:
		WARN_ON(1);
		break;
	}
606 | |
607 | /* Drop the idr's reference */ |
608 | client_put(client); |
609 | } |
610 | |
611 | static int init_request(struct client *client, |
612 | struct fw_cdev_send_request *request, |
613 | int destination_id, int speed) |
614 | { |
615 | struct outbound_transaction_event *e; |
616 | void *payload; |
617 | int ret; |
618 | |
619 | if (request->tcode != TCODE_STREAM_DATA && |
620 | (request->length > 4096 || request->length > 512 << speed)) |
621 | return -EIO; |
622 | |
623 | if (request->tcode == TCODE_WRITE_QUADLET_REQUEST && |
624 | request->length < 4) |
625 | return -EINVAL; |
626 | |
	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
628 | if (e == NULL) |
629 | return -ENOMEM; |
630 | e->client = client; |
631 | |
632 | if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { |
633 | struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp; |
634 | |
635 | rsp->type = FW_CDEV_EVENT_RESPONSE; |
636 | rsp->length = request->length; |
637 | rsp->closure = request->closure; |
638 | payload = rsp->data; |
639 | } else { |
640 | struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp; |
641 | |
642 | rsp->type = FW_CDEV_EVENT_RESPONSE2; |
643 | rsp->length = request->length; |
644 | rsp->closure = request->closure; |
645 | payload = rsp->data; |
646 | } |
647 | |
	if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
649 | ret = -EFAULT; |
650 | goto failed; |
651 | } |
652 | |
653 | e->r.resource.release = release_transaction; |
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
655 | if (ret < 0) |
656 | goto failed; |
657 | |
	fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
				    destination_id, request->generation, speed, request->offset,
				    payload, request->length, complete_transaction, e);
661 | return 0; |
662 | |
663 | failed: |
	kfree(e);
665 | |
666 | return ret; |
667 | } |
668 | |
669 | static int ioctl_send_request(struct client *client, union ioctl_arg *arg) |
670 | { |
671 | switch (arg->send_request.tcode) { |
672 | case TCODE_WRITE_QUADLET_REQUEST: |
673 | case TCODE_WRITE_BLOCK_REQUEST: |
674 | case TCODE_READ_QUADLET_REQUEST: |
675 | case TCODE_READ_BLOCK_REQUEST: |
676 | case TCODE_LOCK_MASK_SWAP: |
677 | case TCODE_LOCK_COMPARE_SWAP: |
678 | case TCODE_LOCK_FETCH_ADD: |
679 | case TCODE_LOCK_LITTLE_ADD: |
680 | case TCODE_LOCK_BOUNDED_ADD: |
681 | case TCODE_LOCK_WRAP_ADD: |
682 | case TCODE_LOCK_VENDOR_DEPENDENT: |
683 | break; |
684 | default: |
685 | return -EINVAL; |
686 | } |
687 | |
	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
690 | } |
691 | |
692 | static void release_request(struct client *client, |
693 | struct client_resource *resource) |
694 | { |
695 | struct inbound_transaction_resource *r = container_of(resource, |
696 | struct inbound_transaction_resource, resource); |
697 | |
698 | if (r->is_fcp) |
		fw_request_put(r->request);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
705 | } |
706 | |
707 | static void handle_request(struct fw_card *card, struct fw_request *request, |
708 | int tcode, int destination, int source, |
709 | int generation, unsigned long long offset, |
710 | void *payload, size_t length, void *callback_data) |
711 | { |
712 | struct address_handler_resource *handler = callback_data; |
713 | bool is_fcp = is_in_fcp_region(offset, length); |
714 | struct inbound_transaction_resource *r; |
715 | struct inbound_transaction_event *e; |
716 | size_t event_size0; |
717 | int ret; |
718 | |
719 | /* card may be different from handler->client->device->card */ |
720 | fw_card_get(card); |
721 | |
722 | // Extend the lifetime of data for request so that its payload is safely accessible in |
723 | // the process context for the client. |
724 | if (is_fcp) |
725 | fw_request_get(request); |
726 | |
	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
729 | if (r == NULL || e == NULL) |
730 | goto failed; |
731 | |
732 | r->card = card; |
733 | r->request = request; |
734 | r->is_fcp = is_fcp; |
735 | r->data = payload; |
736 | r->length = length; |
737 | |
738 | r->resource.release = release_request; |
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
740 | if (ret < 0) |
741 | goto failed; |
742 | |
743 | if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) { |
744 | struct fw_cdev_event_request *req = &e->req.request; |
745 | |
746 | if (tcode & 0x10) |
747 | tcode = TCODE_LOCK_REQUEST; |
748 | |
749 | req->type = FW_CDEV_EVENT_REQUEST; |
750 | req->tcode = tcode; |
751 | req->offset = offset; |
752 | req->length = length; |
753 | req->handle = r->resource.handle; |
754 | req->closure = handler->closure; |
755 | event_size0 = sizeof(*req); |
756 | } else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { |
757 | struct fw_cdev_event_request2 *req = &e->req.request2; |
758 | |
759 | req->type = FW_CDEV_EVENT_REQUEST2; |
760 | req->tcode = tcode; |
761 | req->offset = offset; |
762 | req->source_node_id = source; |
763 | req->destination_node_id = destination; |
764 | req->card = card->index; |
765 | req->generation = generation; |
766 | req->length = length; |
767 | req->handle = r->resource.handle; |
768 | req->closure = handler->closure; |
769 | event_size0 = sizeof(*req); |
770 | } else { |
771 | struct fw_cdev_event_request3 *req = &e->req.with_tstamp; |
772 | |
773 | req->type = FW_CDEV_EVENT_REQUEST3; |
774 | req->tcode = tcode; |
775 | req->offset = offset; |
776 | req->source_node_id = source; |
777 | req->destination_node_id = destination; |
778 | req->card = card->index; |
779 | req->generation = generation; |
780 | req->length = length; |
781 | req->handle = r->resource.handle; |
782 | req->closure = handler->closure; |
783 | req->tstamp = fw_request_get_timestamp(request); |
784 | event_size0 = sizeof(*req); |
785 | } |
786 | |
	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
789 | return; |
790 | |
791 | failed: |
	kfree(r);
	kfree(e);
794 | |
795 | if (!is_fcp) |
796 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); |
797 | else |
798 | fw_request_put(request); |
799 | |
800 | fw_card_put(card); |
801 | } |
802 | |
803 | static void release_address_handler(struct client *client, |
804 | struct client_resource *resource) |
805 | { |
806 | struct address_handler_resource *r = |
807 | container_of(resource, struct address_handler_resource, resource); |
808 | |
	fw_core_remove_address_handler(&r->handler);
	kfree(r);
811 | } |
812 | |
813 | static int ioctl_allocate(struct client *client, union ioctl_arg *arg) |
814 | { |
815 | struct fw_cdev_allocate *a = &arg->allocate; |
816 | struct address_handler_resource *r; |
817 | struct fw_address_region region; |
818 | int ret; |
819 | |
	r = kmalloc(sizeof(*r), GFP_KERNEL);
821 | if (r == NULL) |
822 | return -ENOMEM; |
823 | |
824 | region.start = a->offset; |
825 | if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END) |
826 | region.end = a->offset + a->length; |
827 | else |
828 | region.end = a->region_end; |
829 | |
830 | r->handler.length = a->length; |
831 | r->handler.address_callback = handle_request; |
832 | r->handler.callback_data = r; |
833 | r->closure = a->closure; |
834 | r->client = client; |
835 | |
	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
839 | return ret; |
840 | } |
841 | a->offset = r->handler.offset; |
842 | |
843 | r->resource.release = release_address_handler; |
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
847 | return ret; |
848 | } |
849 | a->handle = r->resource.handle; |
850 | |
851 | return 0; |
852 | } |
853 | |
854 | static int ioctl_deallocate(struct client *client, union ioctl_arg *arg) |
855 | { |
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
858 | } |
859 | |
860 | static int ioctl_send_response(struct client *client, union ioctl_arg *arg) |
861 | { |
862 | struct fw_cdev_send_response *a = &arg->send_response; |
863 | struct client_resource *resource; |
864 | struct inbound_transaction_resource *r; |
865 | int ret = 0; |
866 | |
	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
869 | return -EINVAL; |
870 | |
871 | r = container_of(resource, struct inbound_transaction_resource, |
872 | resource); |
873 | if (r->is_fcp) { |
		fw_request_put(r->request);
875 | goto out; |
876 | } |
877 | |
	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		fw_request_put(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		fw_request_put(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);
892 | |
893 | return ret; |
894 | } |
895 | |
896 | static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) |
897 | { |
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
900 | return 0; |
901 | } |
902 | |
903 | static void release_descriptor(struct client *client, |
904 | struct client_resource *resource) |
905 | { |
906 | struct descriptor_resource *r = |
907 | container_of(resource, struct descriptor_resource, resource); |
908 | |
	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
911 | } |
912 | |
913 | static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg) |
914 | { |
915 | struct fw_cdev_add_descriptor *a = &arg->add_descriptor; |
916 | struct descriptor_resource *r; |
917 | int ret; |
918 | |
919 | /* Access policy: Allow this ioctl only on local nodes' device files. */ |
920 | if (!client->device->is_local) |
921 | return -ENOSYS; |
922 | |
923 | if (a->length > 256) |
924 | return -EINVAL; |
925 | |
	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
927 | if (r == NULL) |
928 | return -ENOMEM; |
929 | |
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
931 | ret = -EFAULT; |
932 | goto failed; |
933 | } |
934 | |
935 | r->descriptor.length = a->length; |
936 | r->descriptor.immediate = a->immediate; |
937 | r->descriptor.key = a->key; |
938 | r->descriptor.data = r->data; |
939 | |
	ret = fw_core_add_descriptor(&r->descriptor);
941 | if (ret < 0) |
942 | goto failed; |
943 | |
944 | r->resource.release = release_descriptor; |
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
948 | goto failed; |
949 | } |
950 | a->handle = r->resource.handle; |
951 | |
952 | return 0; |
953 | failed: |
	kfree(r);
955 | |
956 | return ret; |
957 | } |
958 | |
959 | static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg) |
960 | { |
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
963 | } |
964 | |
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
967 | { |
968 | struct client *client = data; |
969 | struct iso_interrupt_event *e; |
970 | |
	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
972 | if (e == NULL) |
973 | return; |
974 | |
975 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; |
976 | e->interrupt.closure = client->iso_closure; |
977 | e->interrupt.cycle = cycle; |
978 | e->interrupt.header_length = header_length; |
979 | memcpy(e->interrupt.header, header, header_length); |
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
982 | } |
983 | |
984 | static void iso_mc_callback(struct fw_iso_context *context, |
985 | dma_addr_t completed, void *data) |
986 | { |
987 | struct client *client = data; |
988 | struct iso_interrupt_mc_event *e; |
989 | |
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
991 | if (e == NULL) |
992 | return; |
993 | |
994 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; |
995 | e->interrupt.closure = client->iso_closure; |
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
1000 | } |
1001 | |
1002 | static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context) |
1003 | { |
1004 | if (context->type == FW_ISO_CONTEXT_TRANSMIT) |
1005 | return DMA_TO_DEVICE; |
1006 | else |
1007 | return DMA_FROM_DEVICE; |
1008 | } |
1009 | |
1010 | static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card, |
1011 | fw_iso_mc_callback_t callback, |
1012 | void *callback_data) |
1013 | { |
1014 | struct fw_iso_context *ctx; |
1015 | |
	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0, NULL, callback_data);
	if (!IS_ERR(ctx))
1019 | ctx->callback.mc = callback; |
1020 | |
1021 | return ctx; |
1022 | } |
1023 | |
1024 | static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) |
1025 | { |
1026 | struct fw_cdev_create_iso_context *a = &arg->create_iso_context; |
1027 | struct fw_iso_context *context; |
1028 | union fw_iso_callback cb; |
1029 | int ret; |
1030 | |
1031 | BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT || |
1032 | FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE || |
1033 | FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL != |
1034 | FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL); |
1035 | |
1036 | switch (a->type) { |
1037 | case FW_ISO_CONTEXT_TRANSMIT: |
1038 | if (a->speed > SCODE_3200 || a->channel > 63) |
1039 | return -EINVAL; |
1040 | |
1041 | cb.sc = iso_callback; |
1042 | break; |
1043 | |
1044 | case FW_ISO_CONTEXT_RECEIVE: |
1045 | if (a->header_size < 4 || (a->header_size & 3) || |
1046 | a->channel > 63) |
1047 | return -EINVAL; |
1048 | |
1049 | cb.sc = iso_callback; |
1050 | break; |
1051 | |
1052 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
1053 | cb.mc = iso_mc_callback; |
1054 | break; |
1055 | |
1056 | default: |
1057 | return -EINVAL; |
1058 | } |
1059 | |
1060 | if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) |
		context = fw_iso_mc_context_create(client->device->card, cb.mc,
						   client);
	else
		context = fw_iso_context_create(client->device->card, a->type,
						a->channel, a->speed,
						a->header_size, cb.sc, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
1069 | if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) |
1070 | context->drop_overflow_headers = true; |
1071 | |
1072 | /* We only support one context at this time. */ |
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
1077 | |
1078 | return -EBUSY; |
1079 | } |
1080 | if (!client->buffer_is_mapped) { |
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			spin_unlock_irq(&client->lock);
			fw_iso_context_destroy(context);
1087 | |
1088 | return ret; |
1089 | } |
1090 | client->buffer_is_mapped = true; |
1091 | } |
1092 | client->iso_closure = a->closure; |
1093 | client->iso_context = context; |
	spin_unlock_irq(&client->lock);
1095 | |
1096 | a->handle = 0; |
1097 | |
1098 | return 0; |
1099 | } |
1100 | |
1101 | static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg) |
1102 | { |
1103 | struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels; |
1104 | struct fw_iso_context *ctx = client->iso_context; |
1105 | |
1106 | if (ctx == NULL || a->handle != 0) |
1107 | return -EINVAL; |
1108 | |
	return fw_iso_context_set_channels(ctx, &a->channels);
1110 | } |
1111 | |
1112 | /* Macros for decoding the iso packet control header. */ |
1113 | #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) |
1114 | #define GET_INTERRUPT(v) (((v) >> 16) & 0x01) |
1115 | #define GET_SKIP(v) (((v) >> 17) & 0x01) |
1116 | #define GET_TAG(v) (((v) >> 18) & 0x03) |
1117 | #define GET_SY(v) (((v) >> 20) & 0x0f) |
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
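
/*
 * Layout of the control word decoded by the macros above:
 *   bits  0..15  payload_length
 *   bit      16  interrupt
 *   bit      17  skip
 *   bits 18..19  tag
 *   bits 20..23  sy
 *   bits 24..31  header_length
 */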
1119 | |
1120 | static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) |
1121 | { |
1122 | struct fw_cdev_queue_iso *a = &arg->queue_iso; |
1123 | struct fw_cdev_iso_packet __user *p, *end, *next; |
1124 | struct fw_iso_context *ctx = client->iso_context; |
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
1126 | u32 control; |
1127 | int count; |
1128 | struct { |
1129 | struct fw_iso_packet packet; |
		u8 header[256];
1131 | } u; |
1132 | |
1133 | if (ctx == NULL || a->handle != 0) |
1134 | return -EINVAL; |
1135 | |
1136 | /* |
1137 | * If the user passes a non-NULL data pointer, has mmap()'ed |
1138 | * the iso buffer, and the pointer points inside the buffer, |
1139 | * we setup the payload pointers accordingly. Otherwise we |
1140 | * set them both to 0, which will still let packets with |
1141 | * payload_length == 0 through. In other words, if no packets |
1142 | * use the indirect payload, the iso buffer need not be mapped |
1143 | * and the a->data pointer is ignored. |
1144 | */ |
1145 | payload = (unsigned long)a->data - client->vm_start; |
1146 | buffer_end = client->buffer.page_count << PAGE_SHIFT; |
1147 | if (a->data == 0 || client->buffer.pages == NULL || |
1148 | payload >= buffer_end) { |
1149 | payload = 0; |
1150 | buffer_end = 0; |
1151 | } |
1152 | |
1153 | if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3) |
1154 | return -EINVAL; |
1155 | |
	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1157 | |
1158 | end = (void __user *)p + a->size; |
1159 | count = 0; |
1160 | while (p < end) { |
1161 | if (get_user(control, &p->control)) |
1162 | return -EFAULT; |
1163 | u.packet.payload_length = GET_PAYLOAD_LENGTH(control); |
1164 | u.packet.interrupt = GET_INTERRUPT(control); |
1165 | u.packet.skip = GET_SKIP(control); |
1166 | u.packet.tag = GET_TAG(control); |
1167 | u.packet.sy = GET_SY(control); |
1168 | u.packet.header_length = GET_HEADER_LENGTH(control); |
1169 | |
1170 | switch (ctx->type) { |
1171 | case FW_ISO_CONTEXT_TRANSMIT: |
1172 | if (u.packet.header_length & 3) |
1173 | return -EINVAL; |
1174 | transmit_header_bytes = u.packet.header_length; |
1175 | break; |
1176 | |
1177 | case FW_ISO_CONTEXT_RECEIVE: |
1178 | if (u.packet.header_length == 0 || |
1179 | u.packet.header_length % ctx->header_size != 0) |
1180 | return -EINVAL; |
1181 | break; |
1182 | |
1183 | case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: |
1184 | if (u.packet.payload_length == 0 || |
1185 | u.packet.payload_length & 3) |
1186 | return -EINVAL; |
1187 | break; |
1188 | } |
1189 | |
1190 | next = (struct fw_cdev_iso_packet __user *) |
1191 | &p->header[transmit_header_bytes / 4]; |
1192 | if (next > end) |
1193 | return -EINVAL; |
		if (copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
1196 | return -EFAULT; |
1197 | if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT && |
1198 | u.packet.header_length + u.packet.payload_length > 0) |
1199 | return -EINVAL; |
1200 | if (payload + u.packet.payload_length > buffer_end) |
1201 | return -EINVAL; |
1202 | |
		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
1205 | break; |
1206 | |
1207 | p = next; |
1208 | payload += u.packet.payload_length; |
1209 | count++; |
1210 | } |
1211 | fw_iso_context_queue_flush(ctx); |
1212 | |
	a->size -= uptr_to_u64(p) - a->packets;
	a->packets = uptr_to_u64(p);
1215 | a->data = client->vm_start + payload; |
1216 | |
1217 | return count; |
1218 | } |
1219 | |
1220 | static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) |
1221 | { |
1222 | struct fw_cdev_start_iso *a = &arg->start_iso; |
1223 | |
1224 | BUILD_BUG_ON( |
1225 | FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 || |
1226 | FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 || |
1227 | FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 || |
1228 | FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 || |
1229 | FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS); |
1230 | |
1231 | if (client->iso_context == NULL || a->handle != 0) |
1232 | return -EINVAL; |
1233 | |
1234 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE && |
1235 | (a->tags == 0 || a->tags > 15 || a->sync > 15)) |
1236 | return -EINVAL; |
1237 | |
	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
1240 | } |
1241 | |
1242 | static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg) |
1243 | { |
1244 | struct fw_cdev_stop_iso *a = &arg->stop_iso; |
1245 | |
1246 | if (client->iso_context == NULL || a->handle != 0) |
1247 | return -EINVAL; |
1248 | |
	return fw_iso_context_stop(client->iso_context);
1250 | } |
1251 | |
1252 | static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg) |
1253 | { |
1254 | struct fw_cdev_flush_iso *a = &arg->flush_iso; |
1255 | |
1256 | if (client->iso_context == NULL || a->handle != 0) |
1257 | return -EINVAL; |
1258 | |
	return fw_iso_context_flush_completions(client->iso_context);
1260 | } |
1261 | |
1262 | static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) |
1263 | { |
1264 | struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; |
1265 | struct fw_card *card = client->device->card; |
1266 | struct timespec64 ts = {0, 0}; |
1267 | u32 cycle_time = 0; |
1268 | int ret = 0; |
1269 | |
1270 | local_irq_disable(); |
1271 | |
	ret = fw_card_read_cycle_time(card, &cycle_time);
1273 | if (ret < 0) |
1274 | goto end; |
1275 | |
1276 | switch (a->clk_id) { |
	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts); break;
	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts); break;
	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
1280 | default: |
1281 | ret = -EINVAL; |
1282 | } |
1283 | end: |
1284 | local_irq_enable(); |
1285 | |
1286 | a->tv_sec = ts.tv_sec; |
1287 | a->tv_nsec = ts.tv_nsec; |
1288 | a->cycle_timer = cycle_time; |
1289 | |
1290 | return ret; |
1291 | } |
1292 | |
1293 | static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) |
1294 | { |
1295 | struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer; |
1296 | struct fw_cdev_get_cycle_timer2 ct2; |
1297 | |
1298 | ct2.clk_id = CLOCK_REALTIME; |
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1300 | |
1301 | a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC; |
1302 | a->cycle_timer = ct2.cycle_timer; |
1303 | |
1304 | return 0; |
1305 | } |
1306 | |
1307 | static void iso_resource_work(struct work_struct *work) |
1308 | { |
1309 | struct iso_resource_event *e; |
1310 | struct iso_resource *r = |
1311 | container_of(work, struct iso_resource, work.work); |
1312 | struct client *client = r->client; |
1313 | int generation, channel, bandwidth, todo; |
1314 | bool skip, free, success; |
1315 | |
	spin_lock_irq(&client->lock);
1317 | generation = client->device->generation; |
1318 | todo = r->todo; |
1319 | /* Allow 1000ms grace period for other reallocations. */ |
1320 | if (todo == ISO_RES_ALLOC && |
1321 | time_before64(get_jiffies_64(), |
1322 | client->device->card->reset_jiffies + HZ)) { |
1323 | schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3)); |
1324 | skip = true; |
1325 | } else { |
1326 | /* We could be called twice within the same generation. */ |
1327 | skip = todo == ISO_RES_REALLOC && |
1328 | r->generation == generation; |
1329 | } |
1330 | free = todo == ISO_RES_DEALLOC || |
1331 | todo == ISO_RES_ALLOC_ONCE || |
1332 | todo == ISO_RES_DEALLOC_ONCE; |
1333 | r->generation = generation; |
	spin_unlock_irq(&client->lock);
1335 | |
1336 | if (skip) |
1337 | goto out; |
1338 | |
1339 | bandwidth = r->bandwidth; |
1340 | |
	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
1346 | /* |
1347 | * Is this generation outdated already? As long as this resource sticks |
1348 | * in the idr, it will be scheduled again for a newer generation or at |
1349 | * shutdown. |
1350 | */ |
1351 | if (channel == -EAGAIN && |
1352 | (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) |
1353 | goto out; |
1354 | |
1355 | success = channel >= 0 || bandwidth > 0; |
1356 | |
	spin_lock_irq(&client->lock);
1358 | /* |
1359 | * Transit from allocation to reallocation, except if the client |
1360 | * requested deallocation in the meantime. |
1361 | */ |
1362 | if (r->todo == ISO_RES_ALLOC) |
1363 | r->todo = ISO_RES_REALLOC; |
1364 | /* |
1365 | * Allocation or reallocation failure? Pull this resource out of the |
1366 | * idr and prepare for deletion, unless the client is shutting down. |
1367 | */ |
1368 | if (r->todo == ISO_RES_REALLOC && !success && |
1369 | !client->in_shutdown && |
	    idr_remove(&client->resource_idr, r->resource.handle)) {
1371 | client_put(client); |
1372 | free = true; |
1373 | } |
	spin_unlock_irq(&client->lock);
1375 | |
1376 | if (todo == ISO_RES_ALLOC && channel >= 0) |
1377 | r->channels = 1ULL << channel; |
1378 | |
1379 | if (todo == ISO_RES_REALLOC && success) |
1380 | goto out; |
1381 | |
1382 | if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { |
1383 | e = r->e_alloc; |
1384 | r->e_alloc = NULL; |
1385 | } else { |
1386 | e = r->e_dealloc; |
1387 | r->e_dealloc = NULL; |
1388 | } |
1389 | e->iso_resource.handle = r->resource.handle; |
1390 | e->iso_resource.channel = channel; |
1391 | e->iso_resource.bandwidth = bandwidth; |
1392 | |
	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1395 | |
1396 | if (free) { |
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
1401 | } |
1402 | out: |
1403 | client_put(client); |
1404 | } |
1405 | |
1406 | static void release_iso_resource(struct client *client, |
1407 | struct client_resource *resource) |
1408 | { |
1409 | struct iso_resource *r = |
1410 | container_of(resource, struct iso_resource, resource); |
1411 | |
	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
1416 | } |
1417 | |
1418 | static int init_iso_resource(struct client *client, |
1419 | struct fw_cdev_allocate_iso_resource *request, int todo) |
1420 | { |
1421 | struct iso_resource_event *e1, *e2; |
1422 | struct iso_resource *r; |
1423 | int ret; |
1424 | |
1425 | if ((request->channels == 0 && request->bandwidth == 0) || |
1426 | request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) |
1427 | return -EINVAL; |
1428 | |
	r = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1432 | if (r == NULL || e1 == NULL || e2 == NULL) { |
1433 | ret = -ENOMEM; |
1434 | goto fail; |
1435 | } |
1436 | |
1437 | INIT_DELAYED_WORK(&r->work, iso_resource_work); |
1438 | r->client = client; |
1439 | r->todo = todo; |
1440 | r->generation = -1; |
1441 | r->channels = request->channels; |
1442 | r->bandwidth = request->bandwidth; |
1443 | r->e_alloc = e1; |
1444 | r->e_dealloc = e2; |
1445 | |
1446 | e1->iso_resource.closure = request->closure; |
1447 | e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; |
1448 | e2->iso_resource.closure = request->closure; |
1449 | e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; |
1450 | |
1451 | if (todo == ISO_RES_ALLOC) { |
1452 | r->resource.release = release_iso_resource; |
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1454 | if (ret < 0) |
1455 | goto fail; |
1456 | } else { |
1457 | r->resource.release = NULL; |
1458 | r->resource.handle = -1; |
		schedule_iso_resource(r, 0);
1460 | } |
1461 | request->handle = r->resource.handle; |
1462 | |
1463 | return 0; |
1464 | fail: |
	kfree(r);
	kfree(e1);
	kfree(e2);
1468 | |
1469 | return ret; |
1470 | } |
1471 | |
1472 | static int ioctl_allocate_iso_resource(struct client *client, |
1473 | union ioctl_arg *arg) |
1474 | { |
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
1477 | } |
1478 | |
1479 | static int ioctl_deallocate_iso_resource(struct client *client, |
1480 | union ioctl_arg *arg) |
1481 | { |
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
1484 | } |
1485 | |
1486 | static int ioctl_allocate_iso_resource_once(struct client *client, |
1487 | union ioctl_arg *arg) |
1488 | { |
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1491 | } |
1492 | |
1493 | static int ioctl_deallocate_iso_resource_once(struct client *client, |
1494 | union ioctl_arg *arg) |
1495 | { |
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1498 | } |
1499 | |
1500 | /* |
1501 | * Returns a speed code: Maximum speed to or from this device, |
1502 | * limited by the device's link speed, the local node's link speed, |
1503 | * and all PHY port speeds between the two links. |
1504 | */ |
1505 | static int ioctl_get_speed(struct client *client, union ioctl_arg *arg) |
1506 | { |
1507 | return client->device->max_speed; |
1508 | } |
1509 | |
1510 | static int ioctl_send_broadcast_request(struct client *client, |
1511 | union ioctl_arg *arg) |
1512 | { |
1513 | struct fw_cdev_send_request *a = &arg->send_request; |
1514 | |
1515 | switch (a->tcode) { |
1516 | case TCODE_WRITE_QUADLET_REQUEST: |
1517 | case TCODE_WRITE_BLOCK_REQUEST: |
1518 | break; |
1519 | default: |
1520 | return -EINVAL; |
1521 | } |
1522 | |
1523 | /* Security policy: Only allow accesses to Units Space. */ |
1524 | if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) |
1525 | return -EACCES; |
1526 | |
	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1528 | } |
1529 | |
1530 | static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg) |
1531 | { |
1532 | struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet; |
1533 | struct fw_cdev_send_request request; |
1534 | int dest; |
1535 | |
1536 | if (a->speed > client->device->card->link_speed || |
1537 | a->length > 1024 << a->speed) |
1538 | return -EIO; |
1539 | |
1540 | if (a->tag > 3 || a->channel > 63 || a->sy > 15) |
1541 | return -EINVAL; |
1542 | |
	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1544 | request.tcode = TCODE_STREAM_DATA; |
1545 | request.length = a->length; |
1546 | request.closure = a->closure; |
1547 | request.data = a->data; |
1548 | request.generation = a->generation; |
1549 | |
	return init_request(client, &request, dest, a->speed);
1551 | } |
1552 | |
1553 | static void outbound_phy_packet_callback(struct fw_packet *packet, |
1554 | struct fw_card *card, int status) |
1555 | { |
1556 | struct outbound_phy_packet_event *e = |
1557 | container_of(packet, struct outbound_phy_packet_event, p); |
1558 | struct client *e_client = e->client; |
1559 | u32 rcode; |
1560 | |
1561 | switch (status) { |
1562 | // expected: |
1563 | case ACK_COMPLETE: |
1564 | rcode = RCODE_COMPLETE; |
1565 | break; |
1566 | // should never happen with PHY packets: |
1567 | case ACK_PENDING: |
1568 | rcode = RCODE_COMPLETE; |
1569 | break; |
1570 | case ACK_BUSY_X: |
1571 | case ACK_BUSY_A: |
1572 | case ACK_BUSY_B: |
1573 | rcode = RCODE_BUSY; |
1574 | break; |
1575 | case ACK_DATA_ERROR: |
1576 | rcode = RCODE_DATA_ERROR; |
1577 | break; |
1578 | case ACK_TYPE_ERROR: |
1579 | rcode = RCODE_TYPE_ERROR; |
1580 | break; |
1581 | // stale generation; cancelled; on certain controllers: no ack |
1582 | default: |
1583 | rcode = status; |
1584 | break; |
1585 | } |
1586 | |
1587 | switch (e->phy_packet.without_tstamp.type) { |
1588 | case FW_CDEV_EVENT_PHY_PACKET_SENT: |
1589 | { |
1590 | struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp; |
1591 | |
1592 | pp->rcode = rcode; |
1593 | pp->data[0] = packet->timestamp; |
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
1596 | break; |
1597 | } |
1598 | case FW_CDEV_EVENT_PHY_PACKET_SENT2: |
1599 | { |
1600 | struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp; |
1601 | |
1602 | pp->rcode = rcode; |
1603 | pp->tstamp = packet->timestamp; |
		queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
			    NULL, 0);
1606 | break; |
1607 | } |
1608 | default: |
1609 | WARN_ON(1); |
1610 | break; |
1611 | } |
1612 | |
	client_put(e_client);
1614 | } |
1615 | |
1616 | static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) |
1617 | { |
1618 | struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet; |
1619 | struct fw_card *card = client->device->card; |
1620 | struct outbound_phy_packet_event *e; |
1621 | |
1622 | /* Access policy: Allow this ioctl only on local nodes' device files. */ |
1623 | if (!client->device->is_local) |
1624 | return -ENOSYS; |
1625 | |
	e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
1627 | if (e == NULL) |
1628 | return -ENOMEM; |
1629 | |
1630 | client_get(client); |
1631 | e->client = client; |
1632 | e->p.speed = SCODE_100; |
1633 | e->p.generation = a->generation; |
1634 | e->p.header[0] = TCODE_LINK_INTERNAL << 4; |
1635 | e->p.header[1] = a->data[0]; |
1636 | e->p.header[2] = a->data[1]; |
1637 | e->p.header_length = 12; |
1638 | e->p.callback = outbound_phy_packet_callback; |
1639 | |
1640 | if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) { |
1641 | struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp; |
1642 | |
1643 | pp->closure = a->closure; |
1644 | pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT; |
		if (is_ping_packet(a->data))
1646 | pp->length = 4; |
1647 | } else { |
1648 | struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp; |
1649 | |
1650 | pp->closure = a->closure; |
1651 | pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2; |
1652 | // Keep the data field so that application can match the response event to the |
1653 | // request. |
1654 | pp->length = sizeof(a->data); |
1655 | memcpy(pp->data, a->data, sizeof(a->data)); |
1656 | } |
1657 | |
1658 | card->driver->send_request(card, &e->p); |
1659 | |
1660 | return 0; |
1661 | } |
1662 | |
1663 | static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg) |
1664 | { |
1665 | struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets; |
1666 | struct fw_card *card = client->device->card; |
1667 | |
1668 | /* Access policy: Allow this ioctl only on local nodes' device files. */ |
1669 | if (!client->device->is_local) |
1670 | return -ENOSYS; |
1671 | |
	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1675 | client->phy_receiver_closure = a->closure; |
1676 | |
	spin_unlock_irq(&card->lock);
1678 | |
1679 | return 0; |
1680 | } |
1681 | |
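/*
 * Fan an inbound PHY packet out to every subscribed client. Runs with
 * card->lock held and interrupts disabled, hence the GFP_ATOMIC allocation;
 * on allocation failure the remaining clients in the list miss this packet.
 */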
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL)
			break;

		if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
			struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;

			pp->closure = client->phy_receiver_closure;
			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
			pp->rcode = RCODE_COMPLETE;
			pp->length = 8;
			pp->data[0] = p->header[1];
			pp->data[1] = p->header[2];
			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
		} else {
			struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;

			pp->closure = client->phy_receiver_closure;
			pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
			pp->rcode = RCODE_COMPLETE;
			pp->length = 8;
			pp->tstamp = p->timestamp;
			pp->data[0] = p->header[1];
			pp->data[1] = p->header[2];
			queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
		}
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

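/*
 * The handler index is the _IOC_NR() of the ioctl command, so this table must
 * stay in sync with the command numbering in linux/firewire-cdev.h.
 */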
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};

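/*
 * Decode and dispatch one ioctl: check the '#' type, bounds-check the command
 * number against the handler table and the argument size against the union,
 * copy the argument in for _IOC_WRITE commands, and copy it back out for
 * _IOC_READ commands after the handler has run.
 */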
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	memset(&buffer, 0, sizeof(buffer));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

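/*
 * mmap() backs this client's single isochronous DMA buffer. The mapping must
 * be MAP_SHARED and page-aligned; if an iso context already exists, the pages
 * are immediately mapped for DMA in the context's transfer direction.
 */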
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	spin_lock_irq(&client->lock);
	if (client->iso_context) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(client->iso_context));
		client->buffer_is_mapped = (ret == 0);
	}
	spin_unlock_irq(&client->lock);
	if (ret < 0)
		goto fail;

	ret = vm_map_pages_zero(vma, client->buffer.pages,
				client->buffer.page_count);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}

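/*
 * idr_for_each() callback: returns nonzero for resources whose release
 * function is release_transaction, i.e. outbound transactions that are still
 * in flight.
 */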
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

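/*
 * Teardown order on close(): unsubscribe from PHY packets, unlink from the
 * device's client list, destroy the iso context and buffer, freeze the
 * resource table and event list, wait until all outbound transactions have
 * completed, then release the remaining resources and queued events. The
 * final client_put() drops the client's initial reference.
 */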
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

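/*
 * poll() readiness: readable as soon as at least one event is queued;
 * EPOLLHUP | EPOLLERR once the underlying device has been shut down.
 */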
static __poll_t fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= EPOLLHUP | EPOLLERR;
	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
	.compat_ioctl	= compat_ptr_ioctl,
};