1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * RapidIO mport character device |
4 | * |
5 | * Copyright 2014-2015 Integrated Device Technology, Inc. |
6 | * Alexandre Bounine <alexandre.bounine@idt.com> |
7 | * Copyright 2014-2015 Prodrive Technologies |
8 | * Andre van Herk <andre.van.herk@prodrive-technologies.com> |
9 | * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com> |
10 | * Copyright (C) 2014 Texas Instruments Incorporated |
11 | * Aurelien Jacquiot <a-jacquiot@ti.com> |
12 | */ |
13 | #include <linux/module.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/cdev.h> |
16 | #include <linux/ioctl.h> |
17 | #include <linux/uaccess.h> |
18 | #include <linux/list.h> |
19 | #include <linux/fs.h> |
20 | #include <linux/err.h> |
21 | #include <linux/net.h> |
22 | #include <linux/poll.h> |
23 | #include <linux/spinlock.h> |
24 | #include <linux/sched.h> |
25 | #include <linux/kfifo.h> |
26 | |
27 | #include <linux/mm.h> |
28 | #include <linux/slab.h> |
29 | #include <linux/vmalloc.h> |
30 | #include <linux/mman.h> |
31 | |
32 | #include <linux/dma-mapping.h> |
33 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
34 | #include <linux/dmaengine.h> |
35 | #endif |
36 | |
37 | #include <linux/rio.h> |
38 | #include <linux/rio_ids.h> |
39 | #include <linux/rio_drv.h> |
40 | #include <linux/rio_mport_cdev.h> |
41 | |
42 | #include "../rio.h" |
43 | |
44 | #define DRV_NAME "rio_mport" |
45 | #define DRV_PREFIX DRV_NAME ": " |
46 | #define DEV_NAME "rio_mport" |
47 | #define DRV_VERSION "1.0.0" |
48 | |
49 | /* Debug output filtering masks */ |
50 | enum { |
51 | DBG_NONE = 0, |
52 | DBG_INIT = BIT(0), /* driver init */ |
53 | DBG_EXIT = BIT(1), /* driver exit */ |
54 | DBG_MPORT = BIT(2), /* mport add/remove */ |
55 | DBG_RDEV = BIT(3), /* RapidIO device add/remove */ |
56 | DBG_DMA = BIT(4), /* DMA transfer messages */ |
57 | DBG_MMAP = BIT(5), /* mapping messages */ |
58 | DBG_IBW = BIT(6), /* inbound window */ |
59 | DBG_EVENT = BIT(7), /* event handling messages */ |
60 | DBG_OBW = BIT(8), /* outbound window messages */ |
61 | DBG_DBELL = BIT(9), /* doorbell messages */ |
62 | DBG_ALL = ~0, |
63 | }; |
64 | |
65 | #ifdef DEBUG |
66 | #define rmcd_debug(level, fmt, arg...) \ |
67 | do { \ |
68 | if (DBG_##level & dbg_level) \ |
69 | pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \ |
70 | } while (0) |
71 | #else |
72 | #define rmcd_debug(level, fmt, arg...) \ |
73 | no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg) |
74 | #endif |
75 | |
76 | #define rmcd_warn(fmt, arg...) \ |
77 | pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg) |
78 | |
79 | #define rmcd_error(fmt, arg...) \ |
80 | pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg) |
81 | |
MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
88 | MODULE_VERSION(DRV_VERSION); |
89 | |
90 | static int dma_timeout = 3000; /* DMA transfer timeout in msec */ |
91 | module_param(dma_timeout, int, S_IRUGO); |
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");
93 | |
94 | #ifdef DEBUG |
95 | static u32 dbg_level = DBG_NONE; |
96 | module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO); |
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
98 | #endif |
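
/*
 * Illustrative usage (assuming this driver builds as rio_mport_cdev.ko):
 * both parameters can be given at load time, e.g.
 *
 *	modprobe rio_mport_cdev dma_timeout=5000 dbg_level=0x10
 *
 * and, since dbg_level is writable, DBG_DMA (0x10) tracing can also be
 * toggled at runtime on a DEBUG build through
 * /sys/module/rio_mport_cdev/parameters/dbg_level.
 */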
99 | |
100 | /* |
101 | * An internal DMA coherent buffer |
102 | */ |
103 | struct mport_dma_buf { |
104 | void *ib_base; |
105 | dma_addr_t ib_phys; |
106 | u32 ib_size; |
107 | u64 ib_rio_base; |
108 | bool ib_map; |
109 | struct file *filp; |
110 | }; |
111 | |
112 | /* |
113 | * Internal memory mapping structure |
114 | */ |
115 | enum rio_mport_map_dir { |
116 | MAP_INBOUND, |
117 | MAP_OUTBOUND, |
118 | MAP_DMA, |
119 | }; |
120 | |
121 | struct rio_mport_mapping { |
122 | struct list_head node; |
123 | struct mport_dev *md; |
124 | enum rio_mport_map_dir dir; |
125 | u16 rioid; |
126 | u64 rio_addr; |
127 | dma_addr_t phys_addr; /* for mmap */ |
128 | void *virt_addr; /* kernel address, for dma_free_coherent */ |
129 | u64 size; |
130 | struct kref ref; /* refcount of vmas sharing the mapping */ |
131 | struct file *filp; |
132 | }; |
133 | |
134 | struct rio_mport_dma_map { |
135 | int valid; |
136 | u64 length; |
137 | void *vaddr; |
138 | dma_addr_t paddr; |
139 | }; |
140 | |
141 | #define MPORT_MAX_DMA_BUFS 16 |
142 | #define MPORT_EVENT_DEPTH 10 |
143 | |
144 | /* |
145 | * mport_dev driver-specific structure that represents mport device |
146 | * @active mport device status flag |
147 | * @node list node to maintain list of registered mports |
148 | * @cdev character device |
149 | * @dev associated device object |
150 | * @mport associated subsystem's master port device object |
151 | * @buf_mutex lock for buffer handling |
152 | * @file_mutex - lock for open files list |
153 | * @file_list - list of open files on given mport |
154 | * @properties properties of this mport |
155 | * @portwrites queue of inbound portwrites |
156 | * @pw_lock lock for port write queue |
157 | * @mappings queue for memory mappings |
158 | * @dma_chan DMA channels associated with this device |
159 | * @dma_ref: |
160 | * @comp: |
161 | */ |
162 | struct mport_dev { |
163 | atomic_t active; |
164 | struct list_head node; |
165 | struct cdev cdev; |
166 | struct device dev; |
167 | struct rio_mport *mport; |
168 | struct mutex buf_mutex; |
169 | struct mutex file_mutex; |
170 | struct list_head file_list; |
171 | struct rio_mport_properties properties; |
172 | struct list_head doorbells; |
173 | spinlock_t db_lock; |
174 | struct list_head portwrites; |
175 | spinlock_t pw_lock; |
176 | struct list_head mappings; |
177 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
178 | struct dma_chan *dma_chan; |
179 | struct kref dma_ref; |
180 | struct completion comp; |
181 | #endif |
182 | }; |
183 | |
184 | /* |
185 | * mport_cdev_priv - data structure specific to individual file object |
186 | * associated with an open device |
187 | * @md master port character device object |
188 | * @async_queue - asynchronous notification queue |
189 | * @list - file objects tracking list |
190 | * @db_filters inbound doorbell filters for this descriptor |
191 | * @pw_filters portwrite filters for this descriptor |
192 | * @event_fifo event fifo for this descriptor |
193 | * @event_rx_wait wait queue for this descriptor |
194 | * @fifo_lock lock for event_fifo |
195 | * @event_mask event mask for this descriptor |
196 | * @dmach DMA engine channel allocated for specific file object |
197 | */ |
198 | struct mport_cdev_priv { |
199 | struct mport_dev *md; |
200 | struct fasync_struct *async_queue; |
201 | struct list_head list; |
202 | struct list_head db_filters; |
203 | struct list_head pw_filters; |
204 | struct kfifo event_fifo; |
205 | wait_queue_head_t event_rx_wait; |
206 | spinlock_t fifo_lock; |
207 | u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ |
208 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
209 | struct dma_chan *dmach; |
210 | struct list_head async_list; |
211 | spinlock_t req_lock; |
212 | struct mutex dma_lock; |
213 | struct kref dma_ref; |
214 | struct completion comp; |
215 | #endif |
216 | }; |
217 | |
218 | /* |
219 | * rio_mport_pw_filter - structure to describe a portwrite filter |
220 | * md_node node in mport device's list |
221 | * priv_node node in private file object's list |
222 | * priv reference to private data |
223 | * filter actual portwrite filter |
224 | */ |
225 | struct rio_mport_pw_filter { |
226 | struct list_head md_node; |
227 | struct list_head priv_node; |
228 | struct mport_cdev_priv *priv; |
229 | struct rio_pw_filter filter; |
230 | }; |
231 | |
232 | /* |
233 | * rio_mport_db_filter - structure to describe a doorbell filter |
234 | * @data_node reference to device node |
235 | * @priv_node node in private data |
236 | * @priv reference to private data |
237 | * @filter actual doorbell filter |
238 | */ |
239 | struct rio_mport_db_filter { |
240 | struct list_head data_node; |
241 | struct list_head priv_node; |
242 | struct mport_cdev_priv *priv; |
243 | struct rio_doorbell_filter filter; |
244 | }; |
245 | |
246 | static LIST_HEAD(mport_devs); |
247 | static DEFINE_MUTEX(mport_devs_lock); |
248 | |
#if (0) /* used by commented-out portion of poll function: FIXME */
250 | static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait); |
251 | #endif |
252 | |
253 | static const struct class dev_class = { |
254 | .name = DRV_NAME, |
255 | }; |
256 | static dev_t dev_number; |
257 | |
258 | static void mport_release_mapping(struct kref *ref); |
259 | |
260 | static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, |
261 | int local) |
262 | { |
263 | struct rio_mport *mport = priv->md->mport; |
264 | struct rio_mport_maint_io maint_io; |
265 | u32 *buffer; |
266 | u32 offset; |
267 | size_t length; |
268 | int ret, i; |
269 | |
270 | if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) |
271 | return -EFAULT; |
272 | |
273 | if ((maint_io.offset % 4) || |
274 | (maint_io.length == 0) || (maint_io.length % 4) || |
275 | (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) |
276 | return -EINVAL; |
277 | |
	buffer = vmalloc(maint_io.length);
279 | if (buffer == NULL) |
280 | return -ENOMEM; |
281 | length = maint_io.length/sizeof(u32); |
282 | offset = maint_io.offset; |
283 | |
284 | for (i = 0; i < length; i++) { |
285 | if (local) |
			ret = __rio_local_read_config_32(mport,
							 offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
291 | if (ret) |
292 | goto out; |
293 | |
294 | offset += 4; |
295 | } |
296 | |
297 | if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, |
298 | buffer, maint_io.length))) |
299 | ret = -EFAULT; |
300 | out: |
	vfree(buffer);
302 | return ret; |
303 | } |
304 | |
305 | static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, |
306 | int local) |
307 | { |
308 | struct rio_mport *mport = priv->md->mport; |
309 | struct rio_mport_maint_io maint_io; |
310 | u32 *buffer; |
311 | u32 offset; |
312 | size_t length; |
313 | int ret = -EINVAL, i; |
314 | |
315 | if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) |
316 | return -EFAULT; |
317 | |
318 | if ((maint_io.offset % 4) || |
319 | (maint_io.length == 0) || (maint_io.length % 4) || |
320 | (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) |
321 | return -EINVAL; |
322 | |
	buffer = vmalloc(maint_io.length);
324 | if (buffer == NULL) |
325 | return -ENOMEM; |
326 | length = maint_io.length; |
327 | |
328 | if (unlikely(copy_from_user(buffer, |
329 | (void __user *)(uintptr_t)maint_io.buffer, length))) { |
330 | ret = -EFAULT; |
331 | goto out; |
332 | } |
333 | |
334 | offset = maint_io.offset; |
335 | length /= sizeof(u32); |
336 | |
337 | for (i = 0; i < length; i++) { |
338 | if (local) |
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
345 | if (ret) |
346 | goto out; |
347 | |
348 | offset += 4; |
349 | } |
350 | |
351 | out: |
	vfree(buffer);
353 | return ret; |
354 | } |
355 | |
356 | |
357 | /* |
358 | * Inbound/outbound memory mapping functions |
359 | */ |
360 | static int |
361 | rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, |
362 | u16 rioid, u64 raddr, u32 size, |
363 | dma_addr_t *paddr) |
364 | { |
365 | struct rio_mport *mport = md->mport; |
366 | struct rio_mport_mapping *map; |
367 | int ret; |
368 | |
	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
372 | if (map == NULL) |
373 | return -ENOMEM; |
374 | |
	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
376 | if (ret < 0) |
377 | goto err_map_outb; |
378 | |
379 | map->dir = MAP_OUTBOUND; |
380 | map->rioid = rioid; |
381 | map->rio_addr = raddr; |
382 | map->size = size; |
383 | map->phys_addr = *paddr; |
384 | map->filp = filp; |
385 | map->md = md; |
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
391 | return ret; |
392 | } |
393 | |
394 | static int |
395 | rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, |
396 | u16 rioid, u64 raddr, u32 size, |
397 | dma_addr_t *paddr) |
398 | { |
399 | struct rio_mport_mapping *map; |
400 | int err = -ENOMEM; |
401 | |
402 | mutex_lock(&md->buf_mutex); |
403 | list_for_each_entry(map, &md->mappings, node) { |
404 | if (map->dir != MAP_OUTBOUND) |
405 | continue; |
406 | if (rioid == map->rioid && |
407 | raddr == map->rio_addr && size == map->size) { |
408 | *paddr = map->phys_addr; |
409 | err = 0; |
410 | break; |
411 | } else if (rioid == map->rioid && |
412 | raddr < (map->rio_addr + map->size - 1) && |
413 | (raddr + size) > map->rio_addr) { |
414 | err = -EBUSY; |
415 | break; |
416 | } |
417 | } |
418 | |
419 | /* If not found, create new */ |
420 | if (err == -ENOMEM) |
421 | err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, |
422 | size, paddr); |
	mutex_unlock(&md->buf_mutex);
424 | return err; |
425 | } |
426 | |
427 | static int rio_mport_obw_map(struct file *filp, void __user *arg) |
428 | { |
429 | struct mport_cdev_priv *priv = filp->private_data; |
430 | struct mport_dev *data = priv->md; |
431 | struct rio_mmap map; |
432 | dma_addr_t paddr; |
433 | int ret; |
434 | |
435 | if (unlikely(copy_from_user(&map, arg, sizeof(map)))) |
436 | return -EFAULT; |
437 | |
	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
439 | map.rioid, map.rio_addr, map.length); |
440 | |
	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err=%d", ret);
445 | return ret; |
446 | } |
447 | |
448 | map.handle = paddr; |
449 | |
450 | if (unlikely(copy_to_user(arg, &map, sizeof(map)))) |
451 | return -EFAULT; |
452 | return 0; |
453 | } |
454 | |
455 | /* |
456 | * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space |
457 | * |
458 | * @priv: driver private data |
459 | * @arg: buffer handle returned by allocation routine |
460 | */ |
461 | static int rio_mport_obw_free(struct file *filp, void __user *arg) |
462 | { |
463 | struct mport_cdev_priv *priv = filp->private_data; |
464 | struct mport_dev *md = priv->md; |
465 | u64 handle; |
466 | struct rio_mport_mapping *map, *_map; |
467 | |
468 | if (!md->mport->ops->unmap_outb) |
469 | return -EPROTONOSUPPORT; |
470 | |
	if (copy_from_user(&handle, arg, sizeof(handle)))
472 | return -EFAULT; |
473 | |
	rmcd_debug(OBW, "h=0x%llx", handle);
475 | |
476 | mutex_lock(&md->buf_mutex); |
477 | list_for_each_entry_safe(map, _map, &md->mappings, node) { |
478 | if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) { |
479 | if (map->filp == filp) { |
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
483 | } |
484 | break; |
485 | } |
486 | } |
	mutex_unlock(&md->buf_mutex);
488 | |
489 | return 0; |
490 | } |
491 | |
492 | /* |
493 | * maint_hdid_set() - Set the host Device ID |
494 | * @priv: driver private data |
495 | * @arg: Device Id |
496 | */ |
497 | static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) |
498 | { |
499 | struct mport_dev *md = priv->md; |
500 | u16 hdid; |
501 | |
	if (copy_from_user(&hdid, arg, sizeof(hdid)))
503 | return -EFAULT; |
504 | |
505 | md->mport->host_deviceid = hdid; |
506 | md->properties.hdid = hdid; |
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);
510 | |
511 | return 0; |
512 | } |
513 | |
514 | /* |
515 | * maint_comptag_set() - Set the host Component Tag |
516 | * @priv: driver private data |
517 | * @arg: Component Tag |
518 | */ |
519 | static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) |
520 | { |
521 | struct mport_dev *md = priv->md; |
522 | u32 comptag; |
523 | |
	if (copy_from_user(&comptag, arg, sizeof(comptag)))
525 | return -EFAULT; |
526 | |
	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);
530 | |
531 | return 0; |
532 | } |
533 | |
534 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
535 | |
536 | struct mport_dma_req { |
537 | struct kref refcount; |
538 | struct list_head node; |
539 | struct file *filp; |
540 | struct mport_cdev_priv *priv; |
541 | enum rio_transfer_sync sync; |
542 | struct sg_table sgt; |
543 | struct page **page_list; |
544 | unsigned int nr_pages; |
545 | struct rio_mport_mapping *map; |
546 | struct dma_chan *dmach; |
547 | enum dma_data_direction dir; |
548 | dma_cookie_t cookie; |
549 | enum dma_status status; |
550 | struct completion req_comp; |
551 | }; |
552 | |
553 | static void mport_release_def_dma(struct kref *dma_ref) |
554 | { |
555 | struct mport_dev *md = |
556 | container_of(dma_ref, struct mport_dev, dma_ref); |
557 | |
	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
561 | } |
562 | |
563 | static void mport_release_dma(struct kref *dma_ref) |
564 | { |
565 | struct mport_cdev_priv *priv = |
566 | container_of(dma_ref, struct mport_cdev_priv, dma_ref); |
567 | |
	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
569 | complete(&priv->comp); |
570 | } |
571 | |
572 | static void dma_req_free(struct kref *ref) |
573 | { |
574 | struct mport_dma_req *req = container_of(ref, struct mport_dma_req, |
575 | refcount); |
576 | struct mport_cdev_priv *priv = req->priv; |
577 | |
578 | dma_unmap_sg(req->dmach->device->dev, |
579 | req->sgt.sgl, req->sgt.nents, req->dir); |
580 | sg_free_table(&req->sgt); |
	if (req->page_list) {
		unpin_user_pages(req->page_list, req->nr_pages);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
595 | } |
596 | |
597 | static void dma_xfer_callback(void *param) |
598 | { |
599 | struct mport_dma_req *req = (struct mport_dma_req *)param; |
600 | struct mport_cdev_priv *priv = req->priv; |
601 | |
	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
606 | } |
607 | |
608 | /* |
609 | * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA |
610 | * transfer object. |
611 | * Returns pointer to DMA transaction descriptor allocated by DMA driver on |
612 | * success or ERR_PTR (and/or NULL) if failed. Caller must check returned |
613 | * non-NULL pointer using IS_ERR macro. |
614 | */ |
615 | static struct dma_async_tx_descriptor |
616 | *prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer, |
617 | struct sg_table *sgt, int nents, enum dma_transfer_direction dir, |
618 | enum dma_ctrl_flags flags) |
619 | { |
620 | struct rio_dma_data tx_data; |
621 | |
622 | tx_data.sg = sgt->sgl; |
623 | tx_data.sg_len = nents; |
624 | tx_data.rio_addr_u = 0; |
625 | tx_data.rio_addr = transfer->rio_addr; |
626 | if (dir == DMA_MEM_TO_DEV) { |
627 | switch (transfer->method) { |
628 | case RIO_EXCHANGE_NWRITE: |
629 | tx_data.wr_type = RDW_ALL_NWRITE; |
630 | break; |
631 | case RIO_EXCHANGE_NWRITE_R_ALL: |
632 | tx_data.wr_type = RDW_ALL_NWRITE_R; |
633 | break; |
634 | case RIO_EXCHANGE_NWRITE_R: |
635 | tx_data.wr_type = RDW_LAST_NWRITE_R; |
636 | break; |
637 | case RIO_EXCHANGE_DEFAULT: |
638 | tx_data.wr_type = RDW_DEFAULT; |
639 | break; |
640 | default: |
			return ERR_PTR(-EINVAL);
642 | } |
643 | } |
644 | |
	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
646 | } |
647 | |
/* Request DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opens a given
 * mport. If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
653 | static int get_dma_channel(struct mport_cdev_priv *priv) |
654 | { |
655 | mutex_lock(&priv->dma_lock); |
656 | if (!priv->dmach) { |
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
682 | return 0; |
683 | } |
684 | |
685 | static void put_dma_channel(struct mport_cdev_priv *priv) |
686 | { |
	kref_put(&priv->dma_ref, mport_release_dma);
688 | } |
689 | |
690 | /* |
691 | * DMA transfer functions |
692 | */ |
693 | static int do_dma_request(struct mport_dma_req *req, |
694 | struct rio_transfer_io *xfer, |
695 | enum rio_transfer_sync sync, int nents) |
696 | { |
697 | struct mport_cdev_priv *priv; |
698 | struct sg_table *sgt; |
699 | struct dma_chan *chan; |
700 | struct dma_async_tx_descriptor *tx; |
701 | dma_cookie_t cookie; |
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
703 | enum dma_transfer_direction dir; |
704 | long wret; |
705 | int ret = 0; |
706 | |
707 | priv = req->priv; |
708 | sgt = &req->sgt; |
709 | |
710 | chan = priv->dmach; |
711 | dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; |
712 | |
	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   xfer->rio_addr, xfer->length);
733 | goto err_out; |
734 | } |
735 | |
736 | tx->callback = dma_xfer_callback; |
737 | tx->callback_param = req; |
738 | |
739 | req->status = DMA_IN_PROGRESS; |
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
752 | ret = -EIO; |
753 | goto err_out; |
754 | } |
755 | |
756 | dma_async_issue_pending(chan); |
757 | |
758 | if (sync == RIO_TRANSFER_ASYNC) { |
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
767 | |
768 | if (wret == 0) { |
769 | /* Timeout on wait occurred */ |
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   cookie, req->status, ret);
790 | ret = -EIO; |
791 | } |
792 | |
793 | err_out: |
794 | return ret; |
795 | } |
796 | |
797 | /* |
798 | * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from |
799 | * the remote RapidIO device |
800 | * @filp: file pointer associated with the call |
801 | * @transfer_mode: DMA transfer mode |
802 | * @sync: synchronization mode |
803 | * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR |
804 | * DMA_DEV_TO_MEM = read) |
805 | * @xfer: data transfer descriptor structure |
806 | */ |
807 | static int |
808 | rio_dma_transfer(struct file *filp, u32 transfer_mode, |
809 | enum rio_transfer_sync sync, enum dma_data_direction dir, |
810 | struct rio_transfer_io *xfer) |
811 | { |
812 | struct mport_cdev_priv *priv = filp->private_data; |
813 | unsigned long nr_pages = 0; |
814 | struct page **page_list = NULL; |
815 | struct mport_dma_req *req; |
816 | struct mport_dev *md = priv->md; |
817 | struct dma_chan *chan; |
818 | int ret; |
819 | int nents; |
820 | |
821 | if (xfer->length == 0) |
822 | return -EINVAL; |
	req = kzalloc(sizeof(*req), GFP_KERNEL);
824 | if (!req) |
825 | return -ENOMEM; |
826 | |
	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
836 | req->dir = dir; |
837 | req->filp = filp; |
838 | req->priv = priv; |
839 | req->dmach = chan; |
840 | req->sync = sync; |
841 | |
842 | /* |
843 | * If parameter loc_addr != NULL, we are transferring data from/to |
844 | * data buffer allocated in user-space: lock in memory user-space |
845 | * buffer pages and build an SG table for DMA transfer request |
846 | * |
847 | * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is |
848 | * used for DMA data transfers: build single entry SG table using |
849 | * offset within the internal buffer specified by handle parameter. |
850 | */ |
851 | if (xfer->loc_addr) { |
852 | unsigned int offset; |
853 | long pinned; |
854 | |
855 | offset = lower_32_bits(offset_in_page(xfer->loc_addr)); |
856 | nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; |
857 | |
		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
860 | if (page_list == NULL) { |
861 | ret = -ENOMEM; |
862 | goto err_req; |
863 | } |
864 | |
		pinned = pin_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				page_list);
870 | |
871 | if (pinned != nr_pages) { |
			if (pinned < 0) {
				rmcd_error("pin_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else {
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
				/*
				 * Set nr_pages up to mean "how many pages to
				 * unpin" in the error handler:
				 */
				nr_pages = pinned;
			}
885 | ret = -EFAULT; |
886 | goto err_pg; |
887 | } |
888 | |
		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
893 | goto err_pg; |
894 | } |
895 | |
896 | req->page_list = page_list; |
897 | req->nr_pages = nr_pages; |
898 | } else { |
899 | dma_addr_t baddr; |
900 | struct rio_mport_mapping *map; |
901 | |
902 | baddr = (dma_addr_t)xfer->handle; |
903 | |
904 | mutex_lock(&md->buf_mutex); |
905 | list_for_each_entry(map, &md->mappings, node) { |
906 | if (baddr >= map->phys_addr && |
907 | baddr < (map->phys_addr + map->size)) { |
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);
914 | |
915 | if (req->map == NULL) { |
916 | ret = -ENOMEM; |
917 | goto err_req; |
918 | } |
919 | |
920 | if (xfer->length + xfer->offset > req->map->size) { |
921 | ret = -EINVAL; |
922 | goto err_req; |
923 | } |
924 | |
925 | ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL); |
926 | if (unlikely(ret)) { |
			rmcd_error("sg_alloc_table failed for internal buf");
928 | goto err_req; |
929 | } |
930 | |
		sg_set_buf(req->sgt.sgl,
			   req->map->virt_addr + (baddr - req->map->phys_addr) +
				xfer->offset, xfer->length);
934 | } |
935 | |
936 | nents = dma_map_sg(chan->device->dev, |
937 | req->sgt.sgl, req->sgt.nents, dir); |
938 | if (nents == 0) { |
		rmcd_error("Failed to map SG list");
940 | ret = -EFAULT; |
941 | goto err_pg; |
942 | } |
943 | |
944 | ret = do_dma_request(req, xfer, sync, nents); |
945 | |
946 | if (ret >= 0) { |
947 | if (sync == RIO_TRANSFER_ASYNC) |
948 | return ret; /* return ASYNC cookie */ |
949 | } else { |
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
951 | } |
952 | |
953 | err_pg: |
954 | if (!req->page_list) { |
		unpin_user_pages(page_list, nr_pages);
		kfree(page_list);
957 | } |
958 | err_req: |
	kref_put(&req->refcount, dma_req_free);
960 | return ret; |
961 | } |
962 | |
963 | static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) |
964 | { |
965 | struct mport_cdev_priv *priv = filp->private_data; |
966 | struct rio_transaction transaction; |
967 | struct rio_transfer_io *transfer; |
968 | enum dma_data_direction dir; |
969 | int i, ret = 0; |
970 | size_t size; |
971 | |
972 | if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) |
973 | return -EFAULT; |
974 | |
975 | if (transaction.count != 1) /* only single transfer for now */ |
976 | return -EINVAL; |
977 | |
978 | if ((transaction.transfer_mode & |
979 | priv->md->properties.transfer_mode) == 0) |
980 | return -ENODEV; |
981 | |
982 | size = array_size(sizeof(*transfer), transaction.count); |
983 | transfer = vmalloc(size); |
984 | if (!transfer) |
985 | return -ENOMEM; |
986 | |
987 | if (unlikely(copy_from_user(transfer, |
988 | (void __user *)(uintptr_t)transaction.block, |
989 | size))) { |
990 | ret = -EFAULT; |
991 | goto out_free; |
992 | } |
993 | |
994 | dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ? |
995 | DMA_FROM_DEVICE : DMA_TO_DEVICE; |
996 | for (i = 0; i < transaction.count && ret == 0; i++) |
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);
999 | |
1000 | if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, |
1001 | transfer, size))) |
1002 | ret = -EFAULT; |
1003 | |
1004 | out_free: |
	vfree(transfer);
1006 | |
1007 | return ret; |
1008 | } |
1009 | |
1010 | static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) |
1011 | { |
1012 | struct mport_cdev_priv *priv; |
1013 | struct rio_async_tx_wait w_param; |
1014 | struct mport_dma_req *req; |
1015 | dma_cookie_t cookie; |
1016 | unsigned long tmo; |
1017 | long wret; |
1018 | int found = 0; |
1019 | int ret; |
1020 | |
1021 | priv = (struct mport_cdev_priv *)filp->private_data; |
1022 | |
1023 | if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param)))) |
1024 | return -EFAULT; |
1025 | |
1026 | cookie = w_param.token; |
1027 | if (w_param.timeout) |
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);
1031 | |
	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);
1041 | |
1042 | if (!found) |
1043 | return -EAGAIN; |
1044 | |
	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
1046 | |
1047 | if (wret == 0) { |
1048 | /* Timeout on wait occurred */ |
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
1052 | ret = -ETIMEDOUT; |
1053 | goto err_tmo; |
1054 | } else if (wret == -ERESTARTSYS) { |
1055 | /* Wait_for_completion was interrupted by a signal but DMA may |
1056 | * be still in progress |
1057 | */ |
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
1061 | ret = -EINTR; |
1062 | goto err_tmo; |
1063 | } |
1064 | |
1065 | if (req->status != DMA_COMPLETE) { |
1066 | /* DMA transaction completion signaled with transfer error */ |
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
1070 | req->status); |
1071 | ret = -EIO; |
1072 | } else |
1073 | ret = 0; |
1074 | |
1075 | if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED) |
		kref_put(&req->refcount, dma_req_free);
1077 | |
1078 | return ret; |
1079 | |
1080 | err_tmo: |
1081 | /* Return request back into async queue */ |
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
1085 | return ret; |
1086 | } |
1087 | |
1088 | static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, |
1089 | u64 size, struct rio_mport_mapping **mapping) |
1090 | { |
1091 | struct rio_mport_mapping *map; |
1092 | |
	map = kzalloc(sizeof(*map), GFP_KERNEL);
1094 | if (map == NULL) |
1095 | return -ENOMEM; |
1096 | |
	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
1101 | return -ENOMEM; |
1102 | } |
1103 | |
1104 | map->dir = MAP_DMA; |
1105 | map->size = size; |
1106 | map->filp = filp; |
1107 | map->md = md; |
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
1112 | *mapping = map; |
1113 | |
1114 | return 0; |
1115 | } |
1116 | |
1117 | static int rio_mport_alloc_dma(struct file *filp, void __user *arg) |
1118 | { |
1119 | struct mport_cdev_priv *priv = filp->private_data; |
1120 | struct mport_dev *md = priv->md; |
1121 | struct rio_dma_mem map; |
1122 | struct rio_mport_mapping *mapping = NULL; |
1123 | int ret; |
1124 | |
1125 | if (unlikely(copy_from_user(&map, arg, sizeof(map)))) |
1126 | return -EFAULT; |
1127 | |
	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
1129 | if (ret) |
1130 | return ret; |
1131 | |
1132 | map.dma_handle = mapping->phys_addr; |
1133 | |
1134 | if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { |
1135 | mutex_lock(&md->buf_mutex); |
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
1138 | return -EFAULT; |
1139 | } |
1140 | |
1141 | return 0; |
1142 | } |
1143 | |
1144 | static int rio_mport_free_dma(struct file *filp, void __user *arg) |
1145 | { |
1146 | struct mport_cdev_priv *priv = filp->private_data; |
1147 | struct mport_dev *md = priv->md; |
1148 | u64 handle; |
1149 | int ret = -EFAULT; |
1150 | struct rio_mport_mapping *map, *_map; |
1151 | |
	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);
1155 | |
1156 | mutex_lock(&md->buf_mutex); |
1157 | list_for_each_entry_safe(map, _map, &md->mappings, node) { |
1158 | if (map->dir == MAP_DMA && map->phys_addr == handle && |
1159 | map->filp == filp) { |
			kref_put(&map->ref, mport_release_mapping);
1161 | ret = 0; |
1162 | break; |
1163 | } |
1164 | } |
	mutex_unlock(&md->buf_mutex);
1166 | |
1167 | if (ret == -EFAULT) { |
		rmcd_debug(DMA, "ERR no matching mapping");
1169 | return ret; |
1170 | } |
1171 | |
1172 | return 0; |
1173 | } |
1174 | #else |
1175 | static int rio_mport_transfer_ioctl(struct file *filp, void *arg) |
1176 | { |
1177 | return -ENODEV; |
1178 | } |
1179 | |
1180 | static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) |
1181 | { |
1182 | return -ENODEV; |
1183 | } |
1184 | |
1185 | static int rio_mport_alloc_dma(struct file *filp, void __user *arg) |
1186 | { |
1187 | return -ENODEV; |
1188 | } |
1189 | |
1190 | static int rio_mport_free_dma(struct file *filp, void __user *arg) |
1191 | { |
1192 | return -ENODEV; |
1193 | } |
1194 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ |
1195 | |
1196 | /* |
1197 | * Inbound/outbound memory mapping functions |
1198 | */ |
1199 | |
1200 | static int |
1201 | rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, |
1202 | u64 raddr, u64 size, |
1203 | struct rio_mport_mapping **mapping) |
1204 | { |
1205 | struct rio_mport *mport = md->mport; |
1206 | struct rio_mport_mapping *map; |
1207 | int ret; |
1208 | |
1209 | /* rio_map_inb_region() accepts u32 size */ |
1210 | if (size > 0xffffffff) |
1211 | return -EINVAL; |
1212 | |
	map = kzalloc(sizeof(*map), GFP_KERNEL);
1214 | if (map == NULL) |
1215 | return -ENOMEM; |
1216 | |
	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
1219 | if (map->virt_addr == NULL) { |
1220 | ret = -ENOMEM; |
1221 | goto err_dma_alloc; |
1222 | } |
1223 | |
1224 | if (raddr == RIO_MAP_ANY_ADDR) |
1225 | raddr = map->phys_addr; |
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
1227 | if (ret < 0) |
1228 | goto err_map_inb; |
1229 | |
1230 | map->dir = MAP_INBOUND; |
1231 | map->rio_addr = raddr; |
1232 | map->size = size; |
1233 | map->filp = filp; |
1234 | map->md = md; |
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
1239 | *mapping = map; |
1240 | return 0; |
1241 | |
1242 | err_map_inb: |
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
1247 | return ret; |
1248 | } |
1249 | |
1250 | static int |
1251 | rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, |
1252 | u64 raddr, u64 size, |
1253 | struct rio_mport_mapping **mapping) |
1254 | { |
1255 | struct rio_mport_mapping *map; |
1256 | int err = -ENOMEM; |
1257 | |
1258 | if (raddr == RIO_MAP_ANY_ADDR) |
1259 | goto get_new; |
1260 | |
1261 | mutex_lock(&md->buf_mutex); |
1262 | list_for_each_entry(map, &md->mappings, node) { |
1263 | if (map->dir != MAP_INBOUND) |
1264 | continue; |
1265 | if (raddr == map->rio_addr && size == map->size) { |
1266 | /* allow exact match only */ |
1267 | *mapping = map; |
1268 | err = 0; |
1269 | break; |
1270 | } else if (raddr < (map->rio_addr + map->size - 1) && |
1271 | (raddr + size) > map->rio_addr) { |
1272 | err = -EBUSY; |
1273 | break; |
1274 | } |
1275 | } |
	mutex_unlock(&md->buf_mutex);
1277 | |
1278 | if (err != -ENOMEM) |
1279 | return err; |
1280 | get_new: |
1281 | /* not found, create new */ |
1282 | return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); |
1283 | } |
1284 | |
1285 | static int rio_mport_map_inbound(struct file *filp, void __user *arg) |
1286 | { |
1287 | struct mport_cdev_priv *priv = filp->private_data; |
1288 | struct mport_dev *md = priv->md; |
1289 | struct rio_mmap map; |
1290 | struct rio_mport_mapping *mapping = NULL; |
1291 | int ret; |
1292 | |
1293 | if (!md->mport->ops->map_inb) |
1294 | return -EPROTONOSUPPORT; |
1295 | if (unlikely(copy_from_user(&map, arg, sizeof(map)))) |
1296 | return -EFAULT; |
1297 | |
	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
1299 | |
	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
1302 | if (ret) |
1303 | return ret; |
1304 | |
1305 | map.handle = mapping->phys_addr; |
1306 | map.rio_addr = mapping->rio_addr; |
1307 | |
1308 | if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { |
1309 | /* Delete mapping if it was created by this request */ |
1310 | if (ret == 0 && mapping->filp == filp) { |
1311 | mutex_lock(&md->buf_mutex); |
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
1314 | } |
1315 | return -EFAULT; |
1316 | } |
1317 | |
1318 | return 0; |
1319 | } |
1320 | |
1321 | /* |
1322 | * rio_mport_inbound_free() - unmap from RapidIO address space and free |
1323 | * previously allocated inbound DMA coherent buffer |
1324 | * @priv: driver private data |
1325 | * @arg: buffer handle returned by allocation routine |
1326 | */ |
1327 | static int rio_mport_inbound_free(struct file *filp, void __user *arg) |
1328 | { |
1329 | struct mport_cdev_priv *priv = filp->private_data; |
1330 | struct mport_dev *md = priv->md; |
1331 | u64 handle; |
1332 | struct rio_mport_mapping *map, *_map; |
1333 | |
	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
1335 | |
1336 | if (!md->mport->ops->unmap_inb) |
1337 | return -EPROTONOSUPPORT; |
1338 | |
	if (copy_from_user(&handle, arg, sizeof(handle)))
1340 | return -EFAULT; |
1341 | |
1342 | mutex_lock(&md->buf_mutex); |
1343 | list_for_each_entry_safe(map, _map, &md->mappings, node) { |
1344 | if (map->dir == MAP_INBOUND && map->phys_addr == handle) { |
1345 | if (map->filp == filp) { |
1346 | map->filp = NULL; |
				kref_put(&map->ref, mport_release_mapping);
1348 | } |
1349 | break; |
1350 | } |
1351 | } |
	mutex_unlock(&md->buf_mutex);
1353 | |
1354 | return 0; |
1355 | } |
1356 | |
1357 | /* |
1358 | * maint_port_idx_get() - Get the port index of the mport instance |
1359 | * @priv: driver private data |
1360 | * @arg: port index |
1361 | */ |
1362 | static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) |
1363 | { |
1364 | struct mport_dev *md = priv->md; |
1365 | u32 port_idx = md->mport->index; |
1366 | |
	rmcd_debug(MPORT, "port_index=%d", port_idx);
1368 | |
	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
1370 | return -EFAULT; |
1371 | |
1372 | return 0; |
1373 | } |
1374 | |
1375 | static int rio_mport_add_event(struct mport_cdev_priv *priv, |
1376 | struct rio_event *event) |
1377 | { |
1378 | int overflow; |
1379 | |
1380 | if (!(priv->event_mask & event->header)) |
1381 | return -EACCES; |
1382 | |
	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);
1388 | |
1389 | wake_up_interruptible(&priv->event_rx_wait); |
1390 | |
1391 | if (overflow) { |
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
1393 | return -EBUSY; |
1394 | } |
1395 | |
1396 | return 0; |
1397 | } |
1398 | |
1399 | static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, |
1400 | u16 src, u16 dst, u16 info) |
1401 | { |
1402 | struct mport_dev *data = dev_id; |
1403 | struct mport_cdev_priv *priv; |
1404 | struct rio_mport_db_filter *db_filter; |
1405 | struct rio_event event; |
1406 | int handled; |
1407 | |
1408 | event.header = RIO_DOORBELL; |
1409 | event.u.doorbell.rioid = src; |
1410 | event.u.doorbell.payload = info; |
1411 | |
1412 | handled = 0; |
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);
1425 | |
	if (!handled)
		dev_warn(&data->dev,
			 "%s: spurious DB received from 0x%x, info=0x%04x\n",
			 __func__, src, info);
1430 | } |
1431 | |
1432 | static int rio_mport_add_db_filter(struct mport_cdev_priv *priv, |
1433 | void __user *arg) |
1434 | { |
1435 | struct mport_dev *md = priv->md; |
1436 | struct rio_mport_db_filter *db_filter; |
1437 | struct rio_doorbell_filter filter; |
1438 | unsigned long flags; |
1439 | int ret; |
1440 | |
	if (copy_from_user(&filter, arg, sizeof(filter)))
1442 | return -EFAULT; |
1443 | |
1444 | if (filter.low > filter.high) |
1445 | return -EINVAL; |
1446 | |
1447 | ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, |
1448 | rio_mport_doorbell_handler); |
1449 | if (ret) { |
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
1452 | return ret; |
1453 | } |
1454 | |
	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
1456 | if (db_filter == NULL) { |
1457 | rio_release_inb_dbell(md->mport, filter.low, filter.high); |
1458 | return -ENOMEM; |
1459 | } |
1460 | |
1461 | db_filter->filter = filter; |
1462 | db_filter->priv = priv; |
1463 | spin_lock_irqsave(&md->db_lock, flags); |
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);
1467 | |
1468 | return 0; |
1469 | } |
1470 | |
1471 | static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter) |
1472 | { |
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
1476 | } |
1477 | |
1478 | static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, |
1479 | void __user *arg) |
1480 | { |
1481 | struct rio_mport_db_filter *db_filter; |
1482 | struct rio_doorbell_filter filter; |
1483 | unsigned long flags; |
1484 | int ret = -EINVAL; |
1485 | |
	if (copy_from_user(&filter, arg, sizeof(filter)))
1487 | return -EFAULT; |
1488 | |
1489 | if (filter.low > filter.high) |
1490 | return -EINVAL; |
1491 | |
1492 | spin_lock_irqsave(&priv->md->db_lock, flags); |
1493 | list_for_each_entry(db_filter, &priv->db_filters, priv_node) { |
1494 | if (db_filter->filter.rioid == filter.rioid && |
1495 | db_filter->filter.low == filter.low && |
1496 | db_filter->filter.high == filter.high) { |
1497 | rio_mport_delete_db_filter(db_filter); |
1498 | ret = 0; |
1499 | break; |
1500 | } |
1501 | } |
	spin_unlock_irqrestore(&priv->md->db_lock, flags);
1503 | |
1504 | if (!ret) |
1505 | rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); |
1506 | |
1507 | return ret; |
1508 | } |
1509 | |
1510 | static int rio_mport_match_pw(union rio_pw_msg *msg, |
1511 | struct rio_pw_filter *filter) |
1512 | { |
1513 | if ((msg->em.comptag & filter->mask) < filter->low || |
1514 | (msg->em.comptag & filter->mask) > filter->high) |
1515 | return 0; |
1516 | return 1; |
1517 | } |
1518 | |
1519 | static int rio_mport_pw_handler(struct rio_mport *mport, void *context, |
1520 | union rio_pw_msg *msg, int step) |
1521 | { |
1522 | struct mport_dev *md = context; |
1523 | struct mport_cdev_priv *priv; |
1524 | struct rio_mport_pw_filter *pw_filter; |
1525 | struct rio_event event; |
1526 | int handled; |
1527 | |
1528 | event.header = RIO_PORTWRITE; |
1529 | memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE); |
1530 | |
1531 | handled = 0; |
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);
1541 | |
1542 | if (!handled) { |
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
1546 | } |
1547 | |
1548 | return 0; |
1549 | } |
1550 | |
1551 | static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv, |
1552 | void __user *arg) |
1553 | { |
1554 | struct mport_dev *md = priv->md; |
1555 | struct rio_mport_pw_filter *pw_filter; |
1556 | struct rio_pw_filter filter; |
1557 | unsigned long flags; |
1558 | int hadd = 0; |
1559 | |
	if (copy_from_user(&filter, arg, sizeof(filter)))
1561 | return -EFAULT; |
1562 | |
	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
1564 | if (pw_filter == NULL) |
1565 | return -ENOMEM; |
1566 | |
1567 | pw_filter->filter = filter; |
1568 | pw_filter->priv = priv; |
1569 | spin_lock_irqsave(&md->pw_lock, flags); |
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);
1575 | |
1576 | if (hadd) { |
1577 | int ret; |
1578 | |
		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
1581 | if (ret) { |
1582 | dev_err(&md->dev, |
1583 | "%s: failed to add IB_PW handler, err=%d\n" , |
1584 | __func__, ret); |
1585 | return ret; |
1586 | } |
		rio_pw_enable(md->mport, 1);
1588 | } |
1589 | |
1590 | return 0; |
1591 | } |
1592 | |
1593 | static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter) |
1594 | { |
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
1598 | } |
1599 | |
1600 | static int rio_mport_match_pw_filter(struct rio_pw_filter *a, |
1601 | struct rio_pw_filter *b) |
1602 | { |
1603 | if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high)) |
1604 | return 1; |
1605 | return 0; |
1606 | } |
1607 | |
1608 | static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv, |
1609 | void __user *arg) |
1610 | { |
1611 | struct mport_dev *md = priv->md; |
1612 | struct rio_mport_pw_filter *pw_filter; |
1613 | struct rio_pw_filter filter; |
1614 | unsigned long flags; |
1615 | int ret = -EINVAL; |
1616 | int hdel = 0; |
1617 | |
	if (copy_from_user(&filter, arg, sizeof(filter)))
1619 | return -EFAULT; |
1620 | |
1621 | spin_lock_irqsave(&md->pw_lock, flags); |
1622 | list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) { |
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
1624 | rio_mport_delete_pw_filter(pw_filter); |
1625 | ret = 0; |
1626 | break; |
1627 | } |
1628 | } |
1629 | |
	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);
1633 | |
1634 | if (hdel) { |
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
1638 | } |
1639 | |
1640 | return ret; |
1641 | } |
1642 | |
1643 | /* |
1644 | * rio_release_dev - release routine for kernel RIO device object |
1645 | * @dev: kernel device object associated with a RIO device structure |
1646 | * |
1647 | * Frees a RIO device struct associated a RIO device struct. |
1648 | * The RIO device struct is freed. |
1649 | */ |
1650 | static void rio_release_dev(struct device *dev) |
1651 | { |
1652 | struct rio_dev *rdev; |
1653 | |
1654 | rdev = to_rio_dev(dev); |
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
1657 | } |
1658 | |
1659 | |
1660 | static void rio_release_net(struct device *dev) |
1661 | { |
1662 | struct rio_net *net; |
1663 | |
1664 | net = to_rio_net(dev); |
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
1667 | } |
1668 | |
1669 | |
1670 | /* |
1671 | * rio_mport_add_riodev - creates a kernel RIO device object |
1672 | * |
1673 | * Allocates a RIO device data structure and initializes required fields based |
1674 | * on device's configuration space contents. |
1675 | * If the device has switch capabilities, then a switch specific portion is |
1676 | * allocated and configured. |
1677 | */ |
1678 | static int rio_mport_add_riodev(struct mport_cdev_priv *priv, |
1679 | void __user *arg) |
1680 | { |
1681 | struct mport_dev *md = priv->md; |
1682 | struct rio_rdev_info dev_info; |
1683 | struct rio_dev *rdev; |
1684 | struct rio_switch *rswitch = NULL; |
1685 | struct rio_mport *mport; |
1686 | struct device *dev; |
1687 | size_t size; |
1688 | u32 rval; |
1689 | u32 swpinfo = 0; |
1690 | u16 destid; |
1691 | u8 hopcount; |
1692 | int err; |
1693 | |
	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1695 | return -EFAULT; |
1696 | dev_info.name[sizeof(dev_info.name) - 1] = '\0'; |
1697 | |
	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
1699 | dev_info.comptag, dev_info.destid, dev_info.hopcount); |
1700 | |
	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
	if (dev) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
1704 | put_device(dev); |
1705 | return -EEXIST; |
1706 | } |
1707 | |
1708 | size = sizeof(*rdev); |
1709 | mport = md->mport; |
1710 | destid = dev_info.destid; |
1711 | hopcount = dev_info.hopcount; |
1712 | |
	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
1715 | return -EIO; |
1716 | |
1717 | if (rval & RIO_PEF_SWITCH) { |
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
1720 | size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); |
1721 | } |
1722 | |
1723 | rdev = kzalloc(size, GFP_KERNEL); |
1724 | if (rdev == NULL) |
1725 | return -ENOMEM; |
1726 | |
1727 | if (mport->net == NULL) { |
1728 | struct rio_net *net; |
1729 | |
1730 | net = rio_alloc_net(mport); |
1731 | if (!net) { |
1732 | err = -ENOMEM; |
			rmcd_debug(RDEV, "failed to allocate net object");
1734 | goto cleanup; |
1735 | } |
1736 | |
1737 | net->id = mport->id; |
1738 | net->hport = mport; |
		dev_set_name(&net->dev, "rnet_%d", net->id);
1740 | net->dev.parent = &mport->dev; |
1741 | net->dev.release = rio_release_net; |
1742 | err = rio_add_net(net); |
1743 | if (err) { |
1744 | rmcd_debug(RDEV, "failed to register net, err=%d" , err); |
1745 | kfree(objp: net); |
1746 | goto cleanup; |
1747 | } |
1748 | } |
1749 | |
1750 | rdev->net = mport->net; |
1751 | rdev->pef = rval; |
1752 | rdev->swpinfo = swpinfo; |
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;
1766 | |
1767 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { |
1768 | rdev->efptr = rval & 0xffff; |
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
1774 | } |
1775 | |
	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);
1780 | |
1781 | rdev->comp_tag = dev_info.comptag; |
1782 | rdev->destid = destid; |
	/* hopcount is stored as specified by the caller, regardless of EP or SW */
1784 | rdev->hopcount = hopcount; |
1785 | |
1786 | if (rdev->pef & RIO_PEF_SWITCH) { |
1787 | rswitch = rdev->rswitch; |
1788 | rswitch->route_table = NULL; |
1789 | } |
1790 | |
	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
1799 | |
	INIT_LIST_HEAD(&rdev->net_list);
1801 | rdev->dev.parent = &mport->net->dev; |
1802 | rio_attach_device(rdev); |
1803 | rdev->dev.release = rio_release_dev; |
1804 | |
1805 | if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) |
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
1808 | err = rio_add_device(rdev); |
1809 | if (err) { |
		put_device(&rdev->dev);
1811 | return err; |
1812 | } |
1813 | |
1814 | rio_dev_get(rdev); |
1815 | |
1816 | return 0; |
1817 | cleanup: |
	kfree(rdev);
1819 | return err; |
1820 | } |
1821 | |
1822 | static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) |
1823 | { |
1824 | struct rio_rdev_info dev_info; |
1825 | struct rio_dev *rdev = NULL; |
1826 | struct device *dev; |
1827 | struct rio_mport *mport; |
1828 | struct rio_net *net; |
1829 | |
	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1831 | return -EFAULT; |
1832 | dev_info.name[sizeof(dev_info.name) - 1] = '\0'; |
1833 | |
1834 | mport = priv->md->mport; |
1835 | |
1836 | /* If device name is specified, removal by name has priority */ |
1837 | if (strlen(dev_info.name)) { |
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
1840 | if (dev) |
1841 | rdev = to_rio_dev(dev); |
1842 | } else { |
1843 | do { |
			rdev = rio_get_comptag(dev_info.comptag, rdev);
1845 | if (rdev && rdev->dev.parent == &mport->net->dev && |
1846 | rdev->destid == dev_info.destid && |
1847 | rdev->hopcount == dev_info.hopcount) |
1848 | break; |
1849 | } while (rdev); |
1850 | } |
1851 | |
1852 | if (!rdev) { |
		rmcd_debug(RDEV,
			   "device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			   dev_info.name, dev_info.comptag, dev_info.destid,
			   dev_info.hopcount);
1857 | return -ENODEV; |
1858 | } |
1859 | |
1860 | net = rdev->net; |
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
1865 | rio_free_net(net); |
1866 | mport->net = NULL; |
1867 | } |
1868 | |
1869 | return 0; |
1870 | } |
1871 | |
1872 | /* |
1873 | * Mport cdev management |
1874 | */ |
1875 | |
1876 | /* |
1877 | * mport_cdev_open() - Open character device (mport) |
1878 | */ |
1879 | static int mport_cdev_open(struct inode *inode, struct file *filp) |
1880 | { |
1881 | int ret; |
1882 | int minor = iminor(inode); |
1883 | struct mport_dev *chdev; |
1884 | struct mport_cdev_priv *priv; |
1885 | |
1886 | /* Test for valid device */ |
1887 | if (minor >= RIO_MAX_MPORTS) { |
1888 | rmcd_error("Invalid minor device number" ); |
1889 | return -EINVAL; |
1890 | } |
1891 | |
1892 | chdev = container_of(inode->i_cdev, struct mport_dev, cdev); |
1893 | |
	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
1904 | return -ENOMEM; |
1905 | } |
1906 | |
1907 | priv->md = chdev; |
1908 | |
	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
1911 | spin_lock_init(&priv->fifo_lock); |
1912 | init_waitqueue_head(&priv->event_rx_wait); |
1913 | ret = kfifo_alloc(&priv->event_fifo, |
1914 | sizeof(struct rio_event) * MPORT_EVENT_DEPTH, |
1915 | GFP_KERNEL); |
1916 | if (ret < 0) { |
		put_device(&chdev->dev);
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
1919 | ret = -ENOMEM; |
1920 | goto err_fifo; |
1921 | } |
1922 | |
1923 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
	INIT_LIST_HEAD(&priv->async_list);
1925 | spin_lock_init(&priv->req_lock); |
1926 | mutex_init(&priv->dma_lock); |
1927 | #endif |
1928 | mutex_lock(&chdev->file_mutex); |
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);
1931 | |
1932 | filp->private_data = priv; |
1933 | goto out; |
1934 | err_fifo: |
	kfree(priv);
1936 | out: |
1937 | return ret; |
1938 | } |
1939 | |
1940 | static int mport_cdev_fasync(int fd, struct file *filp, int mode) |
1941 | { |
1942 | struct mport_cdev_priv *priv = filp->private_data; |
1943 | |
1944 | return fasync_helper(fd, filp, mode, &priv->async_queue); |
1945 | } |
1946 | |
1947 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
1948 | static void mport_cdev_release_dma(struct file *filp) |
1949 | { |
1950 | struct mport_cdev_priv *priv = filp->private_data; |
1951 | struct mport_dev *md; |
1952 | struct mport_dma_req *req, *req_next; |
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
1954 | long wret; |
1955 | LIST_HEAD(list); |
1956 | |
1957 | rmcd_debug(EXIT, "from filp=%p %s(%d)" , |
1958 | filp, current->comm, task_pid_nr(current)); |
1959 | |
1960 | if (!priv->dmach) { |
1961 | rmcd_debug(EXIT, "No DMA channel for filp=%p" , filp); |
1962 | return; |
1963 | } |
1964 | |
1965 | md = priv->md; |
1966 | |
	spin_lock(&priv->req_lock);
	if (!list_empty(&priv->async_list)) {
		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_splice_init(&priv->async_list, &list);
	}
	spin_unlock(&priv->req_lock);
1974 | |
	if (!list_empty(&list)) {
		rmcd_debug(EXIT, "temp list not empty");
		list_for_each_entry_safe(req, req_next, &list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp) ? "yes" : "no");
			list_del(&req->node);
			kref_put(&req->refcount, dma_req_free);
1983 | } |
1984 | } |
1985 | |
1986 | put_dma_channel(priv); |
	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
1988 | |
1989 | if (wret <= 0) { |
1990 | rmcd_error("%s(%d) failed waiting for DMA release err=%ld" , |
1991 | current->comm, task_pid_nr(current), wret); |
1992 | } |
1993 | |
1994 | if (priv->dmach != priv->md->dma_chan) { |
1995 | rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)" , |
1996 | filp, current->comm, task_pid_nr(current)); |
1997 | rio_release_dma(dchan: priv->dmach); |
1998 | } else { |
1999 | rmcd_debug(EXIT, "Adjust default DMA channel refcount" ); |
2000 | kref_put(kref: &md->dma_ref, release: mport_release_def_dma); |
2001 | } |
2002 | |
2003 | priv->dmach = NULL; |
2004 | } |
2005 | #else |
#define mport_cdev_release_dma(filp) do {} while (0)
2007 | #endif |
2008 | |
2009 | /* |
2010 | * mport_cdev_release() - Release character device |
2011 | */ |
2012 | static int mport_cdev_release(struct inode *inode, struct file *filp) |
2013 | { |
2014 | struct mport_cdev_priv *priv = filp->private_data; |
2015 | struct mport_dev *chdev; |
2016 | struct rio_mport_pw_filter *pw_filter, *pw_filter_next; |
2017 | struct rio_mport_db_filter *db_filter, *db_filter_next; |
2018 | struct rio_mport_mapping *map, *_map; |
2019 | unsigned long flags; |
2020 | |
2021 | rmcd_debug(EXIT, "%s filp=%p" , dev_name(&priv->md->dev), filp); |
2022 | |
2023 | chdev = priv->md; |
2024 | mport_cdev_release_dma(filp); |
2025 | |
2026 | priv->event_mask = 0; |
2027 | |
2028 | spin_lock_irqsave(&chdev->pw_lock, flags); |
	if (!list_empty(&priv->pw_filters)) {
		list_for_each_entry_safe(pw_filter, pw_filter_next,
					 &priv->pw_filters, priv_node)
			rio_mport_delete_pw_filter(pw_filter);
	}
	spin_unlock_irqrestore(&chdev->pw_lock, flags);
2035 | |
2036 | spin_lock_irqsave(&chdev->db_lock, flags); |
2037 | list_for_each_entry_safe(db_filter, db_filter_next, |
2038 | &priv->db_filters, priv_node) { |
2039 | rio_mport_delete_db_filter(db_filter); |
2040 | } |
	spin_unlock_irqrestore(&chdev->db_lock, flags);
2042 | |
2043 | kfifo_free(&priv->event_fifo); |
2044 | |
2045 | mutex_lock(&chdev->buf_mutex); |
2046 | list_for_each_entry_safe(map, _map, &chdev->mappings, node) { |
2047 | if (map->filp == filp) { |
2048 | rmcd_debug(EXIT, "release mapping %p filp=%p" , |
2049 | map->virt_addr, filp); |
2050 | kref_put(kref: &map->ref, release: mport_release_mapping); |
2051 | } |
2052 | } |
2053 | mutex_unlock(lock: &chdev->buf_mutex); |
2054 | |
	mport_cdev_fasync(-1, filp, 0);
	filp->private_data = NULL;
	mutex_lock(&chdev->file_mutex);
	list_del(&priv->list);
	mutex_unlock(&chdev->file_mutex);
	put_device(&chdev->dev);
	kfree(priv);
2062 | return 0; |
2063 | } |
2064 | |
2065 | /* |
2066 | * mport_cdev_ioctl() - IOCTLs for character device |
2067 | */ |
2068 | static long mport_cdev_ioctl(struct file *filp, |
2069 | unsigned int cmd, unsigned long arg) |
2070 | { |
2071 | int err = -EINVAL; |
2072 | struct mport_cdev_priv *data = filp->private_data; |
2073 | struct mport_dev *md = data->md; |
2074 | |
	if (atomic_read(&md->active) == 0)
2076 | return -ENODEV; |
2077 | |
2078 | switch (cmd) { |
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(md->properties),
				 sizeof(md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = (u32)arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(u32)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
2135 | default: |
2136 | break; |
2137 | } |
2138 | |
2139 | return err; |
2140 | } |
2141 | |
2142 | /* |
2143 | * mport_release_mapping - free mapping resources and info structure |
2144 | * @ref: a pointer to the kref within struct rio_mport_mapping |
2145 | * |
2146 | * NOTE: Shall be called while holding buf_mutex. |
2147 | */ |
2148 | static void mport_release_mapping(struct kref *ref) |
2149 | { |
2150 | struct rio_mport_mapping *map = |
2151 | container_of(ref, struct rio_mport_mapping, ref); |
2152 | struct rio_mport *mport = map->md->mport; |
2153 | |
2154 | rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s" , |
2155 | map->dir, map->virt_addr, |
2156 | &map->phys_addr, mport->name); |
2157 | |
	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
		fallthrough;
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
2173 | } |
2174 | |
2175 | static void mport_mm_open(struct vm_area_struct *vma) |
2176 | { |
2177 | struct rio_mport_mapping *map = vma->vm_private_data; |
2178 | |
2179 | rmcd_debug(MMAP, "%pad" , &map->phys_addr); |
2180 | kref_get(kref: &map->ref); |
2181 | } |
2182 | |
2183 | static void mport_mm_close(struct vm_area_struct *vma) |
2184 | { |
2185 | struct rio_mport_mapping *map = vma->vm_private_data; |
2186 | |
2187 | rmcd_debug(MMAP, "%pad" , &map->phys_addr); |
2188 | mutex_lock(&map->md->buf_mutex); |
2189 | kref_put(kref: &map->ref, release: mport_release_mapping); |
2190 | mutex_unlock(lock: &map->md->buf_mutex); |
2191 | } |
2192 | |
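/*
 * The VMA open/close callbacks below keep the mapping's reference
 * count in step with the number of VMAs using it (fork(), partial
 * munmap(), etc.), so the backing window or DMA buffer is only torn
 * down by mport_release_mapping() once the last user is gone.
 */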
2193 | static const struct vm_operations_struct vm_ops = { |
2194 | .open = mport_mm_open, |
2195 | .close = mport_mm_close, |
2196 | }; |
2197 | |
2198 | static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma) |
2199 | { |
2200 | struct mport_cdev_priv *priv = filp->private_data; |
2201 | struct mport_dev *md; |
2202 | size_t size = vma->vm_end - vma->vm_start; |
2203 | dma_addr_t baddr; |
2204 | unsigned long offset; |
2205 | int found = 0, ret; |
2206 | struct rio_mport_mapping *map; |
2207 | |
2208 | rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx" , |
2209 | (unsigned int)size, vma->vm_pgoff); |
2210 | |
2211 | md = priv->md; |
2212 | baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT); |
2213 | |
2214 | mutex_lock(&md->buf_mutex); |
2215 | list_for_each_entry(map, &md->mappings, node) { |
2216 | if (baddr >= map->phys_addr && |
2217 | baddr < (map->phys_addr + map->size)) { |
2218 | found = 1; |
2219 | break; |
2220 | } |
2221 | } |
	mutex_unlock(&md->buf_mutex);
2223 | |
2224 | if (!found) |
2225 | return -ENOMEM; |
2226 | |
2227 | offset = baddr - map->phys_addr; |
2228 | |
2229 | if (size + offset > map->size) |
2230 | return -EINVAL; |
2231 | |
2232 | vma->vm_pgoff = offset >> PAGE_SHIFT; |
2233 | rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx" , vma->vm_pgoff); |
2234 | |
2235 | if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) |
2236 | ret = dma_mmap_coherent(md->mport->dev.parent, vma, |
2237 | map->virt_addr, map->phys_addr, map->size); |
2238 | else if (map->dir == MAP_OUTBOUND) { |
2239 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
2243 | ret = -EIO; |
2244 | } |
2245 | |
2246 | if (!ret) { |
2247 | vma->vm_private_data = map; |
2248 | vma->vm_ops = &vm_ops; |
2249 | mport_mm_open(vma); |
2250 | } else { |
2251 | rmcd_error("MMAP exit with err=%d" , ret); |
2252 | } |
2253 | |
2254 | return ret; |
2255 | } |
2256 | |
2257 | static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait) |
2258 | { |
2259 | struct mport_cdev_priv *priv = filp->private_data; |
2260 | |
	poll_wait(filp, &priv->event_rx_wait, wait);
2262 | if (kfifo_len(&priv->event_fifo)) |
2263 | return EPOLLIN | EPOLLRDNORM; |
2264 | |
2265 | return 0; |
2266 | } |
2267 | |
2268 | static ssize_t mport_read(struct file *filp, char __user *buf, size_t count, |
2269 | loff_t *ppos) |
2270 | { |
2271 | struct mport_cdev_priv *priv = filp->private_data; |
2272 | int copied; |
2273 | ssize_t ret; |
2274 | |
2275 | if (!count) |
2276 | return 0; |
2277 | |
2278 | if (kfifo_is_empty(&priv->event_fifo) && |
2279 | (filp->f_flags & O_NONBLOCK)) |
2280 | return -EAGAIN; |
2281 | |
2282 | if (count % sizeof(struct rio_event)) |
2283 | return -EINVAL; |
2284 | |
2285 | ret = wait_event_interruptible(priv->event_rx_wait, |
2286 | kfifo_len(&priv->event_fifo) != 0); |
2287 | if (ret) |
2288 | return ret; |
2289 | |
2290 | while (ret < count) { |
2291 | if (kfifo_to_user(&priv->event_fifo, buf, |
2292 | sizeof(struct rio_event), &copied)) |
2293 | return -EFAULT; |
2294 | ret += copied; |
2295 | buf += copied; |
2296 | } |
2297 | |
2298 | return ret; |
2299 | } |
2300 | |
2301 | static ssize_t mport_write(struct file *filp, const char __user *buf, |
2302 | size_t count, loff_t *ppos) |
2303 | { |
2304 | struct mport_cdev_priv *priv = filp->private_data; |
2305 | struct rio_mport *mport = priv->md->mport; |
2306 | struct rio_event event; |
2307 | int len, ret; |
2308 | |
2309 | if (!count) |
2310 | return 0; |
2311 | |
2312 | if (count % sizeof(event)) |
2313 | return -EINVAL; |
2314 | |
2315 | len = 0; |
2316 | while ((count - len) >= (int)sizeof(event)) { |
		if (copy_from_user(&event, buf, sizeof(event)))
2318 | return -EFAULT; |
2319 | |
2320 | if (event.header != RIO_DOORBELL) |
2321 | return -EINVAL; |
2322 | |
		ret = rio_mport_send_doorbell(mport,
					      event.u.doorbell.rioid,
					      event.u.doorbell.payload);
2326 | if (ret < 0) |
2327 | return ret; |
2328 | |
2329 | len += sizeof(event); |
2330 | buf += sizeof(event); |
2331 | } |
2332 | |
2333 | return len; |
2334 | } |
2335 | |
2336 | static const struct file_operations mport_fops = { |
2337 | .owner = THIS_MODULE, |
2338 | .open = mport_cdev_open, |
2339 | .release = mport_cdev_release, |
2340 | .poll = mport_cdev_poll, |
2341 | .read = mport_read, |
2342 | .write = mport_write, |
2343 | .mmap = mport_cdev_mmap, |
2344 | .fasync = mport_cdev_fasync, |
2345 | .unlocked_ioctl = mport_cdev_ioctl |
2346 | }; |
2347 | |
2348 | /* |
2349 | * Character device management |
2350 | */ |
2351 | |
2352 | static void mport_device_release(struct device *dev) |
2353 | { |
2354 | struct mport_dev *md; |
2355 | |
2356 | rmcd_debug(EXIT, "%s" , dev_name(dev)); |
2357 | md = container_of(dev, struct mport_dev, dev); |
2358 | kfree(objp: md); |
2359 | } |
2360 | |
2361 | /* |
2362 | * mport_cdev_add() - Create mport_dev from rio_mport |
2363 | * @mport: RapidIO master port |
2364 | */ |
2365 | static struct mport_dev *mport_cdev_add(struct rio_mport *mport) |
2366 | { |
2367 | int ret = 0; |
2368 | struct mport_dev *md; |
2369 | struct rio_mport_attr attr; |
2370 | |
	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable to allocate a device object");
2374 | return NULL; |
2375 | } |
2376 | |
2377 | md->mport = mport; |
2378 | mutex_init(&md->buf_mutex); |
2379 | mutex_init(&md->file_mutex); |
	INIT_LIST_HEAD(&md->file_list);
2381 | |
	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = &dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);
2389 | |
2390 | cdev_init(&md->cdev, &mport_fops); |
2391 | md->cdev.owner = THIS_MODULE; |
2392 | |
	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);
2398 | |
2399 | md->properties.id = mport->id; |
2400 | md->properties.sys_size = mport->sys_size; |
2401 | md->properties.hdid = mport->host_deviceid; |
2402 | md->properties.index = mport->index; |
2403 | |
	/* The transfer_mode property is returned through the mport query
	 * interface.
	 */
2407 | #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ |
2408 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; |
2409 | #else |
2410 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; |
2411 | #endif |
2412 | |
	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}
	ret = rio_query_mport(mport, &attr);
2420 | if (!ret) { |
2421 | md->properties.flags = attr.flags; |
2422 | md->properties.link_speed = attr.link_speed; |
2423 | md->properties.link_width = attr.link_width; |
2424 | md->properties.dma_max_sge = attr.dma_max_sge; |
2425 | md->properties.dma_max_size = attr.dma_max_size; |
2426 | md->properties.dma_align = attr.dma_align; |
2427 | md->properties.cap_sys_size = 0; |
2428 | md->properties.cap_transfer_mode = 0; |
2429 | md->properties.cap_addr_size = 0; |
2430 | } else |
2431 | pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n" , |
2432 | mport->name, MAJOR(dev_number), mport->id); |
2433 | |
2434 | mutex_lock(&mport_devs_lock); |
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);
2437 | |
2438 | pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n" , |
2439 | mport->name, MAJOR(dev_number), mport->id); |
2440 | |
2441 | return md; |
2442 | |
2443 | err_cdev: |
	put_device(&md->dev);
2445 | return NULL; |
2446 | } |
2447 | |
2448 | /* |
2449 | * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release |
2450 | * associated DMA channels. |
2451 | */ |
2452 | static void mport_cdev_terminate_dma(struct mport_dev *md) |
2453 | { |
2454 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
2455 | struct mport_cdev_priv *client; |
2456 | |
	rmcd_debug(DMA, "%s", dev_name(&md->dev));
2458 | |
2459 | mutex_lock(&md->file_mutex); |
2460 | list_for_each_entry(client, &md->file_list, list) { |
2461 | if (client->dmach) { |
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
2464 | } |
2465 | } |
	mutex_unlock(&md->file_mutex);
2467 | |
2468 | if (md->dma_chan) { |
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
2471 | md->dma_chan = NULL; |
2472 | } |
2473 | #endif |
2474 | } |
2475 | |
2476 | |
2477 | /* |
2478 | * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open |
2479 | * mport_cdev files. |
2480 | */ |
2481 | static int mport_cdev_kill_fasync(struct mport_dev *md) |
2482 | { |
2483 | unsigned int files = 0; |
2484 | struct mport_cdev_priv *client; |
2485 | |
2486 | mutex_lock(&md->file_mutex); |
2487 | list_for_each_entry(client, &md->file_list, list) { |
2488 | if (client->async_queue) |
2489 | kill_fasync(&client->async_queue, SIGIO, POLL_HUP); |
2490 | files++; |
2491 | } |
	mutex_unlock(&md->file_mutex);
2493 | return files; |
2494 | } |
2495 | |
2496 | /* |
2497 | * mport_cdev_remove() - Remove mport character device |
2498 | * @dev: Mport device to remove |
2499 | */ |
2500 | static void mport_cdev_remove(struct mport_dev *md) |
2501 | { |
2502 | struct rio_mport_mapping *map, *_map; |
2503 | |
2504 | rmcd_debug(EXIT, "Remove %s cdev" , md->mport->name); |
2505 | atomic_set(v: &md->active, i: 0); |
2506 | mport_cdev_terminate_dma(md); |
2507 | rio_del_mport_pw_handler(mport: md->mport, dev_id: md, pwcback: rio_mport_pw_handler); |
2508 | cdev_device_del(cdev: &md->cdev, dev: &md->dev); |
2509 | mport_cdev_kill_fasync(md); |
2510 | |
2511 | /* TODO: do we need to give clients some time to close file |
2512 | * descriptors? Simple wait for XX, or kref? |
2513 | */ |
2514 | |
2515 | /* |
2516 | * Release DMA buffers allocated for the mport device. |
2517 | * Disable associated inbound Rapidio requests mapping if applicable. |
2518 | */ |
2519 | mutex_lock(&md->buf_mutex); |
2520 | list_for_each_entry_safe(map, _map, &md->mappings, node) { |
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("%s pending mappings on removal",
			  md->mport->name);
2528 | |
2529 | rio_release_inb_dbell(md->mport, 0, 0x0fff); |
2530 | |
	put_device(&md->dev);
2532 | } |
2533 | |
2534 | /* |
2535 | * RIO rio_mport_interface driver |
2536 | */ |
2537 | |
2538 | /* |
2539 | * mport_add_mport() - Add rio_mport from LDM device struct |
2540 | * @dev: Linux device model struct |
2541 | */ |
2542 | static int mport_add_mport(struct device *dev) |
2543 | { |
2544 | struct rio_mport *mport = NULL; |
2545 | struct mport_dev *chdev = NULL; |
2546 | |
2547 | mport = to_rio_mport(dev); |
2548 | if (!mport) |
2549 | return -ENODEV; |
2550 | |
2551 | chdev = mport_cdev_add(mport); |
2552 | if (!chdev) |
2553 | return -ENODEV; |
2554 | |
2555 | return 0; |
2556 | } |
2557 | |
2558 | /* |
2559 | * mport_remove_mport() - Remove rio_mport from global list |
2560 | * TODO remove device from global mport_dev list |
2561 | */ |
2562 | static void mport_remove_mport(struct device *dev) |
2563 | { |
2564 | struct rio_mport *mport = NULL; |
2565 | struct mport_dev *chdev; |
2566 | int found = 0; |
2567 | |
2568 | mport = to_rio_mport(dev); |
2569 | rmcd_debug(EXIT, "Remove %s" , mport->name); |
2570 | |
2571 | mutex_lock(&mport_devs_lock); |
2572 | list_for_each_entry(chdev, &mport_devs, node) { |
2573 | if (chdev->mport->id == mport->id) { |
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
2576 | found = 1; |
2577 | break; |
2578 | } |
2579 | } |
	mutex_unlock(&mport_devs_lock);
2581 | |
2582 | if (found) |
		mport_cdev_remove(chdev);
2584 | } |
2585 | |
2586 | /* the rio_mport_interface is used to handle local mport devices */ |
2587 | static struct class_interface rio_mport_interface __refdata = { |
2588 | .class = &rio_mport_class, |
2589 | .add_dev = mport_add_mport, |
2590 | .remove_dev = mport_remove_mport, |
2591 | }; |
2592 | |
2593 | /* |
2594 | * Linux kernel module |
2595 | */ |
2596 | |
2597 | /* |
2598 | * mport_init - Driver module loading |
2599 | */ |
2600 | static int __init mport_init(void) |
2601 | { |
2602 | int ret; |
2603 | |
2604 | /* Create device class needed by udev */ |
	ret = class_register(&dev_class);
	if (ret) {
		rmcd_error("Unable to create " DRV_NAME " class");
2608 | return ret; |
2609 | } |
2610 | |
2611 | ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); |
2612 | if (ret < 0) |
2613 | goto err_chr; |
2614 | |
2615 | rmcd_debug(INIT, "Registered class with major=%d" , MAJOR(dev_number)); |
2616 | |
2617 | /* Register to rio_mport_interface */ |
2618 | ret = class_interface_register(&rio_mport_interface); |
2619 | if (ret) { |
2620 | rmcd_error("class_interface_register() failed, err=%d" , ret); |
2621 | goto err_cli; |
2622 | } |
2623 | |
2624 | return 0; |
2625 | |
2626 | err_cli: |
2627 | unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); |
2628 | err_chr: |
	class_unregister(&dev_class);
2630 | return ret; |
2631 | } |
2632 | |
2633 | /** |
2634 | * mport_exit - Driver module unloading |
2635 | */ |
2636 | static void __exit mport_exit(void) |
2637 | { |
2638 | class_interface_unregister(&rio_mport_interface); |
	class_unregister(&dev_class);
2640 | unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); |
2641 | } |
2642 | |
2643 | module_init(mport_init); |
2644 | module_exit(mport_exit); |
2645 | |