// SPDX-License-Identifier: GPL-2.0
/*
 * dim2.c - MediaLB DIM2 Hardware Dependent Module
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/most.h>
#include <linux/of.h>
#include "hal.h"
#include "errors.h"
#include "sysfs.h"

#define DMA_CHANNELS (32 - 1)	/* channel 0 is a system channel */

#define MAX_BUFFERS_PACKET	32
#define MAX_BUFFERS_STREAMING	32
#define MAX_BUF_SIZE_PACKET	2048
#define MAX_BUF_SIZE_STREAMING	(8 * 1024)

/*
 * The parameter representing the number of frames per sub-buffer for
 * synchronous channels. Valid values: [0 .. 6].
 *
 * The values 0, 1, 2, 3, 4, 5, 6 represent corresponding number of frames per
 * sub-buffer 1, 2, 4, 8, 16, 32, 64.
 */
static u8 fcnt = 4;  /* (1 << fcnt) frames per subbuffer */
module_param(fcnt, byte, 0000);
MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");

static DEFINE_SPINLOCK(dim_lock);

/**
 * struct hdm_channel - private structure to keep channel specific data
 * @name: channel name
 * @is_initialized: flag indicating whether the channel is initialized
 * @ch: HAL specific channel data
 * @reset_dbr_size: reset DBR data buffer size
 * @pending_list: list to keep MBO's before starting transfer
 * @started_list: list to keep MBO's after starting transfer
 * @direction: channel direction (TX or RX)
 * @data_type: channel data type
 */
struct hdm_channel {
	char name[sizeof "caNNN"];
	bool is_initialized;
	struct dim_channel ch;
	u16 *reset_dbr_size;
	struct list_head pending_list;	/* before dim_enqueue_buffer() */
	struct list_head started_list;	/* after dim_enqueue_buffer() */
	enum most_channel_direction direction;
	enum most_channel_data_type data_type;
};

/*
 * struct dim2_hdm - private structure to keep interface specific data
 * @hch: an array of channel specific data
 * @most_iface: most interface structure
 * @capabilities: an array of channel capability data
 * @io_base: I/O register base address
 * @netinfo_task: thread to deliver network status
 * @netinfo_waitq: waitq for the thread to sleep
 * @deliver_netinfo: count of network status messages to be delivered
 * @mac_addrs: INIC MAC address
 * @link_state: network link state
 * @atx_idx: index of async tx channel
 */
struct dim2_hdm {
	struct device dev;
	struct hdm_channel hch[DMA_CHANNELS];
	struct most_channel_capability capabilities[DMA_CHANNELS];
	struct most_interface most_iface;
	char name[16 + sizeof "dim2-"];
	void __iomem *io_base;
	u8 clk_speed;
	struct clk *clk;
	struct clk *clk_pll;
	struct task_struct *netinfo_task;
	wait_queue_head_t netinfo_waitq;
	int deliver_netinfo;
	unsigned char mac_addrs[6];
	unsigned char link_state;
	int atx_idx;
	struct medialb_bus bus;
	void (*on_netinfo)(struct most_interface *most_iface,
			   unsigned char link_state, unsigned char *addrs);
	void (*disable_platform)(struct platform_device *pdev);
};

struct dim2_platform_data {
	int (*enable)(struct platform_device *pdev);
	void (*disable)(struct platform_device *pdev);
	u8 fcnt;
};

static inline struct dim2_hdm *iface_to_hdm(struct most_interface *iface)
{
	return container_of(iface, struct dim2_hdm, most_iface);
}

/* Macro to identify a network status message */
#define PACKET_IS_NET_INFO(p)  \
	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	bool state;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	state = dim_get_lock_state();
	spin_unlock_irqrestore(&dim_lock, flags);

	return sysfs_emit(buf, "%s\n", state ? "locked" : "");
}

static DEVICE_ATTR_RO(state);

static struct attribute *dim2_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(dim2);

/**
 * dimcb_on_error - callback from HAL to report miscommunication between
 * HDM and HAL
 * @error_id: Error ID
 * @error_message: Error message. Some text in a free format
 */
void dimcb_on_error(u8 error_id, const char *error_message)
{
	pr_err("%s: error_id - %d, error_message - %s\n", __func__, error_id,
	       error_message);
}

/**
 * try_start_dim_transfer - try to transfer a buffer on a channel
 * @hdm_ch: channel specific data
 *
 * Transfer a buffer from pending_list if the channel is ready
 */
static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
{
	u16 buf_size;
	struct list_head *head = &hdm_ch->pending_list;
	struct mbo *mbo;
	unsigned long flags;
	struct dim_ch_state st;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);
	if (list_empty(head)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	mbo = list_first_entry(head, struct mbo, list);
	buf_size = mbo->buffer_length;

	if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	BUG_ON(mbo->bus_address == 0);
	if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);
		mbo->processed_length = 0;
		mbo->status = MBO_E_INVAL;
		mbo->complete(mbo);
		return -EFAULT;
	}

	list_move_tail(head->next, &hdm_ch->started_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	return 0;
}

/**
 * deliver_netinfo_thread - thread to deliver network status to mostcore
 * @data: private data
 *
 * Wait for network status and deliver it to mostcore once it is received
 */
static int deliver_netinfo_thread(void *data)
{
	struct dim2_hdm *dev = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->netinfo_waitq,
					 dev->deliver_netinfo ||
					 kthread_should_stop());

		if (dev->deliver_netinfo) {
			dev->deliver_netinfo--;
			if (dev->on_netinfo) {
				dev->on_netinfo(&dev->most_iface,
						dev->link_state,
						dev->mac_addrs);
			}
		}
	}

	return 0;
}

/**
 * retrieve_netinfo - retrieve network status from received buffer
 * @dev: private data
 * @mbo: received MBO
 *
 * Parse the message in the buffer and extract the node address, link state
 * and MAC address, then wake up the thread that delivers this status to
 * mostcore.
 */
static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
{
	u8 *data = mbo->virt_address;

	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
	dev->link_state = data[18];
	pr_info("NIState: %d\n", dev->link_state);
	memcpy(dev->mac_addrs, data + 19, 6);
	dev->deliver_netinfo++;
	wake_up_interruptible(&dev->netinfo_waitq);
}

/**
 * service_done_flag - handle completed buffers
 * @dev: private data
 * @ch_idx: channel index
 *
 * Return the completed buffers to mostcore using the completion callback.
 */
static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
{
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	struct dim_ch_state st;
	struct list_head *head;
	struct mbo *mbo;
	int done_buffers;
	unsigned long flags;
	u8 *data;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);

	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
	if (!done_buffers) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}

	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dim_lock, flags);

	head = &hdm_ch->started_list;

	while (done_buffers) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		data = mbo->virt_address;

		if (hdm_ch->data_type == MOST_CH_ASYNC &&
		    hdm_ch->direction == MOST_CH_RX &&
		    PACKET_IS_NET_INFO(data)) {
			retrieve_netinfo(dev, mbo);

			spin_lock_irqsave(&dim_lock, flags);
			list_add_tail(&mbo->list, &hdm_ch->pending_list);
			spin_unlock_irqrestore(&dim_lock, flags);
		} else {
			if (hdm_ch->data_type == MOST_CH_CONTROL ||
			    hdm_ch->data_type == MOST_CH_ASYNC) {
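				/*
				 * The first two bytes of a control/async
				 * packet carry the PML (packet length), high
				 * byte first; the received size is that
				 * length plus the 2 bytes of the PML field
				 * itself (see request_netinfo() below).
				 */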
				u32 const data_size =
					(u32)data[0] * 256 + data[1] + 2;

				mbo->processed_length =
					min_t(u32, data_size,
					      mbo->buffer_length);
			} else {
				mbo->processed_length = mbo->buffer_length;
			}
			mbo->status = MBO_SUCCESS;
			mbo->complete(mbo);
		}

		done_buffers--;
	}
}

static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
						struct dim_channel **buffer)
{
	int idx = 0;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (dev->hch[ch_idx].is_initialized)
			buffer[idx++] = &dev->hch[ch_idx].ch;
	}
	buffer[idx++] = NULL;

	return buffer;
}

static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_mlb_int_irq();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
		while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
			continue;

	return IRQ_HANDLED;
}

static irqreturn_t dim2_task_irq(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	unsigned long flags;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (!dev->hch[ch_idx].is_initialized)
			continue;

		spin_lock_irqsave(&dim_lock, flags);
		dim_service_channel(&dev->hch[ch_idx].ch);
		spin_unlock_irqrestore(&dim_lock, flags);

		service_done_flag(dev, ch_idx);
		while (!try_start_dim_transfer(dev->hch + ch_idx))
			continue;
	}

	return IRQ_HANDLED;
}

/**
 * dim2_ahb_isr - interrupt service routine
 * @irq: irq number
 * @_dev: private data
 *
 * Acknowledge the interrupt and service each initialized channel,
 * if needed, in task context.
 */
static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	struct dim_channel *buffer[DMA_CHANNELS + 1];
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_ahb_int_irq(get_active_channels(dev, buffer));
	spin_unlock_irqrestore(&dim_lock, flags);

	return IRQ_WAKE_THREAD;
}

/**
 * complete_all_mbos - complete MBO's in a list
 * @head: list head
 *
 * Delete all entries from the list and return the MBO's to mostcore using
 * the completion callback.
 */
static void complete_all_mbos(struct list_head *head)
{
	unsigned long flags;
	struct mbo *mbo;

	for (;;) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		mbo->processed_length = 0;
		mbo->status = MBO_E_CLOSE;
		mbo->complete(mbo);
	}
}

/**
 * configure_channel - initialize a channel
 * @most_iface: interface the channel belongs to
 * @ch_idx: channel index to be configured
 * @ccfg: structure that holds the configuration information
 *
 * Receive configuration information from mostcore and initialize the
 * corresponding channel. Return 0 on success, negative on failure.
 */
static int configure_channel(struct most_interface *most_iface, int ch_idx,
			     struct most_channel_config *ccfg)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	bool const is_tx = ccfg->direction == MOST_CH_TX;
	u16 const sub_size = ccfg->subbuffer_size;
	u16 const buf_size = ccfg->buffer_size;
	u16 new_size;
	unsigned long flags;
	u8 hal_ret;
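	/* map the channel index to an even MediaLB channel address, starting at 2 */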
	int const ch_addr = ch_idx * 2 + 2;
	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (hdm_ch->is_initialized)
		return -EPERM;

	/* do not reset if the property was set by user, see poison_channel */
	hdm_ch->reset_dbr_size = ccfg->dbr_size ? NULL : &ccfg->dbr_size;

	/* zero value is default dbr_size, see dim2 hal */
	hdm_ch->ch.dbr_size = ccfg->dbr_size;

	switch (ccfg->data_type) {
	case MOST_CH_CONTROL:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: too small buffer size\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
					   is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ASYNC:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: too small buffer size\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
					 is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ISOC:
		new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	case MOST_CH_SYNC:
		new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	default:
		pr_err("%s: configure failed, bad channel type: %d\n",
		       hdm_ch->name, ccfg->data_type);
		return -EINVAL;
	}

	if (hal_ret != DIM_NO_ERROR) {
		spin_unlock_irqrestore(&dim_lock, flags);
		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
		return -ENODEV;
	}

	hdm_ch->data_type = ccfg->data_type;
	hdm_ch->direction = ccfg->direction;
	hdm_ch->is_initialized = true;

	if (hdm_ch->data_type == MOST_CH_ASYNC &&
	    hdm_ch->direction == MOST_CH_TX &&
	    dev->atx_idx < 0)
		dev->atx_idx = ch_idx;

	spin_unlock_irqrestore(&dim_lock, flags);
	ccfg->dbr_size = hdm_ch->ch.dbr_size;

	return 0;
}

/**
 * enqueue - enqueue a buffer for data transfer
 * @most_iface: intended interface
 * @ch_idx: ID of the channel the buffer is intended for
 * @mbo: pointer to the buffer object
 *
 * Push the buffer into pending_list and try to transfer one buffer from
 * pending_list. Return 0 on success, negative on failure.
 */
static int enqueue(struct most_interface *most_iface, int ch_idx,
		   struct mbo *mbo)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	if (mbo->bus_address == 0)
		return -EFAULT;

	spin_lock_irqsave(&dim_lock, flags);
	list_add_tail(&mbo->list, &hdm_ch->pending_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	(void)try_start_dim_transfer(hdm_ch);

	return 0;
}

/**
 * request_netinfo - triggers retrieving of network info
 * @most_iface: pointer to the interface
 * @ch_idx: corresponding channel ID
 * @on_netinfo: call-back used to deliver network status to mostcore
 *
 * Send a command to INIC that triggers retrieving of network info by means of
 * "Message exchange over MDP/MEP".
 */
static void request_netinfo(struct most_interface *most_iface, int ch_idx,
			    void (*on_netinfo)(struct most_interface *,
					       unsigned char, unsigned char *))
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct mbo *mbo;
	u8 *data;

	dev->on_netinfo = on_netinfo;
	if (!on_netinfo)
		return;

	if (dev->atx_idx < 0) {
		pr_err("Async Tx Not initialized\n");
		return;
	}

	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
	if (!mbo)
		return;

	mbo->buffer_length = 5;

	data = mbo->virt_address;

	data[0] = 0x00; /* PML High byte */
	data[1] = 0x03; /* PML Low byte */
	data[2] = 0x02; /* PMHL */
	data[3] = 0x08; /* FPH */
	data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */

	most_submit_mbo(mbo);
}

/**
 * poison_channel - poison buffers of a channel
 * @most_iface: pointer to the interface the channel to be poisoned belongs to
 * @ch_idx: corresponding channel ID
 *
 * Destroy a channel and complete all the buffers in both started_list &
 * pending_list. Return 0 on success, negative on failure.
 */
static int poison_channel(struct most_interface *most_iface, int ch_idx)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;
	u8 hal_ret;
	int ret = 0;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	spin_lock_irqsave(&dim_lock, flags);
	hal_ret = dim_destroy_channel(&hdm_ch->ch);
	hdm_ch->is_initialized = false;
	if (ch_idx == dev->atx_idx)
		dev->atx_idx = -1;
	spin_unlock_irqrestore(&dim_lock, flags);
	if (hal_ret != DIM_NO_ERROR) {
		pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
		ret = -EFAULT;
	}

	complete_all_mbos(&hdm_ch->started_list);
	complete_all_mbos(&hdm_ch->pending_list);
	if (hdm_ch->reset_dbr_size)
		*hdm_ch->reset_dbr_size = 0;

	return ret;
}

static void *dma_alloc(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	return dma_alloc_coherent(dev, size, &mbo->bus_address, GFP_KERNEL);
}

static void dma_free(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
}

static const struct of_device_id dim2_of_match[];

static struct {
	const char *clock_speed;
	u8 clk_speed;
} clk_mt[] = {
	{ "256fs", CLK_256FS },
	{ "512fs", CLK_512FS },
	{ "1024fs", CLK_1024FS },
	{ "2048fs", CLK_2048FS },
	{ "3072fs", CLK_3072FS },
	{ "4096fs", CLK_4096FS },
	{ "6144fs", CLK_6144FS },
	{ "8192fs", CLK_8192FS },
};

/**
 * get_dim2_clk_speed - converts string to DIM2 clock speed value
 *
 * @clock_speed: string in the format "{NUMBER}fs"
 * @val: pointer to get one of the CLK_{NUMBER}FS values
 *
 * On success, stores one of the CLK_{NUMBER}FS values in *val and returns 0,
 * otherwise returns -EINVAL.
 */
static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_mt); i++) {
		if (!strcmp(clock_speed, clk_mt[i].clock_speed)) {
			*val = clk_mt[i].clk_speed;
			return 0;
		}
	}
	return -EINVAL;
}

static void dim2_release(struct device *d)
{
	struct dim2_hdm *dev = container_of(d, struct dim2_hdm, dev);
	unsigned long flags;

	kthread_stop(dev->netinfo_task);

	spin_lock_irqsave(&dim_lock, flags);
	dim_shutdown();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->disable_platform)
		dev->disable_platform(to_platform_device(d->parent));

	kfree(dev);
}

/*
 * dim2_probe - dim2 probe handler
 * @pdev: platform device structure
 *
 * Register the dim2 interface with mostcore and initialize it.
 * Return 0 on success, negative on failure.
 */
static int dim2_probe(struct platform_device *pdev)
{
	const struct dim2_platform_data *pdata;
	const struct of_device_id *of_id;
	const char *clock_speed;
	struct dim2_hdm *dev;
	struct resource *res;
	int ret, i;
	u8 hal_ret;
	u8 dev_fcnt = fcnt;
	int irq;

	enum { MLB_INT_IDX, AHB0_INT_IDX };

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->atx_idx = -1;

	platform_set_drvdata(pdev, dev);

	ret = of_property_read_string(pdev->dev.of_node,
				      "microchip,clock-speed", &clock_speed);
	if (ret) {
		dev_err(&pdev->dev, "missing dt property clock-speed\n");
		goto err_free_dev;
	}

	ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
	if (ret) {
		dev_err(&pdev->dev, "bad dt property clock-speed\n");
		goto err_free_dev;
	}

	dev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dev->io_base)) {
		ret = PTR_ERR(dev->io_base);
		goto err_free_dev;
	}

	of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
	pdata = of_id->data;
	if (pdata) {
		if (pdata->enable) {
			ret = pdata->enable(pdev);
			if (ret)
				goto err_free_dev;
		}
		dev->disable_platform = pdata->disable;
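		/*
		 * Platform data may override the fcnt module parameter
		 * (e.g. the R-Car Gen3 entry below fixes fcnt to 3).
		 */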
		if (pdata->fcnt)
			dev_fcnt = pdata->fcnt;
	}

	dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n",
		 dev_fcnt);
	hal_ret = dim_startup(dev->io_base, dev->clk_speed, dev_fcnt);
	if (hal_ret != DIM_NO_ERROR) {
		dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
		ret = -ENODEV;
		goto err_disable_platform;
	}

	irq = platform_get_irq(pdev, AHB0_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, dim2_ahb_isr,
					dim2_task_irq, 0, "dim2_ahb0_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	irq = platform_get_irq(pdev, MLB_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
			       "dim2_mlb_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	init_waitqueue_head(&dev->netinfo_waitq);
	dev->deliver_netinfo = 0;
	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, dev,
					"dim2_netinfo");
	if (IS_ERR(dev->netinfo_task)) {
		ret = PTR_ERR(dev->netinfo_task);
		goto err_shutdown_dim;
	}

	for (i = 0; i < DMA_CHANNELS; i++) {
		struct most_channel_capability *cap = dev->capabilities + i;
		struct hdm_channel *hdm_ch = dev->hch + i;

		INIT_LIST_HEAD(&hdm_ch->pending_list);
		INIT_LIST_HEAD(&hdm_ch->started_list);
		hdm_ch->is_initialized = false;
		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);

		cap->name_suffix = hdm_ch->name;
		cap->direction = MOST_CH_RX | MOST_CH_TX;
		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				 MOST_CH_ISOC | MOST_CH_SYNC;
		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
	}

	{
		const char *fmt;

		if (sizeof(res->start) == sizeof(long long))
			fmt = "dim2-%016llx";
		else if (sizeof(res->start) == sizeof(long))
			fmt = "dim2-%016lx";
		else
			fmt = "dim2-%016x";

		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
	}

	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
	dev->most_iface.description = dev->name;
	dev->most_iface.num_channels = DMA_CHANNELS;
	dev->most_iface.channel_vector = dev->capabilities;
	dev->most_iface.configure = configure_channel;
	dev->most_iface.enqueue = enqueue;
	dev->most_iface.dma_alloc = dma_alloc;
	dev->most_iface.dma_free = dma_free;
	dev->most_iface.poison_channel = poison_channel;
	dev->most_iface.request_netinfo = request_netinfo;
	dev->most_iface.driver_dev = &pdev->dev;
	dev->most_iface.dev = &dev->dev;
	dev->dev.init_name = dev->name;
	dev->dev.parent = &pdev->dev;
	dev->dev.release = dim2_release;

	return most_register_interface(&dev->most_iface);

err_shutdown_dim:
	dim_shutdown();
err_disable_platform:
	if (dev->disable_platform)
		dev->disable_platform(pdev);
err_free_dev:
	kfree(dev);

	return ret;
}

/**
 * dim2_remove - dim2 remove handler
 * @pdev: platform device structure
 *
 * Unregister the interface from mostcore
 */
static void dim2_remove(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	most_deregister_interface(&dev->most_iface);
}

/* platform specific functions [[ */

static int fsl_mx6_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, "mlb");
	if (IS_ERR_OR_NULL(dev->clk)) {
		dev_err(&pdev->dev, "unable to get mlb clock\n");
		return -EFAULT;
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable pll */
		dev->clk_pll = devm_clk_get(&pdev->dev, "pll8_mlb");
		if (IS_ERR_OR_NULL(dev->clk_pll)) {
			dev_err(&pdev->dev, "unable to get mlb pll clock\n");
			clk_disable_unprepare(dev->clk);
			return -EFAULT;
		}

		writel(0x888, dev->io_base + 0x38);
		clk_prepare_enable(dev->clk_pll);
	}

	return 0;
}

static void fsl_mx6_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	if (dev->clk_speed >= CLK_2048FS)
		clk_disable_unprepare(dev->clk_pll);

	clk_disable_unprepare(dev->clk);
}

static int rcar_gen2_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable MLP pll and LVDS drivers */
		writel(0x03, dev->io_base + 0x600);
		/* set bias */
		writel(0x888, dev->io_base + 0x38);
	} else {
		/* PLL */
		writel(0x04, dev->io_base + 0x600);
	}

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_gen2_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

static int rcar_gen3_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	u32 enable_512fs = dev->clk_speed == CLK_512FS;
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
		return ret;
	}

	/* PLL */
	writel(0x04, dev->io_base + 0x600);

	writel(enable_512fs, dev->io_base + 0x604);

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_gen3_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

/* ]] platform specific functions */

enum dim2_platforms { FSL_MX6, RCAR_GEN2, RCAR_GEN3 };

static struct dim2_platform_data plat_data[] = {
	[FSL_MX6] = {
		.enable = fsl_mx6_enable,
		.disable = fsl_mx6_disable,
	},
	[RCAR_GEN2] = {
		.enable = rcar_gen2_enable,
		.disable = rcar_gen2_disable,
	},
	[RCAR_GEN3] = {
		.enable = rcar_gen3_enable,
		.disable = rcar_gen3_disable,
		.fcnt = 3,
	},
};

static const struct of_device_id dim2_of_match[] = {
	{
		.compatible = "fsl,imx6q-mlb150",
		.data = plat_data + FSL_MX6
	},
	{
		.compatible = "renesas,mlp",
		.data = plat_data + RCAR_GEN2
	},
	{
		.compatible = "renesas,rcar-gen3-mlp",
		.data = plat_data + RCAR_GEN3
	},
	{
		.compatible = "xlnx,axi4-os62420_3pin-1.00.a",
	},
	{
		.compatible = "xlnx,axi4-os62420_6pin-1.00.a",
	},
	{},
};

MODULE_DEVICE_TABLE(of, dim2_of_match);

static struct platform_driver dim2_driver = {
	.probe = dim2_probe,
	.remove_new = dim2_remove,
	.driver = {
		.name = "hdm_dim2",
		.of_match_table = dim2_of_match,
		.dev_groups = dim2_groups,
	},
};

module_platform_driver(dim2_driver);

MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
MODULE_LICENSE("GPL");