// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors:	Shlomi Gridish <gridish@freescale.com>
 *		Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Slow API Set - UCC Slow specific routines implementations.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/export.h>

#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>

#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_slow.h>

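/**
 * ucc_slow_get_qe_cr_subblock - translate a UCC index into the
 *                               corresponding QE command-register
 *                               subblock code
 * @uccs_num: UCC index (0-7)
 *
 * Return: the QE_CR_SUBBLOCK_UCCSLOWn code for @uccs_num, or
 * QE_CR_SUBBLOCK_INVALID if the index is out of range.
 */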
u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
{
	switch (uccs_num) {
	case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
	case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
	case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
	case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
	case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
	case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
	case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
	case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}
EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);

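/**
 * ucc_slow_graceful_stop_tx - issue a QE Graceful Stop Tx command for
 *                             this UCC
 * @uccs: private data of the UCC in question
 */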
void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);

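/**
 * ucc_slow_stop_tx - issue a QE Stop Tx command for this UCC
 * @uccs: private data of the UCC in question
 */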
void ucc_slow_stop_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
EXPORT_SYMBOL(ucc_slow_stop_tx);

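/**
 * ucc_slow_restart_tx - issue a QE Restart Tx command for this UCC
 * @uccs: private data of the UCC in question
 */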
void ucc_slow_restart_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
EXPORT_SYMBOL(ucc_slow_restart_tx);

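/**
 * ucc_slow_enable - enable reception and/or transmission on this UCC
 * @uccs: private data of the UCC in question
 * @mode: bitmask of COMM_DIR_RX and/or COMM_DIR_TX
 */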
void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
{
	struct ucc_slow __iomem *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Enable reception and/or transmission on this UCC. */
	gumr_l = ioread32be(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 1;
	}
	iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);

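/**
 * ucc_slow_disable - disable reception and/or transmission on this UCC
 * @uccs: private data of the UCC in question
 * @mode: bitmask of COMM_DIR_RX and/or COMM_DIR_TX
 */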
void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
{
	struct ucc_slow __iomem *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr_l = ioread32be(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 0;
	}
	iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);

/**
 * ucc_slow_init - initialize a UCC for slow operation
 * @us_info: UCC configuration; before calling, the caller must fill in
 *           at least the register base (regs), ucc_num, protocol, the
 *           Rx/Tx BD ring lengths, max_rx_buf_length and the clocking
 *           and mux fields consumed below
 * @uccs_ret: on success, set to the newly allocated
 *            struct ucc_slow_private
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int ucc_slow_init(struct ucc_slow_info *us_info, struct ucc_slow_private **uccs_ret)
{
	struct ucc_slow_private *uccs;
	u32 i;
	struct ucc_slow __iomem *us_regs;
	u32 gumr;
	struct qe_bd __iomem *bd;
	u32 id;
	u32 command;
	int ret = 0;

	if (!us_info)
		return -EINVAL;

	/* check if the UCC port number is in range. */
	if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
		return -EINVAL;
	}

	/*
	 * Set mrblr
	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
	 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
	 * case when QE accepts 32 bits at a time.
	 */
	if ((!us_info->rfw) &&
	    (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
		printk(KERN_ERR "max_rx_buf_length not aligned.\n");
		return -EINVAL;
	}

	uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
	if (!uccs) {
		printk(KERN_ERR "%s: Cannot allocate private data\n",
		       __func__);
		return -ENOMEM;
	}
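	/*
	 * Mark the MURAM offsets as not-yet-allocated so that
	 * ucc_slow_free() can be used for cleanup on any error path below.
	 */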
	uccs->rx_base_offset = -1;
	uccs->tx_base_offset = -1;
	uccs->us_pram_offset = -1;

	/* Fill slow UCC structure */
	uccs->us_info = us_info;
	/* Set the PHY base address */
	uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
	if (uccs->us_regs == NULL) {
		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
		kfree(uccs);
		return -ENOMEM;
	}

	us_regs = uccs->us_regs;
	uccs->p_ucce = &us_regs->ucce;
	uccs->p_uccm = &us_regs->uccm;

	/* Get PRAM base */
	uccs->us_pram_offset =
		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (uccs->us_pram_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
		ucc_slow_free(uccs);
		return -ENOMEM;
	}
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
		     uccs->us_pram_offset);

	uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);

	/* Set UCC to slow type */
	ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
	if (ret) {
		printk(KERN_ERR "%s: cannot set UCC type", __func__);
		ucc_slow_free(uccs);
		return ret;
	}

	iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);

	INIT_LIST_HEAD(&uccs->confQ);

	/* Allocate BDs. */
	uccs->rx_base_offset =
		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
			       QE_ALIGNMENT_OF_BD);
	if (uccs->rx_base_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
		       us_info->rx_bd_ring_len);
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	uccs->tx_base_offset =
		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
			       QE_ALIGNMENT_OF_BD);
	if (uccs->tx_base_offset < 0) {
		printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	/* Init Tx bds */
	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
		/* clear bd buffer */
		iowrite32be(0, &bd->buf);
		/* set bd status and length */
		iowrite32be(0, (u32 __iomem *)bd);
		bd++;
	}
	/* for last BD set Wrap bit */
	iowrite32be(0, &bd->buf);
	iowrite32be(T_W, (u32 __iomem *)bd);

	/* Init Rx bds */
	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
		/* set bd status and length */
		iowrite32be(0, (u32 __iomem *)bd);
		/* clear bd buffer */
		iowrite32be(0, &bd->buf);
		bd++;
	}
	/* for last BD set Wrap bit */
	iowrite32be(R_W, (u32 __iomem *)bd);
	iowrite32be(0, &bd->buf);

	/* Set GUMR (For more details see the hardware spec.). */
	/* gumr_h */
	gumr = us_info->tcrc;
	if (us_info->cdp)
		gumr |= UCC_SLOW_GUMR_H_CDP;
	if (us_info->ctsp)
		gumr |= UCC_SLOW_GUMR_H_CTSP;
	if (us_info->cds)
		gumr |= UCC_SLOW_GUMR_H_CDS;
	if (us_info->ctss)
		gumr |= UCC_SLOW_GUMR_H_CTSS;
	if (us_info->tfl)
		gumr |= UCC_SLOW_GUMR_H_TFL;
	if (us_info->rfw)
		gumr |= UCC_SLOW_GUMR_H_RFW;
	if (us_info->txsy)
		gumr |= UCC_SLOW_GUMR_H_TXSY;
	if (us_info->rtsm)
		gumr |= UCC_SLOW_GUMR_H_RTSM;
	iowrite32be(gumr, &us_regs->gumr_h);

	/* gumr_l */
	gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
	       (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
		gumr |= UCC_SLOW_GUMR_L_RINV;
	if (us_info->tinv)
		gumr |= UCC_SLOW_GUMR_L_TINV;
	if (us_info->tend)
		gumr |= UCC_SLOW_GUMR_L_TEND;
	iowrite32be(gumr, &us_regs->gumr_l);

	/* Function code registers */

	/*
	 * If the data is in cacheable memory, the 'global' bit in the
	 * function code should be set.
	 */
	iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
	iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);

	/* rbase, tbase are offsets from MURAM base */
	iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
	iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
	/* Set Tsa or NMSI mode. */
	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
	/* If NMSI (not Tsa), set Tx and Rx clock. */
	if (!us_info->tsa) {
		/* Rx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
					COMM_DIR_RX)) {
			printk(KERN_ERR "%s: illegal value for RX clock\n",
			       __func__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
		/* Tx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
					COMM_DIR_TX)) {
			printk(KERN_ERR "%s: illegal value for TX clock\n",
			       __func__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
	}

	/* Set interrupt mask register at UCC level. */
	iowrite16be(us_info->uccm_mask, &us_regs->uccm);

	/*
	 * First, clear anything pending at UCC level; otherwise, old
	 * garbage may come through as soon as the dam is opened.
	 */

	/* Writing '1' clears */
	iowrite16be(0xffff, &us_regs->ucce);

	/* Issue QE Init command */
	if (us_info->init_tx && us_info->init_rx)
		command = QE_INIT_TX_RX;
	else if (us_info->init_tx)
		command = QE_INIT_TX;
	else
		command = QE_INIT_RX;	/* We know at least one is TRUE */

	qe_issue_cmd(command, id, us_info->protocol, 0);

	*uccs_ret = uccs;
	return 0;
}
EXPORT_SYMBOL(ucc_slow_init);
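
/*
 * Typical call sequence, as an illustrative sketch only: the field
 * values and the ucc_regs_phys symbol below are hypothetical and would
 * normally come from the device tree or board setup code.
 *
 *	struct ucc_slow_info us_info = {
 *		.ucc_num = 0,
 *		.regs = ucc_regs_phys,
 *		.protocol = QE_CR_PROTOCOL_UNSPECIFIED,
 *		.rx_clock = QE_BRG1,
 *		.tx_clock = QE_BRG1,
 *		.rx_bd_ring_len = 16,
 *		.tx_bd_ring_len = 16,
 *		.max_rx_buf_length = 32,
 *		.init_tx = 1,
 *		.init_rx = 1,
 *	};
 *	struct ucc_slow_private *uccs;
 *	int ret;
 *
 *	ret = ucc_slow_init(&us_info, &uccs);
 *	if (ret)
 *		return ret;
 *	ucc_slow_enable(uccs, COMM_DIR_RX_AND_TX);
 *	...
 *	ucc_slow_disable(uccs, COMM_DIR_RX_AND_TX);
 *	ucc_slow_free(uccs);
 */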
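
/**
 * ucc_slow_free - release the resources held by a slow UCC
 * @uccs: private data of the UCC in question
 *
 * Frees the PRAM and BD ring MURAM regions allocated by ucc_slow_init(),
 * unmaps the register window, and frees @uccs itself.
 */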
void ucc_slow_free(struct ucc_slow_private *uccs)
{
	if (!uccs)
		return;

	qe_muram_free(uccs->rx_base_offset);
	qe_muram_free(uccs->tx_base_offset);
	qe_muram_free(uccs->us_pram_offset);

	if (uccs->us_regs)
		iounmap(uccs->us_regs);

	kfree(uccs);
}
EXPORT_SYMBOL(ucc_slow_free);