// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI AVMM support
//
// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.

#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>

/*
 * This driver implements the regmap operations for a generic SPI
 * master to access the registers of the SPI slave chip which has an
 * Avalon bus in it.
 *
 * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
 * in the SPI slave chip. The IP acts as a bridge to convert encoded streams
 * of bytes from the host to the internal register read/write on the Avalon
 * bus. In order to issue register access requests to the slave chip, the
 * host should send formatted bytes that conform to the transfer protocol.
 * The transfer protocol contains 3 layers: transaction layer, packet layer
 * and physical layer.
 *
 * Reference documents can be found at:
 * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
 *
 * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
 * introduction to the protocol.
 *
 * Chapter "Avalon Packets to Transactions Converter Core" describes
 * the transaction layer.
 *
 * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
 * describes the packet layer.
 *
 * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
 * physical layer.
 *
 *
 * When the host issues a regmap read/write, the driver transforms the
 * request into a byte stream layer by layer. It formats the register
 * address, value and length into the transaction layer request, then
 * converts the request to a packet layer byte stream and then to a physical
 * layer byte stream. Finally the driver sends the formatted byte stream over
 * the SPI bus to the slave chip.
 *
 * The spi-avmm IP on the slave chip decodes the byte stream and initiates
 * register read/write on its internal Avalon bus, then encodes the response
 * into a byte stream and sends it back to the host.
 *
 * The driver receives the byte stream, reverses the 3 layers of
 * transformation, and finally gets the response value (read-out data for a
 * register read, successfully written size for a register write).
 */

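/*
 * A worked example (the register address and value here are chosen purely
 * for illustration): a single write of 0x12345678 to register address 0x24
 * becomes the transaction layer bytes
 *   00 00 00 04 00 00 00 24 78 56 34 12
 * (code 00, rsvd 00, size as be16 00 04, addr as be32 00 00 00 24, value as
 * le32). The packet layer wraps them with SOP/channel and inserts EOP before
 * the last byte:
 *   7a 7c 00 00 00 00 04 00 00 00 24 78 56 34 7b 12
 * No byte in this stream needs escaping and the 16-byte length is already
 * aligned, so this is also the physical layer stream handed to the SPI core
 * (the words are additionally swapped just before the transfer when BPW is
 * 32, see br_swap_words_32()).
 */
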
#define PKT_SOP			0x7a
#define PKT_EOP			0x7b
#define PKT_CHANNEL		0x7c
#define PKT_ESC			0x7d

#define PHY_IDLE		0x4a
#define PHY_ESC			0x4d

#define TRANS_CODE_WRITE	0x0
#define TRANS_CODE_SEQ_WRITE	0x4
#define TRANS_CODE_READ		0x10
#define TRANS_CODE_SEQ_READ	0x14
#define TRANS_CODE_NO_TRANS	0x7f

#define SPI_AVMM_XFER_TIMEOUT	(msecs_to_jiffies(200))

/* slave's register addr is 32 bits */
#define SPI_AVMM_REG_SIZE	4UL
/* slave's register value is 32 bits */
#define SPI_AVMM_VAL_SIZE	4UL

/*
 * The max rx size could be larger, but considering buffer consumption,
 * it is proper to limit the transfer to 1KB at most.
 */
#define MAX_READ_CNT		256UL
#define MAX_WRITE_CNT		1UL

struct trans_req_header {
	u8 code;
	u8 rsvd;
	__be16 size;
	__be32 addr;
} __packed;

struct trans_resp_header {
	u8 r_code;
	u8 rsvd;
	__be16 size;
} __packed;

#define TRANS_REQ_HD_SIZE	(sizeof(struct trans_req_header))
#define TRANS_RESP_HD_SIZE	(sizeof(struct trans_resp_header))

/*
 * In the transaction layer,
 * the write request format is: transaction request header + data
 * the read request format is: transaction request header
 * the write response format is: transaction response header
 * the read response format is: pure data, no transaction response header
 */
#define TRANS_WR_TX_SIZE(n)	(TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
#define TRANS_RD_TX_SIZE	TRANS_REQ_HD_SIZE
#define TRANS_TX_MAX		TRANS_WR_TX_SIZE(MAX_WRITE_CNT)

#define TRANS_RD_RX_SIZE(n)	(SPI_AVMM_VAL_SIZE * (n))
#define TRANS_WR_RX_SIZE	TRANS_RESP_HD_SIZE
#define TRANS_RX_MAX		TRANS_RD_RX_SIZE(MAX_READ_CNT)

/* tx & rx share one transaction layer buffer */
#define TRANS_BUF_SIZE		((TRANS_TX_MAX > TRANS_RX_MAX) ?	\
				 TRANS_TX_MAX : TRANS_RX_MAX)
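/*
 * With the counts above, TRANS_TX_MAX is 8 + 4 * 1 = 12 bytes and
 * TRANS_RX_MAX is 4 * 256 = 1024 bytes, so TRANS_BUF_SIZE is 1KB.
 */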

/*
 * In the tx phase, the host prepares all the phy layer bytes of a request in
 * the phy buffer and sends them in a batch.
 *
 * The packet layer and physical layer define several special chars for
 * various purposes. When a transaction layer byte hits one of these special
 * chars, it should be escaped. The escape rule is: "escape char first,
 * followed by the byte XOR'ed with 0x20". E.g. a transaction layer byte
 * 0x7a is sent as 0x7d 0x5a, and a 0x4a is sent as 0x4d 0x6a.
 *
 * This macro defines the max possible length of the phy data. In the worst
 * case, all transaction layer bytes need to be escaped (so the data length
 * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
 * we should make sure the length is aligned to the SPI BPW.
 */
#define PHY_TX_MAX		ALIGN(2 * TRANS_TX_MAX + 4, 4)
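
/*
 * For example, with TRANS_TX_MAX of 12 bytes (see above), PHY_TX_MAX is
 * ALIGN(2 * 12 + 4, 4) = 28 bytes.
 */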

/*
 * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from the slave,
 * so the max length of the rx byte stream is unpredictable. The driver
 * therefore reads the words one by one, and parses each word immediately
 * into the transaction layer buffer. Only one word length of the phy buffer
 * is used for rx.
 */
#define PHY_BUF_SIZE		PHY_TX_MAX

/**
 * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
 *
 * @spi: spi slave associated with this bridge.
 * @word_len: bytes of word for spi transfer.
 * @trans_len: length of valid data in trans_buf.
 * @phy_len: length of valid data in phy_buf.
 * @trans_buf: the bridge buffer for transaction layer data.
 * @phy_buf: the bridge buffer for physical layer data.
 * @swap_words: the word swapping cb for phy data. NULL if not needed.
 *
 * As a device's registers are implemented on the AVMM bus address space, the
 * driver issues formatted requests to the SPI slave to AVMM bus master
 * bridge to perform register access.
 */
struct spi_avmm_bridge {
	struct spi_device *spi;
	unsigned char word_len;
	unsigned int trans_len;
	unsigned int phy_len;
	/* bridge buffer used in translation between protocol layers */
	char trans_buf[TRANS_BUF_SIZE];
	char phy_buf[PHY_BUF_SIZE];
	void (*swap_words)(void *buf, unsigned int len);
};

static void br_swap_words_32(void *buf, unsigned int len)
{
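	/* Reverse the byte order within each 32-bit word of the buffer */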
	swab32_array(buf, len / 4);
}

/*
 * Format the transaction layer data in br->trans_buf according to the
 * register access request, and store the valid transaction layer data
 * length in br->trans_len.
 */
static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
			       u32 *wr_val, u32 count)
{
	struct trans_req_header *header;
	unsigned int trans_len;
	u8 code;
	__le32 *data;
	int i;

	if (is_read) {
		if (count == 1)
			code = TRANS_CODE_READ;
		else
			code = TRANS_CODE_SEQ_READ;
	} else {
		if (count == 1)
			code = TRANS_CODE_WRITE;
		else
			code = TRANS_CODE_SEQ_WRITE;
	}

	header = (struct trans_req_header *)br->trans_buf;
	header->code = code;
	header->rsvd = 0;
	header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
	header->addr = cpu_to_be32(reg);

	trans_len = TRANS_REQ_HD_SIZE;

	if (!is_read) {
		trans_len += SPI_AVMM_VAL_SIZE * count;
		if (trans_len > sizeof(br->trans_buf))
			return -ENOMEM;

		data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);

		for (i = 0; i < count; i++)
			*data++ = cpu_to_le32(*wr_val++);
	}

	/* Store the valid trans data length for the next layer */
	br->trans_len = trans_len;

	return 0;
}

/*
 * Convert the transaction layer data (in br->trans_buf) to phy layer data
 * and store it in br->phy_buf. Pad the phy_buf so it is aligned with the
 * SPI's BPW. Store the valid phy layer data length in br->phy_len.
 *
 * The phy_buf length should be aligned with the SPI's BPW. Spare bytes
 * should be padded with PHY_IDLE, then the slave will just drop them.
 *
 * The driver will not simply pad 4a at the tail. The concern is that the
 * driver does not store MISO data during the tx phase; if the driver padded
 * 4a at the tail, the slave might be fast enough to respond during the
 * padding time, and those rx bytes would be lost. In the following case,
 * 7a, 7c, 00 would be lost.
 * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
 * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
 *
 * So the driver moves the EOP and the bytes after it to the end of the
 * aligned size, then fills the hole with PHY_IDLE. As follows:
 * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
 * after pad  ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
 * Then the slave cannot get the entire packet before the tx phase is over,
 * so it cannot respond to anything either.
 */
static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
{
	char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
	unsigned int aligned_phy_len, move_size;
	bool need_esc = false;

	tb = br->trans_buf;
	tb_end = tb + br->trans_len;
	pb = br->phy_buf;
	pb_limit = pb + ARRAY_SIZE(br->phy_buf);

	*pb++ = PKT_SOP;

	/*
	 * The driver doesn't support multiple channels so the channel number
	 * is always 0.
	 */
	*pb++ = PKT_CHANNEL;
	*pb++ = 0x0;

	for (; pb < pb_limit && tb < tb_end; pb++) {
		if (need_esc) {
			*pb = *tb++ ^ 0x20;
			need_esc = false;
			continue;
		}

		/* EOP should be inserted before the last valid char */
		if (tb == tb_end - 1 && !pb_eop) {
			*pb = PKT_EOP;
			pb_eop = pb;
			continue;
		}

		/*
		 * Insert an ESCAPE char if the data value equals any special
		 * char.
		 */
		switch (*tb) {
		case PKT_SOP:
		case PKT_EOP:
		case PKT_CHANNEL:
		case PKT_ESC:
			*pb = PKT_ESC;
			need_esc = true;
			break;
		case PHY_IDLE:
		case PHY_ESC:
			*pb = PHY_ESC;
			need_esc = true;
			break;
		default:
			*pb = *tb++;
			break;
		}
	}

	/* The phy buffer is used up but transaction layer data remains */
	if (tb < tb_end)
		return -ENOMEM;

	/* Store valid phy data length for spi transfer */
	br->phy_len = pb - br->phy_buf;

	if (br->word_len == 1)
		return 0;

	/* Do phy buf padding if word_len > 1 byte. */
	aligned_phy_len = ALIGN(br->phy_len, br->word_len);
	if (aligned_phy_len > sizeof(br->phy_buf))
		return -ENOMEM;

	if (aligned_phy_len == br->phy_len)
		return 0;

	/* move EOP and bytes after EOP to the end of the aligned size */
	move_size = pb - pb_eop;
	memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);

	/* fill the hole with PHY_IDLEs */
	memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);

	/* update the phy data length */
	br->phy_len = aligned_phy_len;

	return 0;
}

/*
 * In the tx phase, the slave only returns PHY_IDLE (0x4a). So the driver
 * will ignore rx in the tx phase.
 */
static int br_do_tx(struct spi_avmm_bridge *br)
{
	/* reorder words for spi transfer */
	if (br->swap_words)
		br->swap_words(br->phy_buf, br->phy_len);

	/* send all data in phy_buf */
	return spi_write(br->spi, br->phy_buf, br->phy_len);
}

/*
 * This function reads the rx byte stream from SPI word by word and converts
 * it to transaction layer data in br->trans_buf. It also stores the length
 * of the rx transaction layer data in br->trans_len.
 *
 * The slave may send an unknown number of PHY_IDLEs in the rx phase, so we
 * cannot prepare a fixed length buffer to receive all of the rx data in a
 * batch. We have to read word by word and convert it to transaction layer
 * data immediately.
 */
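/*
 * For example (an illustrative stream), the response to a single write may
 * arrive as:
 *   4a 4a 7a 7c 00 80 00 00 7b 04
 * The leading PHY_IDLEs are dropped, the SOP/channel/EOP chars are consumed
 * by the packet layer parsing below, and the transaction layer is left with
 * the response header bytes 80 00 00 04.
 */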
static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
{
	bool eop_found = false, channel_found = false, esc_found = false;
	bool valid_word = false, last_try = false;
	struct device *dev = &br->spi->dev;
	char *pb, *tb_limit, *tb = NULL;
	unsigned long poll_timeout;
	int ret, i;

	tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
	pb = br->phy_buf;
	poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
	while (tb < tb_limit) {
		ret = spi_read(br->spi, pb, br->word_len);
		if (ret)
			return ret;

		/* reorder the word back */
		if (br->swap_words)
			br->swap_words(pb, br->word_len);

		valid_word = false;
		for (i = 0; i < br->word_len; i++) {
			/* drop everything before the first SOP */
			if (!tb && pb[i] != PKT_SOP)
				continue;

			/* drop PHY_IDLE */
			if (pb[i] == PHY_IDLE)
				continue;

			valid_word = true;

			/*
			 * We don't support multiple channels, so error out if
			 * a non-zero channel number is found.
			 */
			if (channel_found) {
				if (pb[i] != 0) {
					dev_err(dev, "%s channel num != 0\n",
						__func__);
					return -EFAULT;
				}

				channel_found = false;
				continue;
			}

			switch (pb[i]) {
			case PKT_SOP:
				/*
				 * reset the parsing if a second SOP appears.
				 */
				tb = br->trans_buf;
				eop_found = false;
				channel_found = false;
				esc_found = false;
				break;
			case PKT_EOP:
				/*
				 * No special char is expected after an ESC
				 * char. No special char (except ESC &
				 * PHY_IDLE) is expected after an EOP char.
				 *
				 * The special chars are all dropped.
				 */
				if (esc_found || eop_found)
					return -EFAULT;

				eop_found = true;
				break;
			case PKT_CHANNEL:
				if (esc_found || eop_found)
					return -EFAULT;

				channel_found = true;
				break;
			case PKT_ESC:
			case PHY_ESC:
				if (esc_found)
					return -EFAULT;

				esc_found = true;
				break;
			default:
				/* Record the normal byte in trans_buf. */
				if (esc_found) {
					*tb++ = pb[i] ^ 0x20;
					esc_found = false;
				} else {
					*tb++ = pb[i];
				}

				/*
				 * We get the last normal byte after EOP, it is
				 * time we finish. Normally the function should
				 * return here.
				 */
				if (eop_found) {
					br->trans_len = tb - br->trans_buf;
					return 0;
				}
			}
		}

		if (valid_word) {
			/* update the poll timeout when we get a valid word */
			poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
			last_try = false;
		} else {
			/*
			 * We time out when rx keeps being invalid for some
			 * time. But it is possible we are scheduled out for
			 * a long time after a spi_read, so that by the time
			 * we are scheduled back in, a SW timeout happens
			 * while the HW may have worked fine and been ready
			 * long ago. So we need to do an extra read; if we
			 * get a valid word then we can continue rx,
			 * otherwise a real HW issue has happened.
			 */
			if (last_try)
				return -ETIMEDOUT;

			if (time_after(jiffies, poll_timeout))
				last_try = true;
		}
	}

	/*
	 * We have used up all of the transaction layer buffer but still
	 * cannot find the end of the byte stream.
	 */
	dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
		__func__);

	return -EFAULT;
}

/*
 * For read transactions, the avmm bus will directly return register values
 * without a transaction response header.
 */
static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
				u32 *val, unsigned int expected_count)
{
	unsigned int i, trans_len = br->trans_len;
	__le32 *data;

	if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
		return -EFAULT;

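	/* Register values are carried little endian on the transaction layer */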
	data = (__le32 *)br->trans_buf;
	for (i = 0; i < expected_count; i++)
		*val++ = le32_to_cpu(*data++);

	return 0;
}

/*
 * For write transactions, the slave will return a transaction response
 * header.
 */
static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
				unsigned int expected_count)
{
	unsigned int trans_len = br->trans_len;
	struct trans_resp_header *resp;
	u8 code;
	u16 val_len;

	if (trans_len != TRANS_RESP_HD_SIZE)
		return -EFAULT;

	resp = (struct trans_resp_header *)br->trans_buf;

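	/* The response code is the request code with its MSB set; clear it */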
	code = resp->r_code ^ 0x80;
	val_len = be16_to_cpu(resp->size);
	if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
		return -EFAULT;

	/* error out if the trans code doesn't align with the val size */
	if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
	    (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
		return -EFAULT;

	return 0;
}

static int do_reg_access(void *context, bool is_read, unsigned int reg,
			 unsigned int *value, unsigned int count)
{
	struct spi_avmm_bridge *br = context;
	int ret;

	/* invalidate bridge buffers first */
	br->trans_len = 0;
	br->phy_len = 0;

	ret = br_trans_tx_prepare(br, is_read, reg, value, count);
	if (ret)
		return ret;

	ret = br_pkt_phy_tx_prepare(br);
	if (ret)
		return ret;

	ret = br_do_tx(br);
	if (ret)
		return ret;

	ret = br_do_rx_and_pkt_phy_parse(br);
	if (ret)
		return ret;

	if (is_read)
		return br_rd_trans_rx_parse(br, value, count);
	else
		return br_wr_trans_rx_parse(br, count);
}

static int regmap_spi_avmm_gather_write(void *context,
					const void *reg_buf, size_t reg_len,
					const void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
			     val_len / SPI_AVMM_VAL_SIZE);
}

static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
{
	if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
		return -EINVAL;

	return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
					    data + SPI_AVMM_REG_SIZE,
					    bytes - SPI_AVMM_REG_SIZE);
}

static int regmap_spi_avmm_read(void *context,
				const void *reg_buf, size_t reg_len,
				void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
			     val_len / SPI_AVMM_VAL_SIZE);
}

static struct spi_avmm_bridge *
spi_avmm_bridge_ctx_gen(struct spi_device *spi)
{
	struct spi_avmm_bridge *br;

	if (!spi)
		return ERR_PTR(-ENODEV);

	/* Only support BPW == 8 or 32 now. Try 32 BPW first. */
	spi->mode = SPI_MODE_1;
	spi->bits_per_word = 32;
	if (spi_setup(spi)) {
		spi->bits_per_word = 8;
		if (spi_setup(spi))
			return ERR_PTR(-EINVAL);
	}

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br)
		return ERR_PTR(-ENOMEM);

	br->spi = spi;
	br->word_len = spi->bits_per_word / 8;
	if (br->word_len == 4) {
		/*
		 * The protocol requires little endian byte order but MSB
		 * first, so the driver needs to swap the byte order word by
		 * word if the word length > 1.
		 */
		br->swap_words = br_swap_words_32;
	}

	return br;
}

static void spi_avmm_bridge_ctx_free(void *context)
{
	kfree(context);
}

static const struct regmap_bus regmap_spi_avmm_bus = {
	.write = regmap_spi_avmm_write,
	.gather_write = regmap_spi_avmm_gather_write,
	.read = regmap_spi_avmm_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
	.free_context = spi_avmm_bridge_ctx_free,
};

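/*
 * A minimal usage sketch from a SPI driver's probe(), via the
 * devm_regmap_init_spi_avmm() wrapper (the regmap_config values below are
 * illustrative, not mandated by this bus):
 *
 *	static const struct regmap_config cfg = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.reg_stride = 4,
 *	};
 *
 *	struct regmap *map = devm_regmap_init_spi_avmm(spi, &cfg);
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */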
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
				      const struct regmap_config *config,
				      struct lock_class_key *lock_key,
				      const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
			    bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);

struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
					   const struct regmap_config *config,
					   struct lock_class_key *lock_key,
					   const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
				 bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);

MODULE_LICENSE("GPL v2");