1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | // SPI init/core code |
3 | // |
4 | // Copyright (C) 2005 David Brownell |
5 | // Copyright (C) 2008 Secret Lab Technologies Ltd. |
6 | |
7 | #include <linux/acpi.h> |
8 | #include <linux/cache.h> |
9 | #include <linux/clk/clk-conf.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/device.h> |
12 | #include <linux/dmaengine.h> |
13 | #include <linux/dma-mapping.h> |
14 | #include <linux/export.h> |
15 | #include <linux/gpio/consumer.h> |
16 | #include <linux/highmem.h> |
17 | #include <linux/idr.h> |
18 | #include <linux/init.h> |
19 | #include <linux/ioport.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/kthread.h> |
22 | #include <linux/mod_devicetable.h> |
23 | #include <linux/mutex.h> |
24 | #include <linux/of_device.h> |
25 | #include <linux/of_irq.h> |
26 | #include <linux/percpu.h> |
27 | #include <linux/platform_data/x86/apple.h> |
28 | #include <linux/pm_domain.h> |
29 | #include <linux/pm_runtime.h> |
30 | #include <linux/property.h> |
31 | #include <linux/ptp_clock_kernel.h> |
32 | #include <linux/sched/rt.h> |
33 | #include <linux/slab.h> |
34 | #include <linux/spi/spi.h> |
35 | #include <linux/spi/spi-mem.h> |
36 | #include <uapi/linux/sched/types.h> |
37 | |
38 | #define CREATE_TRACE_POINTS |
39 | #include <trace/events/spi.h> |
40 | EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start); |
41 | EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); |
42 | |
43 | #include "internals.h" |
44 | |
45 | static DEFINE_IDR(spi_master_idr); |
46 | |
47 | static void spidev_release(struct device *dev) |
48 | { |
49 | struct spi_device *spi = to_spi_device(dev); |
50 | |
	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
55 | } |
56 | |
57 | static ssize_t |
58 | modalias_show(struct device *dev, struct device_attribute *a, char *buf) |
59 | { |
60 | const struct spi_device *spi = to_spi_device(dev); |
61 | int len; |
62 | |
63 | len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); |
64 | if (len != -ENODEV) |
65 | return len; |
66 | |
	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
68 | } |
69 | static DEVICE_ATTR_RO(modalias); |
70 | |
71 | static ssize_t driver_override_store(struct device *dev, |
72 | struct device_attribute *a, |
73 | const char *buf, size_t count) |
74 | { |
75 | struct spi_device *spi = to_spi_device(dev); |
76 | int ret; |
77 | |
	ret = driver_set_override(dev, &spi->driver_override, buf, count);
79 | if (ret) |
80 | return ret; |
81 | |
82 | return count; |
83 | } |
84 | |
85 | static ssize_t driver_override_show(struct device *dev, |
86 | struct device_attribute *a, char *buf) |
87 | { |
88 | const struct spi_device *spi = to_spi_device(dev); |
89 | ssize_t len; |
90 | |
91 | device_lock(dev); |
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
93 | device_unlock(dev); |
94 | return len; |
95 | } |
96 | static DEVICE_ATTR_RW(driver_override); |
97 | |
98 | static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev) |
99 | { |
100 | struct spi_statistics __percpu *pcpu_stats; |
101 | |
102 | if (dev) |
103 | pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics); |
104 | else |
105 | pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL); |
106 | |
107 | if (pcpu_stats) { |
108 | int cpu; |
109 | |
110 | for_each_possible_cpu(cpu) { |
111 | struct spi_statistics *stat; |
112 | |
113 | stat = per_cpu_ptr(pcpu_stats, cpu); |
			u64_stats_init(&stat->syncp);
115 | } |
116 | } |
117 | return pcpu_stats; |
118 | } |
119 | |
120 | static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat, |
121 | char *buf, size_t offset) |
122 | { |
123 | u64 val = 0; |
124 | int i; |
125 | |
126 | for_each_possible_cpu(i) { |
127 | const struct spi_statistics *pcpu_stats; |
128 | u64_stats_t *field; |
129 | unsigned int start; |
130 | u64 inc; |
131 | |
132 | pcpu_stats = per_cpu_ptr(stat, i); |
133 | field = (void *)pcpu_stats + offset; |
134 | do { |
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
138 | val += inc; |
139 | } |
	return sysfs_emit(buf, "%llu\n", val);
141 | } |
142 | |
143 | #define SPI_STATISTICS_ATTRS(field, file) \ |
144 | static ssize_t spi_controller_##field##_show(struct device *dev, \ |
145 | struct device_attribute *attr, \ |
146 | char *buf) \ |
147 | { \ |
148 | struct spi_controller *ctlr = container_of(dev, \ |
149 | struct spi_controller, dev); \ |
150 | return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \ |
151 | } \ |
152 | static struct device_attribute dev_attr_spi_controller_##field = { \ |
153 | .attr = { .name = file, .mode = 0444 }, \ |
154 | .show = spi_controller_##field##_show, \ |
155 | }; \ |
156 | static ssize_t spi_device_##field##_show(struct device *dev, \ |
157 | struct device_attribute *attr, \ |
158 | char *buf) \ |
159 | { \ |
160 | struct spi_device *spi = to_spi_device(dev); \ |
161 | return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \ |
162 | } \ |
163 | static struct device_attribute dev_attr_spi_device_##field = { \ |
164 | .attr = { .name = file, .mode = 0444 }, \ |
165 | .show = spi_device_##field##_show, \ |
166 | } |
167 | |
168 | #define SPI_STATISTICS_SHOW_NAME(name, file, field) \ |
169 | static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \ |
170 | char *buf) \ |
171 | { \ |
172 | return spi_emit_pcpu_stats(stat, buf, \ |
173 | offsetof(struct spi_statistics, field)); \ |
174 | } \ |
175 | SPI_STATISTICS_ATTRS(name, file) |
176 | |
177 | #define SPI_STATISTICS_SHOW(field) \ |
178 | SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ |
179 | field) |
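/*
 * For reference, SPI_STATISTICS_SHOW(messages) expands (roughly) to a
 * spi_statistics_messages_show() helper that sums the per-CPU "messages"
 * counters via spi_emit_pcpu_stats(), plus two read-only sysfs attributes
 * named "messages": dev_attr_spi_controller_messages and
 * dev_attr_spi_device_messages.
 */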
180 | |
181 | SPI_STATISTICS_SHOW(messages); |
182 | SPI_STATISTICS_SHOW(transfers); |
183 | SPI_STATISTICS_SHOW(errors); |
184 | SPI_STATISTICS_SHOW(timedout); |
185 | |
186 | SPI_STATISTICS_SHOW(spi_sync); |
187 | SPI_STATISTICS_SHOW(spi_sync_immediate); |
188 | SPI_STATISTICS_SHOW(spi_async); |
189 | |
190 | SPI_STATISTICS_SHOW(bytes); |
191 | SPI_STATISTICS_SHOW(bytes_rx); |
192 | SPI_STATISTICS_SHOW(bytes_tx); |
193 | |
194 | #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ |
195 | SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ |
196 | "transfer_bytes_histo_" number, \ |
197 | transfer_bytes_histo[index]) |
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
215 | |
216 | SPI_STATISTICS_SHOW(transfers_split_maxsize); |
217 | |
218 | static struct attribute *spi_dev_attrs[] = { |
219 | &dev_attr_modalias.attr, |
220 | &dev_attr_driver_override.attr, |
221 | NULL, |
222 | }; |
223 | |
224 | static const struct attribute_group spi_dev_group = { |
225 | .attrs = spi_dev_attrs, |
226 | }; |
227 | |
228 | static struct attribute *spi_device_statistics_attrs[] = { |
229 | &dev_attr_spi_device_messages.attr, |
230 | &dev_attr_spi_device_transfers.attr, |
231 | &dev_attr_spi_device_errors.attr, |
232 | &dev_attr_spi_device_timedout.attr, |
233 | &dev_attr_spi_device_spi_sync.attr, |
234 | &dev_attr_spi_device_spi_sync_immediate.attr, |
235 | &dev_attr_spi_device_spi_async.attr, |
236 | &dev_attr_spi_device_bytes.attr, |
237 | &dev_attr_spi_device_bytes_rx.attr, |
238 | &dev_attr_spi_device_bytes_tx.attr, |
239 | &dev_attr_spi_device_transfer_bytes_histo0.attr, |
240 | &dev_attr_spi_device_transfer_bytes_histo1.attr, |
241 | &dev_attr_spi_device_transfer_bytes_histo2.attr, |
242 | &dev_attr_spi_device_transfer_bytes_histo3.attr, |
243 | &dev_attr_spi_device_transfer_bytes_histo4.attr, |
244 | &dev_attr_spi_device_transfer_bytes_histo5.attr, |
245 | &dev_attr_spi_device_transfer_bytes_histo6.attr, |
246 | &dev_attr_spi_device_transfer_bytes_histo7.attr, |
247 | &dev_attr_spi_device_transfer_bytes_histo8.attr, |
248 | &dev_attr_spi_device_transfer_bytes_histo9.attr, |
249 | &dev_attr_spi_device_transfer_bytes_histo10.attr, |
250 | &dev_attr_spi_device_transfer_bytes_histo11.attr, |
251 | &dev_attr_spi_device_transfer_bytes_histo12.attr, |
252 | &dev_attr_spi_device_transfer_bytes_histo13.attr, |
253 | &dev_attr_spi_device_transfer_bytes_histo14.attr, |
254 | &dev_attr_spi_device_transfer_bytes_histo15.attr, |
255 | &dev_attr_spi_device_transfer_bytes_histo16.attr, |
256 | &dev_attr_spi_device_transfers_split_maxsize.attr, |
257 | NULL, |
258 | }; |
259 | |
260 | static const struct attribute_group spi_device_statistics_group = { |
261 | .name = "statistics" , |
262 | .attrs = spi_device_statistics_attrs, |
263 | }; |
264 | |
265 | static const struct attribute_group *spi_dev_groups[] = { |
266 | &spi_dev_group, |
267 | &spi_device_statistics_group, |
268 | NULL, |
269 | }; |
270 | |
271 | static struct attribute *spi_controller_statistics_attrs[] = { |
272 | &dev_attr_spi_controller_messages.attr, |
273 | &dev_attr_spi_controller_transfers.attr, |
274 | &dev_attr_spi_controller_errors.attr, |
275 | &dev_attr_spi_controller_timedout.attr, |
276 | &dev_attr_spi_controller_spi_sync.attr, |
277 | &dev_attr_spi_controller_spi_sync_immediate.attr, |
278 | &dev_attr_spi_controller_spi_async.attr, |
279 | &dev_attr_spi_controller_bytes.attr, |
280 | &dev_attr_spi_controller_bytes_rx.attr, |
281 | &dev_attr_spi_controller_bytes_tx.attr, |
282 | &dev_attr_spi_controller_transfer_bytes_histo0.attr, |
283 | &dev_attr_spi_controller_transfer_bytes_histo1.attr, |
284 | &dev_attr_spi_controller_transfer_bytes_histo2.attr, |
285 | &dev_attr_spi_controller_transfer_bytes_histo3.attr, |
286 | &dev_attr_spi_controller_transfer_bytes_histo4.attr, |
287 | &dev_attr_spi_controller_transfer_bytes_histo5.attr, |
288 | &dev_attr_spi_controller_transfer_bytes_histo6.attr, |
289 | &dev_attr_spi_controller_transfer_bytes_histo7.attr, |
290 | &dev_attr_spi_controller_transfer_bytes_histo8.attr, |
291 | &dev_attr_spi_controller_transfer_bytes_histo9.attr, |
292 | &dev_attr_spi_controller_transfer_bytes_histo10.attr, |
293 | &dev_attr_spi_controller_transfer_bytes_histo11.attr, |
294 | &dev_attr_spi_controller_transfer_bytes_histo12.attr, |
295 | &dev_attr_spi_controller_transfer_bytes_histo13.attr, |
296 | &dev_attr_spi_controller_transfer_bytes_histo14.attr, |
297 | &dev_attr_spi_controller_transfer_bytes_histo15.attr, |
298 | &dev_attr_spi_controller_transfer_bytes_histo16.attr, |
299 | &dev_attr_spi_controller_transfers_split_maxsize.attr, |
300 | NULL, |
301 | }; |
302 | |
303 | static const struct attribute_group spi_controller_statistics_group = { |
304 | .name = "statistics" , |
305 | .attrs = spi_controller_statistics_attrs, |
306 | }; |
307 | |
308 | static const struct attribute_group *spi_master_groups[] = { |
309 | &spi_controller_statistics_group, |
310 | NULL, |
311 | }; |
312 | |
313 | static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats, |
314 | struct spi_transfer *xfer, |
315 | struct spi_controller *ctlr) |
316 | { |
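	/*
	 * The histogram bucket is floor(log2(len)): fls(len) - 1, clamped
	 * to the histogram size. E.g. a 100-byte transfer gives
	 * fls(100) = 7, so l2len = 6, the "64-127" bucket; len == 0 is
	 * forced into bucket 0 below.
	 */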
317 | int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; |
318 | struct spi_statistics *stats; |
319 | |
320 | if (l2len < 0) |
321 | l2len = 0; |
322 | |
323 | get_cpu(); |
324 | stats = this_cpu_ptr(pcpu_stats); |
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
339 | put_cpu(); |
340 | } |
341 | |
342 | /* |
343 | * modalias support makes "modprobe $MODALIAS" new-style hotplug work, |
344 | * and the sysfs version makes coldplug work too. |
345 | */ |
346 | static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name) |
347 | { |
348 | while (id->name[0]) { |
349 | if (!strcmp(name, id->name)) |
350 | return id; |
351 | id++; |
352 | } |
353 | return NULL; |
354 | } |
355 | |
356 | const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) |
357 | { |
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
361 | } |
362 | EXPORT_SYMBOL_GPL(spi_get_device_id); |
363 | |
364 | const void *spi_get_device_match_data(const struct spi_device *sdev) |
365 | { |
366 | const void *match; |
367 | |
	match = device_get_match_data(&sdev->dev);
369 | if (match) |
370 | return match; |
371 | |
372 | return (const void *)spi_get_device_id(sdev)->driver_data; |
373 | } |
374 | EXPORT_SYMBOL_GPL(spi_get_device_match_data); |
375 | |
376 | static int spi_match_device(struct device *dev, struct device_driver *drv) |
377 | { |
378 | const struct spi_device *spi = to_spi_device(dev); |
379 | const struct spi_driver *sdrv = to_spi_driver(drv); |
380 | |
381 | /* Check override first, and if set, only use the named driver */ |
382 | if (spi->driver_override) |
383 | return strcmp(spi->driver_override, drv->name) == 0; |
384 | |
385 | /* Attempt an OF style match */ |
386 | if (of_driver_match_device(dev, drv)) |
387 | return 1; |
388 | |
389 | /* Then try ACPI */ |
390 | if (acpi_driver_match_device(dev, drv)) |
391 | return 1; |
392 | |
393 | if (sdrv->id_table) |
		return !!spi_match_id(sdrv->id_table, spi->modalias);
395 | |
396 | return strcmp(spi->modalias, drv->name) == 0; |
397 | } |
398 | |
399 | static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env) |
400 | { |
401 | const struct spi_device *spi = to_spi_device(dev); |
402 | int rc; |
403 | |
404 | rc = acpi_device_uevent_modalias(dev, env); |
405 | if (rc != -ENODEV) |
406 | return rc; |
407 | |
	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
409 | } |
410 | |
411 | static int spi_probe(struct device *dev) |
412 | { |
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}
437 | } |
438 | |
439 | return ret; |
440 | } |
441 | |
442 | static void spi_remove(struct device *dev) |
443 | { |
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
450 | } |
451 | |
452 | static void spi_shutdown(struct device *dev) |
453 | { |
454 | if (dev->driver) { |
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);
456 | |
457 | if (sdrv->shutdown) |
458 | sdrv->shutdown(to_spi_device(dev)); |
459 | } |
460 | } |
461 | |
462 | const struct bus_type spi_bus_type = { |
463 | .name = "spi" , |
464 | .dev_groups = spi_dev_groups, |
465 | .match = spi_match_device, |
466 | .uevent = spi_uevent, |
467 | .probe = spi_probe, |
468 | .remove = spi_remove, |
469 | .shutdown = spi_shutdown, |
470 | }; |
471 | EXPORT_SYMBOL_GPL(spi_bus_type); |
472 | |
473 | /** |
474 | * __spi_register_driver - register a SPI driver |
475 | * @owner: owner module of the driver to register |
476 | * @sdrv: the driver to register |
477 | * Context: can sleep |
478 | * |
479 | * Return: zero on success, else a negative error code. |
480 | */ |
481 | int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) |
482 | { |
483 | sdrv->driver.owner = owner; |
484 | sdrv->driver.bus = &spi_bus_type; |
485 | |
486 | /* |
487 | * For Really Good Reasons we use spi: modaliases not of: |
488 | * modaliases for DT so module autoloading won't work if we |
489 | * don't have a spi_device_id as well as a compatible string. |
490 | */ |
491 | if (sdrv->driver.of_match_table) { |
492 | const struct of_device_id *of_id; |
493 | |
494 | for (of_id = sdrv->driver.of_match_table; of_id->compatible[0]; |
495 | of_id++) { |
496 | const char *of_name; |
497 | |
498 | /* Strip off any vendor prefix */ |
499 | of_name = strnchr(of_id->compatible, |
500 | sizeof(of_id->compatible), ','); |
501 | if (of_name) |
502 | of_name++; |
503 | else |
504 | of_name = of_id->compatible; |
505 | |
506 | if (sdrv->id_table) { |
507 | const struct spi_device_id *spi_id; |
508 | |
				spi_id = spi_match_id(sdrv->id_table, of_name);
510 | if (spi_id) |
511 | continue; |
512 | } else { |
513 | if (strcmp(sdrv->driver.name, of_name) == 0) |
514 | continue; |
515 | } |
516 | |
517 | pr_warn("SPI driver %s has no spi_device_id for %s\n" , |
518 | sdrv->driver.name, of_id->compatible); |
519 | } |
520 | } |
521 | |
	return driver_register(&sdrv->driver);
523 | } |
524 | EXPORT_SYMBOL_GPL(__spi_register_driver); |
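/*
 * For reference, a minimal client driver registration might look like the
 * sketch below (all names are hypothetical, not from this file):
 *
 *	static const struct spi_device_id acme_foo_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_foo_ids);
 *
 *	static const struct of_device_id acme_foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, acme_foo_of_match);
 *
 *	static struct spi_driver acme_foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= acme_foo_of_match,
 *		},
 *		.id_table	= acme_foo_ids,
 *		.probe		= acme_foo_probe,
 *	};
 *	module_spi_driver(acme_foo_driver);
 *
 * Pairing "foo" in the id_table with the vendorless part of the compatible
 * string keeps the warning above quiet and lets module autoloading work
 * for both DT and legacy enumeration.
 */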
525 | |
526 | /*-------------------------------------------------------------------------*/ |
527 | |
528 | /* |
529 | * SPI devices should normally not be created by SPI device drivers; that |
530 | * would make them board-specific. Similarly with SPI controller drivers. |
 * Device registration normally goes into board-specific files like
 * arch/.../mach.../board-YYY.c, alongside other read-only (flashable)
 * information about mainboard devices.
533 | */ |
534 | |
535 | struct boardinfo { |
536 | struct list_head list; |
537 | struct spi_board_info board_info; |
538 | }; |
539 | |
540 | static LIST_HEAD(board_list); |
541 | static LIST_HEAD(spi_controller_list); |
542 | |
543 | /* |
544 | * Used to protect add/del operation for board_info list and |
545 | * spi_controller list, and their matching process also used |
546 | * to protect object of type struct idr. |
547 | */ |
548 | static DEFINE_MUTEX(board_lock); |
549 | |
550 | /** |
551 | * spi_alloc_device - Allocate a new SPI device |
552 | * @ctlr: Controller to which device is connected |
553 | * Context: can sleep |
554 | * |
555 | * Allows a driver to allocate and initialize a spi_device without |
556 | * registering it immediately. This allows a driver to directly |
557 | * fill the spi_device with device parameters before calling |
558 | * spi_add_device() on it. |
559 | * |
560 | * Caller is responsible to call spi_add_device() on the returned |
561 | * spi_device structure to add it to the SPI controller. If the caller |
562 | * needs to discard the spi_device without adding it, then it should |
563 | * call spi_dev_put() on it. |
564 | * |
565 | * Return: a pointer to the new device, or NULL. |
566 | */ |
567 | struct spi_device *spi_alloc_device(struct spi_controller *ctlr) |
568 | { |
569 | struct spi_device *spi; |
570 | |
571 | if (!spi_controller_get(ctlr)) |
572 | return NULL; |
573 | |
	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
575 | if (!spi) { |
576 | spi_controller_put(ctlr); |
577 | return NULL; |
578 | } |
579 | |
580 | spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL); |
581 | if (!spi->pcpu_statistics) { |
		kfree(spi);
583 | spi_controller_put(ctlr); |
584 | return NULL; |
585 | } |
586 | |
587 | spi->controller = ctlr; |
588 | spi->dev.parent = &ctlr->dev; |
589 | spi->dev.bus = &spi_bus_type; |
590 | spi->dev.release = spidev_release; |
591 | spi->mode = ctlr->buswidth_override_bits; |
592 | |
	device_initialize(&spi->dev);
594 | return spi; |
595 | } |
596 | EXPORT_SYMBOL_GPL(spi_alloc_device); |
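/*
 * Typical use, as an illustrative sketch (error handling trimmed; the
 * "ctlr", chip-select and modalias values are assumptions):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi_set_chipselect(spi, 0, 3);
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "my-chip", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 *
 * On spi_add_device() failure the device is discarded with spi_dev_put()
 * rather than being left half-registered.
 */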
597 | |
598 | static void spi_dev_set_name(struct spi_device *spi) |
599 | { |
600 | struct acpi_device *adev = ACPI_COMPANION(&spi->dev); |
601 | |
602 | if (adev) { |
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
609 | } |
610 | |
/*
 * Zero (0) is a valid physical CS value and can appear at any logical CS
 * in spi->chip_select[]. If every entry were initialized to 0, a valid
 * physical CS 0 would be indistinguishable from an unused logical CS. To
 * avoid this ambiguity, initialize all entries to -1: unused logical CS
 * slots then carry -1 and can be skipped during physical CS validity
 * checks.
 */
620 | #define SPI_INVALID_CS ((s8)-1) |
621 | |
622 | static inline bool is_valid_cs(s8 chip_select) |
623 | { |
624 | return chip_select != SPI_INVALID_CS; |
625 | } |
626 | |
627 | static inline int spi_dev_check_cs(struct device *dev, |
628 | struct spi_device *spi, u8 idx, |
629 | struct spi_device *new_spi, u8 new_idx) |
630 | { |
631 | u8 cs, cs_new; |
632 | u8 idx_new; |
633 | |
634 | cs = spi_get_chipselect(spi, idx); |
635 | for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) { |
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
639 | return -EBUSY; |
640 | } |
641 | } |
642 | return 0; |
643 | } |
644 | |
645 | static int spi_dev_check(struct device *dev, void *data) |
646 | { |
647 | struct spi_device *spi = to_spi_device(dev); |
648 | struct spi_device *new_spi = data; |
649 | int status, idx; |
650 | |
651 | if (spi->controller == new_spi->controller) { |
652 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { |
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
654 | if (status) |
655 | return status; |
656 | } |
657 | } |
658 | return 0; |
659 | } |
660 | |
661 | static void spi_cleanup(struct spi_device *spi) |
662 | { |
663 | if (spi->controller->cleanup) |
664 | spi->controller->cleanup(spi); |
665 | } |
666 | |
667 | static int __spi_add_device(struct spi_device *spi) |
668 | { |
669 | struct spi_controller *ctlr = spi->controller; |
670 | struct device *dev = ctlr->dev.parent; |
671 | int status, idx; |
672 | u8 cs; |
673 | |
674 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { |
675 | /* Chipselects are numbered 0..max; validate. */ |
676 | cs = spi_get_chipselect(spi, idx); |
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
679 | ctlr->num_chipselect); |
680 | return -EINVAL; |
681 | } |
682 | } |
683 | |
684 | /* |
685 | * Make sure that multiple logical CS doesn't map to the same physical CS. |
686 | * For example, spi->chip_select[0] != spi->chip_select[1] and so on. |
687 | */ |
688 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { |
		status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
690 | if (status) |
691 | return status; |
692 | } |
693 | |
694 | /* Set the bus ID string */ |
695 | spi_dev_set_name(spi); |
696 | |
697 | /* |
698 | * We need to make sure there's no other device with this |
699 | * chipselect **BEFORE** we call setup(), else we'll trash |
700 | * its configuration. |
701 | */ |
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
703 | if (status) |
704 | return status; |
705 | |
706 | /* Controller may unregister concurrently */ |
707 | if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && |
	    !device_is_registered(&ctlr->dev)) {
709 | return -ENODEV; |
710 | } |
711 | |
712 | if (ctlr->cs_gpiods) { |
713 | u8 cs; |
714 | |
715 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { |
716 | cs = spi_get_chipselect(spi, idx); |
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
719 | } |
720 | } |
721 | |
722 | /* |
723 | * Drivers may modify this initial i/o setup, but will |
724 | * normally rely on the device being setup. Devices |
725 | * using SPI_CS_HIGH can't coexist well otherwise... |
726 | */ |
727 | status = spi_setup(spi); |
728 | if (status < 0) { |
729 | dev_err(dev, "can't setup %s, status %d\n" , |
730 | dev_name(&spi->dev), status); |
731 | return status; |
732 | } |
733 | |
734 | /* Device may be bound to an active driver when this returns */ |
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
742 | } |
743 | |
744 | return status; |
745 | } |
746 | |
747 | /** |
748 | * spi_add_device - Add spi_device allocated with spi_alloc_device |
749 | * @spi: spi_device to register |
750 | * |
751 | * Companion function to spi_alloc_device. Devices allocated with |
752 | * spi_alloc_device can be added onto the SPI bus with this function. |
753 | * |
754 | * Return: 0 on success; negative errno on failure |
755 | */ |
756 | int spi_add_device(struct spi_device *spi) |
757 | { |
758 | struct spi_controller *ctlr = spi->controller; |
759 | int status; |
760 | |
761 | /* Set the bus ID string */ |
762 | spi_dev_set_name(spi); |
763 | |
764 | mutex_lock(&ctlr->add_lock); |
765 | status = __spi_add_device(spi); |
	mutex_unlock(&ctlr->add_lock);
767 | return status; |
768 | } |
769 | EXPORT_SYMBOL_GPL(spi_add_device); |
770 | |
771 | static void spi_set_all_cs_unused(struct spi_device *spi) |
772 | { |
773 | u8 idx; |
774 | |
775 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) |
776 | spi_set_chipselect(spi, idx, SPI_INVALID_CS); |
777 | } |
778 | |
779 | /** |
780 | * spi_new_device - instantiate one new SPI device |
781 | * @ctlr: Controller to which device is connected |
782 | * @chip: Describes the SPI device |
783 | * Context: can sleep |
784 | * |
785 | * On typical mainboards, this is purely internal; and it's not needed |
786 | * after board init creates the hard-wired devices. Some development |
787 | * platforms may not be able to use spi_register_board_info though, and |
788 | * this is exported so that for example a USB or parport based adapter |
789 | * driver could add devices (which it would learn about out-of-band). |
790 | * |
791 | * Return: the new device, or NULL. |
792 | */ |
793 | struct spi_device *spi_new_device(struct spi_controller *ctlr, |
794 | struct spi_board_info *chip) |
795 | { |
796 | struct spi_device *proxy; |
797 | int status; |
798 | |
799 | /* |
800 | * NOTE: caller did any chip->bus_num checks necessary. |
801 | * |
802 | * Also, unless we change the return value convention to use |
803 | * error-or-pointer (not NULL-or-pointer), troubleshootability |
804 | * suggests syslogged diagnostics are best here (ugh). |
805 | */ |
806 | |
807 | proxy = spi_alloc_device(ctlr); |
808 | if (!proxy) |
809 | return NULL; |
810 | |
811 | WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); |
812 | |
813 | /* Use provided chip-select for proxy device */ |
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);
816 | |
817 | proxy->max_speed_hz = chip->max_speed_hz; |
818 | proxy->mode = chip->mode; |
819 | proxy->irq = chip->irq; |
820 | strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); |
821 | proxy->dev.platform_data = (void *) chip->platform_data; |
822 | proxy->controller_data = chip->controller_data; |
823 | proxy->controller_state = NULL; |
	/*
	 * spi->chip_select[i] gives the physical CS for logical CS i, and a
	 * logical CS is selected by setting the ith bit in spi->cs_index_mask.
	 * For example, if spi->cs_index_mask = 0x01 then logical CS 0 is
	 * selected and spi->chip_select[0] gives the physical CS.
	 * By default spi->chip_select[0] holds the physical CS number, so set
	 * spi->cs_index_mask to 0x01.
	 */
832 | proxy->cs_index_mask = 0x01; |
833 | |
834 | if (chip->swnode) { |
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
838 | chip->modalias, status); |
839 | goto err_dev_put; |
840 | } |
841 | } |
842 | |
843 | status = spi_add_device(proxy); |
844 | if (status < 0) |
845 | goto err_dev_put; |
846 | |
847 | return proxy; |
848 | |
849 | err_dev_put: |
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
852 | return NULL; |
853 | } |
854 | EXPORT_SYMBOL_GPL(spi_new_device); |
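/*
 * Sketch of such ad-hoc instantiation, e.g. from an adapter driver (the
 * modalias and numbers below are made up for illustration):
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "spidev",
 *		.max_speed_hz	= 500000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 *	if (!spi)
 *		dev_warn(&ctlr->dev, "failed to instantiate device\n");
 */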
855 | |
856 | /** |
857 | * spi_unregister_device - unregister a single SPI device |
858 | * @spi: spi_device to unregister |
859 | * |
860 | * Start making the passed SPI device vanish. Normally this would be handled |
861 | * by spi_unregister_controller(). |
862 | */ |
863 | void spi_unregister_device(struct spi_device *spi) |
864 | { |
865 | if (!spi) |
866 | return; |
867 | |
868 | if (spi->dev.of_node) { |
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
878 | } |
879 | EXPORT_SYMBOL_GPL(spi_unregister_device); |
880 | |
881 | static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, |
882 | struct spi_board_info *bi) |
883 | { |
884 | struct spi_device *dev; |
885 | |
886 | if (ctlr->bus_num != bi->bus_num) |
887 | return; |
888 | |
889 | dev = spi_new_device(ctlr, bi); |
890 | if (!dev) |
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
892 | bi->modalias); |
893 | } |
894 | |
895 | /** |
896 | * spi_register_board_info - register SPI devices for a given board |
897 | * @info: array of chip descriptors |
898 | * @n: how many descriptors are provided |
899 | * Context: can sleep |
900 | * |
901 | * Board-specific early init code calls this (probably during arch_initcall) |
902 | * with segments of the SPI device table. Any device nodes are created later, |
903 | * after the relevant parent SPI controller (bus_num) is defined. We keep |
904 | * this table of devices forever, so that reloading a controller driver will |
905 | * not make Linux forget about these hard-wired devices. |
906 | * |
907 | * Other code can also call this, e.g. a particular add-on board might provide |
908 | * SPI devices through its expansion connector, so code initializing that board |
909 | * would naturally declare its SPI devices. |
910 | * |
911 | * The board info passed can safely be __initdata ... but be careful of |
912 | * any embedded pointers (platform_data, etc), they're copied as-is. |
913 | * |
914 | * Return: zero on success, else a negative error code. |
915 | */ |
916 | int spi_register_board_info(struct spi_board_info const *info, unsigned n) |
917 | { |
918 | struct boardinfo *bi; |
919 | int i; |
920 | |
921 | if (!n) |
922 | return 0; |
923 | |
	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
925 | if (!bi) |
926 | return -ENOMEM; |
927 | |
928 | for (i = 0; i < n; i++, bi++, info++) { |
929 | struct spi_controller *ctlr; |
930 | |
931 | memcpy(&bi->board_info, info, sizeof(*info)); |
932 | |
933 | mutex_lock(&board_lock); |
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
939 | } |
940 | |
941 | return 0; |
942 | } |
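/*
 * Typical board-file usage, sketched with illustrative values:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */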
943 | |
944 | /*-------------------------------------------------------------------------*/ |
945 | |
946 | /* Core methods for SPI resource management */ |
947 | |
948 | /** |
949 | * spi_res_alloc - allocate a spi resource that is life-cycle managed |
950 | * during the processing of a spi_message while using |
951 | * spi_transfer_one |
952 | * @spi: the SPI device for which we allocate memory |
953 | * @release: the release code to execute for this resource |
954 | * @size: size to alloc and return |
955 | * @gfp: GFP allocation flags |
956 | * |
957 | * Return: the pointer to the allocated data |
958 | * |
959 | * This may get enhanced in the future to allocate from a memory pool |
960 | * of the @spi_device or @spi_controller to avoid repeated allocations. |
961 | */ |
962 | static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release, |
963 | size_t size, gfp_t gfp) |
964 | { |
965 | struct spi_res *sres; |
966 | |
	sres = kzalloc(sizeof(*sres) + size, gfp);
968 | if (!sres) |
969 | return NULL; |
970 | |
	INIT_LIST_HEAD(&sres->entry);
972 | sres->release = release; |
973 | |
974 | return sres->data; |
975 | } |
976 | |
977 | /** |
978 | * spi_res_free - free an SPI resource |
979 | * @res: pointer to the custom data of a resource |
980 | */ |
981 | static void spi_res_free(void *res) |
982 | { |
983 | struct spi_res *sres = container_of(res, struct spi_res, data); |
984 | |
985 | if (!res) |
986 | return; |
987 | |
988 | WARN_ON(!list_empty(&sres->entry)); |
	kfree(sres);
990 | } |
991 | |
992 | /** |
993 | * spi_res_add - add a spi_res to the spi_message |
994 | * @message: the SPI message |
995 | * @res: the spi_resource |
996 | */ |
997 | static void spi_res_add(struct spi_message *message, void *res) |
998 | { |
999 | struct spi_res *sres = container_of(res, struct spi_res, data); |
1000 | |
1001 | WARN_ON(!list_empty(&sres->entry)); |
	list_add_tail(&sres->entry, &message->resources);
1003 | } |
1004 | |
1005 | /** |
1006 | * spi_res_release - release all SPI resources for this message |
1007 | * @ctlr: the @spi_controller |
1008 | * @message: the @spi_message |
1009 | */ |
1010 | static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) |
1011 | { |
1012 | struct spi_res *res, *tmp; |
1013 | |
1014 | list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { |
1015 | if (res->release) |
1016 | res->release(ctlr, message, res->data); |
1017 | |
		list_del(&res->entry);

		kfree(res);
1021 | } |
1022 | } |
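/*
 * A minimal sketch of the spi_res pattern (my_release, undo_whatever and
 * struct my_data are hypothetical): allocate a resource tied to a message,
 * then let the core run the release hook and free the memory in
 * spi_res_release() once the message completes.
 *
 *	static void my_release(struct spi_controller *ctlr,
 *			       struct spi_message *msg, void *res)
 *	{
 *		struct my_data *data = res;
 *
 *		undo_whatever(data);
 *	}
 *
 *	struct my_data *data = spi_res_alloc(msg->spi, my_release,
 *					     sizeof(*data), GFP_KERNEL);
 *	if (data)
 *		spi_res_add(msg, data);
 */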
1023 | |
1024 | /*-------------------------------------------------------------------------*/ |
1025 | static inline bool spi_is_last_cs(struct spi_device *spi) |
1026 | { |
1027 | u8 idx; |
1028 | bool last = false; |
1029 | |
1030 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { |
1031 | if (spi->cs_index_mask & BIT(idx)) { |
1032 | if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx)) |
1033 | last = true; |
1034 | } |
1035 | } |
1036 | return last; |
1037 | } |
1038 | |
1039 | |
1040 | static void spi_set_cs(struct spi_device *spi, bool enable, bool force) |
1041 | { |
1042 | bool activate = enable; |
1043 | u8 idx; |
1044 | |
1045 | /* |
1046 | * Avoid calling into the driver (or doing delays) if the chip select |
1047 | * isn't actually changing from the last time this was called. |
1048 | */ |
1049 | if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && |
1050 | spi_is_last_cs(spi)) || |
1051 | (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && |
1052 | !spi_is_last_cs(spi))) && |
1053 | (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) |
1054 | return; |
1055 | |
	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
1061 | spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; |
1062 | |
1063 | if (spi->mode & SPI_CS_HIGH) |
1064 | enable = !enable; |
1065 | |
1066 | /* |
1067 | * Handle chip select delays for GPIO based CS or controllers without |
1068 | * programmable chip select timing. |
1069 | */ |
1070 | if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate) |
		spi_delay_exec(&spi->cs_hold, NULL);
1072 | |
1073 | if (spi_is_csgpiod(spi)) { |
1074 | if (!(spi->mode & SPI_NO_CS)) { |
1075 | /* |
1076 | * Historically ACPI has no means of the GPIO polarity and |
1077 | * thus the SPISerialBus() resource defines it on the per-chip |
1078 | * basis. In order to avoid a chain of negations, the GPIO |
1079 | * polarity is considered being Active High. Even for the cases |
1080 | * when _DSD() is involved (in the updated versions of ACPI) |
1081 | * the GPIO CS polarity must be defined Active High to avoid |
1082 | * ambiguity. That's why we use enable, that takes SPI_CS_HIGH |
1083 | * into account. |
1084 | */ |
1085 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { |
1086 | if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) { |
					if (has_acpi_companion(&spi->dev))
						gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
									 !enable);
					else
						/* Polarity handled by GPIO library */
						gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
									 activate);

					if (activate)
						spi_delay_exec(&spi->cs_setup, NULL);
					else
						spi_delay_exec(&spi->cs_inactive, NULL);
1099 | } |
1100 | } |
1101 | } |
1102 | /* Some SPI masters need both GPIO CS & slave_select */ |
1103 | if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) && |
1104 | spi->controller->set_cs) |
1105 | spi->controller->set_cs(spi, !enable); |
1106 | } else if (spi->controller->set_cs) { |
1107 | spi->controller->set_cs(spi, !enable); |
1108 | } |
1109 | |
1110 | if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) { |
1111 | if (activate) |
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
1115 | } |
1116 | } |
1117 | |
1118 | #ifdef CONFIG_HAS_DMA |
1119 | static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev, |
1120 | struct sg_table *sgt, void *buf, size_t len, |
1121 | enum dma_data_direction dir, unsigned long attrs) |
1122 | { |
	const bool vmalloced_buf = is_vmalloc_addr(buf);
1124 | unsigned int max_seg_size = dma_get_max_seg_size(dev); |
1125 | #ifdef CONFIG_HIGHMEM |
1126 | const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && |
1127 | (unsigned long)buf < (PKMAP_BASE + |
1128 | (LAST_PKMAP * PAGE_SIZE))); |
1129 | #else |
1130 | const bool kmap_buf = false; |
1131 | #endif |
1132 | int desc_len; |
1133 | int sgs; |
1134 | struct page *vm_page; |
1135 | struct scatterlist *sg; |
1136 | void *sg_buf; |
1137 | size_t min; |
1138 | int i, ret; |
1139 | |
1140 | if (vmalloced_buf || kmap_buf) { |
1141 | desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE); |
1142 | sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); |
1143 | } else if (virt_addr_valid(buf)) { |
1144 | desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len); |
1145 | sgs = DIV_ROUND_UP(len, desc_len); |
1146 | } else { |
1147 | return -EINVAL; |
1148 | } |
1149 | |
1150 | ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); |
1151 | if (ret != 0) |
1152 | return ret; |
1153 | |
1154 | sg = &sgt->sgl[0]; |
1155 | for (i = 0; i < sgs; i++) { |
1156 | |
1157 | if (vmalloced_buf || kmap_buf) { |
1158 | /* |
1159 | * Next scatterlist entry size is the minimum between |
1160 | * the desc_len and the remaining buffer length that |
1161 | * fits in a page. |
1162 | */ |
1163 | min = min_t(size_t, desc_len, |
1164 | min_t(size_t, len, |
1165 | PAGE_SIZE - offset_in_page(buf))); |
1166 | if (vmalloced_buf) |
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
1180 | } |
1181 | |
1182 | buf += min; |
1183 | len -= min; |
1184 | sg = sg_next(sg); |
1185 | } |
1186 | |
1187 | ret = dma_map_sgtable(dev, sgt, dir, attrs); |
1188 | if (ret < 0) { |
1189 | sg_free_table(sgt); |
1190 | return ret; |
1191 | } |
1192 | |
1193 | return 0; |
1194 | } |
1195 | |
1196 | int spi_map_buf(struct spi_controller *ctlr, struct device *dev, |
1197 | struct sg_table *sgt, void *buf, size_t len, |
1198 | enum dma_data_direction dir) |
1199 | { |
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1201 | } |
1202 | |
1203 | static void spi_unmap_buf_attrs(struct spi_controller *ctlr, |
1204 | struct device *dev, struct sg_table *sgt, |
1205 | enum dma_data_direction dir, |
1206 | unsigned long attrs) |
1207 | { |
1208 | if (sgt->orig_nents) { |
1209 | dma_unmap_sgtable(dev, sgt, dir, attrs); |
1210 | sg_free_table(sgt); |
1211 | sgt->orig_nents = 0; |
1212 | sgt->nents = 0; |
1213 | } |
1214 | } |
1215 | |
1216 | void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, |
1217 | struct sg_table *sgt, enum dma_data_direction dir) |
1218 | { |
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1220 | } |
1221 | |
1222 | static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) |
1223 | { |
1224 | struct device *tx_dev, *rx_dev; |
1225 | struct spi_transfer *xfer; |
1226 | int ret; |
1227 | |
1228 | if (!ctlr->can_dma) |
1229 | return 0; |
1230 | |
1231 | if (ctlr->dma_tx) |
1232 | tx_dev = ctlr->dma_tx->device->dev; |
1233 | else if (ctlr->dma_map_dev) |
1234 | tx_dev = ctlr->dma_map_dev; |
1235 | else |
1236 | tx_dev = ctlr->dev.parent; |
1237 | |
1238 | if (ctlr->dma_rx) |
1239 | rx_dev = ctlr->dma_rx->device->dev; |
1240 | else if (ctlr->dma_map_dev) |
1241 | rx_dev = ctlr->dma_map_dev; |
1242 | else |
1243 | rx_dev = ctlr->dev.parent; |
1244 | |
1245 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1246 | /* The sync is done before each transfer. */ |
1247 | unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; |
1248 | |
1249 | if (!ctlr->can_dma(ctlr, msg->spi, xfer)) |
1250 | continue; |
1251 | |
1252 | if (xfer->tx_buf != NULL) { |
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);
1269 | |
1270 | return ret; |
1271 | } |
1272 | } |
1273 | } |
1274 | |
1275 | ctlr->cur_rx_dma_dev = rx_dev; |
1276 | ctlr->cur_tx_dma_dev = tx_dev; |
1277 | ctlr->cur_msg_mapped = true; |
1278 | |
1279 | return 0; |
1280 | } |
1281 | |
1282 | static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) |
1283 | { |
1284 | struct device *rx_dev = ctlr->cur_rx_dma_dev; |
1285 | struct device *tx_dev = ctlr->cur_tx_dma_dev; |
1286 | struct spi_transfer *xfer; |
1287 | |
1288 | if (!ctlr->cur_msg_mapped || !ctlr->can_dma) |
1289 | return 0; |
1290 | |
1291 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1292 | /* The sync has already been done after each transfer. */ |
1293 | unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; |
1294 | |
1295 | if (!ctlr->can_dma(ctlr, msg->spi, xfer)) |
1296 | continue; |
1297 | |
		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
1302 | } |
1303 | |
1304 | ctlr->cur_msg_mapped = false; |
1305 | |
1306 | return 0; |
1307 | } |
1308 | |
1309 | static void spi_dma_sync_for_device(struct spi_controller *ctlr, |
1310 | struct spi_transfer *xfer) |
1311 | { |
1312 | struct device *rx_dev = ctlr->cur_rx_dma_dev; |
1313 | struct device *tx_dev = ctlr->cur_tx_dma_dev; |
1314 | |
1315 | if (!ctlr->cur_msg_mapped) |
1316 | return; |
1317 | |
1318 | if (xfer->tx_sg.orig_nents) |
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1322 | } |
1323 | |
1324 | static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, |
1325 | struct spi_transfer *xfer) |
1326 | { |
1327 | struct device *rx_dev = ctlr->cur_rx_dma_dev; |
1328 | struct device *tx_dev = ctlr->cur_tx_dma_dev; |
1329 | |
1330 | if (!ctlr->cur_msg_mapped) |
1331 | return; |
1332 | |
1333 | if (xfer->rx_sg.orig_nents) |
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1337 | } |
1338 | #else /* !CONFIG_HAS_DMA */ |
1339 | static inline int __spi_map_msg(struct spi_controller *ctlr, |
1340 | struct spi_message *msg) |
1341 | { |
1342 | return 0; |
1343 | } |
1344 | |
1345 | static inline int __spi_unmap_msg(struct spi_controller *ctlr, |
1346 | struct spi_message *msg) |
1347 | { |
1348 | return 0; |
1349 | } |
1350 | |
1351 | static void spi_dma_sync_for_device(struct spi_controller *ctrl, |
1352 | struct spi_transfer *xfer) |
1353 | { |
1354 | } |
1355 | |
1356 | static void spi_dma_sync_for_cpu(struct spi_controller *ctrl, |
1357 | struct spi_transfer *xfer) |
1358 | { |
1359 | } |
1360 | #endif /* !CONFIG_HAS_DMA */ |
1361 | |
1362 | static inline int spi_unmap_msg(struct spi_controller *ctlr, |
1363 | struct spi_message *msg) |
1364 | { |
1365 | struct spi_transfer *xfer; |
1366 | |
1367 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1368 | /* |
1369 | * Restore the original value of tx_buf or rx_buf if they are |
1370 | * NULL. |
1371 | */ |
1372 | if (xfer->tx_buf == ctlr->dummy_tx) |
1373 | xfer->tx_buf = NULL; |
1374 | if (xfer->rx_buf == ctlr->dummy_rx) |
1375 | xfer->rx_buf = NULL; |
1376 | } |
1377 | |
1378 | return __spi_unmap_msg(ctlr, msg); |
1379 | } |
1380 | |
1381 | static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) |
1382 | { |
1383 | struct spi_transfer *xfer; |
1384 | void *tmp; |
1385 | unsigned int max_tx, max_rx; |
1386 | |
1387 | if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) |
1388 | && !(msg->spi->mode & SPI_3WIRE)) { |
1389 | max_tx = 0; |
1390 | max_rx = 0; |
1391 | |
1392 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1393 | if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && |
1394 | !xfer->tx_buf) |
1395 | max_tx = max(xfer->len, max_tx); |
1396 | if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && |
1397 | !xfer->rx_buf) |
1398 | max_rx = max(xfer->len, max_rx); |
1399 | } |
1400 | |
1401 | if (max_tx) { |
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1404 | if (!tmp) |
1405 | return -ENOMEM; |
1406 | ctlr->dummy_tx = tmp; |
1407 | } |
1408 | |
1409 | if (max_rx) { |
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
1412 | if (!tmp) |
1413 | return -ENOMEM; |
1414 | ctlr->dummy_rx = tmp; |
1415 | } |
1416 | |
1417 | if (max_tx || max_rx) { |
1418 | list_for_each_entry(xfer, &msg->transfers, |
1419 | transfer_list) { |
1420 | if (!xfer->len) |
1421 | continue; |
1422 | if (!xfer->tx_buf) |
1423 | xfer->tx_buf = ctlr->dummy_tx; |
1424 | if (!xfer->rx_buf) |
1425 | xfer->rx_buf = ctlr->dummy_rx; |
1426 | } |
1427 | } |
1428 | } |
1429 | |
1430 | return __spi_map_msg(ctlr, msg); |
1431 | } |
1432 | |
1433 | static int spi_transfer_wait(struct spi_controller *ctlr, |
1434 | struct spi_message *msg, |
1435 | struct spi_transfer *xfer) |
1436 | { |
1437 | struct spi_statistics __percpu *statm = ctlr->pcpu_statistics; |
1438 | struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics; |
1439 | u32 speed_hz = xfer->speed_hz; |
1440 | unsigned long long ms; |
1441 | |
1442 | if (spi_controller_is_slave(ctlr)) { |
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1445 | return -EINTR; |
1446 | } |
1447 | } else { |
1448 | if (!speed_hz) |
1449 | speed_hz = 100000; |
1450 | |
1451 | /* |
1452 | * For each byte we wait for 8 cycles of the SPI clock. |
1453 | * Since speed is defined in Hz and we want milliseconds, |
1454 | * use respective multiplier, but before the division, |
1455 | * otherwise we may get 0 for short transfers. |
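		 *
		 * For example, a 256-byte transfer at 1 MHz gives
		 * 8 * 1000 * 256 / 1000000 = ~2 ms here; the doubling and
		 * 200 ms of slack below turn that into a ~204 ms timeout.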
1456 | */ |
1457 | ms = 8LL * MSEC_PER_SEC * xfer->len; |
1458 | do_div(ms, speed_hz); |
1459 | |
1460 | /* |
1461 | * Increase it twice and add 200 ms tolerance, use |
1462 | * predefined maximum in case of overflow. |
1463 | */ |
1464 | ms += ms + 200; |
1465 | if (ms > UINT_MAX) |
1466 | ms = UINT_MAX; |
1467 | |
		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));
1470 | |
1471 | if (ms == 0) { |
1472 | SPI_STATISTICS_INCREMENT_FIELD(statm, timedout); |
1473 | SPI_STATISTICS_INCREMENT_FIELD(stats, timedout); |
1474 | dev_err(&msg->spi->dev, |
1475 | "SPI transfer timed out\n" ); |
1476 | return -ETIMEDOUT; |
1477 | } |
1478 | |
1479 | if (xfer->error & SPI_TRANS_FAIL_IO) |
1480 | return -EIO; |
1481 | } |
1482 | |
1483 | return 0; |
1484 | } |
1485 | |
1486 | static void _spi_transfer_delay_ns(u32 ns) |
1487 | { |
1488 | if (!ns) |
1489 | return; |
1490 | if (ns <= NSEC_PER_USEC) { |
1491 | ndelay(ns); |
1492 | } else { |
1493 | u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC); |
1494 | |
1495 | if (us <= 10) |
1496 | udelay(us); |
1497 | else |
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
1499 | } |
1500 | } |
1501 | |
1502 | int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) |
1503 | { |
1504 | u32 delay = _delay->value; |
1505 | u32 unit = _delay->unit; |
1506 | u32 hz; |
1507 | |
1508 | if (!delay) |
1509 | return 0; |
1510 | |
1511 | switch (unit) { |
1512 | case SPI_DELAY_UNIT_USECS: |
1513 | delay *= NSEC_PER_USEC; |
1514 | break; |
1515 | case SPI_DELAY_UNIT_NSECS: |
1516 | /* Nothing to do here */ |
1517 | break; |
1518 | case SPI_DELAY_UNIT_SCK: |
1519 | /* Clock cycles need to be obtained from spi_transfer */ |
1520 | if (!xfer) |
1521 | return -EINVAL; |
1522 | /* |
1523 | * If there is unknown effective speed, approximate it |
1524 | * by underestimating with half of the requested Hz. |
1525 | */ |
1526 | hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2; |
1527 | if (!hz) |
1528 | return -EINVAL; |
1529 | |
1530 | /* Convert delay to nanoseconds */ |
1531 | delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz); |
1532 | break; |
1533 | default: |
1534 | return -EINVAL; |
1535 | } |
1536 | |
1537 | return delay; |
1538 | } |
1539 | EXPORT_SYMBOL_GPL(spi_delay_to_ns); |
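/*
 * Worked example: { .value = 10, .unit = SPI_DELAY_UNIT_SCK } on a
 * transfer with effective_speed_hz = 10000000 converts to
 * 10 * DIV_ROUND_UP(1000000000, 10000000) = 10 * 100 = 1000 ns.
 */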
1540 | |
1541 | int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer) |
1542 | { |
1543 | int delay; |
1544 | |
1545 | might_sleep(); |
1546 | |
1547 | if (!_delay) |
1548 | return -EINVAL; |
1549 | |
1550 | delay = spi_delay_to_ns(_delay, xfer); |
1551 | if (delay < 0) |
1552 | return delay; |
1553 | |
	_spi_transfer_delay_ns(delay);
1555 | |
1556 | return 0; |
1557 | } |
1558 | EXPORT_SYMBOL_GPL(spi_delay_exec); |
1559 | |
1560 | static void _spi_transfer_cs_change_delay(struct spi_message *msg, |
1561 | struct spi_transfer *xfer) |
1562 | { |
1563 | u32 default_delay_ns = 10 * NSEC_PER_USEC; |
1564 | u32 delay = xfer->cs_change_delay.value; |
1565 | u32 unit = xfer->cs_change_delay.unit; |
1566 | int ret; |
1567 | |
1568 | /* Return early on "fast" mode - for everything but USECS */ |
1569 | if (!delay) { |
1570 | if (unit == SPI_DELAY_UNIT_USECS) |
			_spi_transfer_delay_ns(default_delay_ns);
1572 | return; |
1573 | } |
1574 | |
1575 | ret = spi_delay_exec(&xfer->cs_change_delay, xfer); |
1576 | if (ret) { |
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
1581 | } |
1582 | } |
1583 | |
1584 | void spi_transfer_cs_change_delay_exec(struct spi_message *msg, |
1585 | struct spi_transfer *xfer) |
1586 | { |
1587 | _spi_transfer_cs_change_delay(msg, xfer); |
1588 | } |
1589 | EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec); |
1590 | |
1591 | /* |
1592 | * spi_transfer_one_message - Default implementation of transfer_one_message() |
1593 | * |
1594 | * This is a standard implementation of transfer_one_message() for |
1595 | * drivers which implement a transfer_one() operation. It provides |
1596 | * standard handling of delays and chip select management. |
1597 | */ |
1598 | static int spi_transfer_one_message(struct spi_controller *ctlr, |
1599 | struct spi_message *msg) |
1600 | { |
1601 | struct spi_transfer *xfer; |
1602 | bool keep_cs = false; |
1603 | int ret = 0; |
1604 | struct spi_statistics __percpu *statm = ctlr->pcpu_statistics; |
1605 | struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics; |
1606 | |
1607 | xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list); |
	spi_set_cs(msg->spi, !xfer->cs_off, false);
1609 | |
1610 | SPI_STATISTICS_INCREMENT_FIELD(statm, messages); |
1611 | SPI_STATISTICS_INCREMENT_FIELD(stats, messages); |
1612 | |
1613 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1614 | trace_spi_transfer_start(msg, xfer); |
1615 | |
		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1618 | |
1619 | if (!ctlr->ptp_sts_supported) { |
1620 | xfer->ptp_sts_word_pre = 0; |
			ptp_read_system_prets(xfer->ptp_sts);
1622 | } |
1623 | |
1624 | if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) { |
			reinit_completion(&ctlr->xfer_completion);
1626 | |
1627 | fallback_pio: |
1628 | spi_dma_sync_for_device(ctlr, xfer); |
1629 | ret = ctlr->transfer_one(ctlr, msg->spi, xfer); |
1630 | if (ret < 0) { |
1631 | spi_dma_sync_for_cpu(ctlr, xfer); |
1632 | |
1633 | if (ctlr->cur_msg_mapped && |
1634 | (xfer->error & SPI_TRANS_FAIL_NO_START)) { |
1635 | __spi_unmap_msg(ctlr, msg); |
1636 | ctlr->fallback = true; |
1637 | xfer->error &= ~SPI_TRANS_FAIL_NO_START; |
1638 | goto fallback_pio; |
1639 | } |
1640 | |
1641 | SPI_STATISTICS_INCREMENT_FIELD(statm, |
1642 | errors); |
1643 | SPI_STATISTICS_INCREMENT_FIELD(stats, |
1644 | errors); |
1645 | dev_err(&msg->spi->dev, |
1646 | "SPI transfer failed: %d\n" , ret); |
1647 | goto out; |
1648 | } |
1649 | |
1650 | if (ret > 0) { |
1651 | ret = spi_transfer_wait(ctlr, msg, xfer); |
1652 | if (ret < 0) |
1653 | msg->status = ret; |
1654 | } |
1655 | |
1656 | spi_dma_sync_for_cpu(ctlr, xfer); |
1657 | } else { |
1658 | if (xfer->len) |
dev_err(&msg->spi->dev,
"Bufferless transfer has length %u\n",
1661 | xfer->len); |
1662 | } |
1663 | |
1664 | if (!ctlr->ptp_sts_supported) { |
ptp_read_system_postts(xfer->ptp_sts);
1666 | xfer->ptp_sts_word_post = xfer->len; |
1667 | } |
1668 | |
1669 | trace_spi_transfer_stop(msg, xfer); |
1670 | |
1671 | if (msg->status != -EINPROGRESS) |
1672 | goto out; |
1673 | |
spi_transfer_delay_exec(xfer);

if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
keep_cs = true;
} else {
if (!xfer->cs_off)
spi_set_cs(msg->spi, false, false);
_spi_transfer_cs_change_delay(msg, xfer);
if (!list_next_entry(xfer, transfer_list)->cs_off)
spi_set_cs(msg->spi, true, false);
}
} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
spi_set_cs(msg->spi, xfer->cs_off, false);
}

msg->actual_length += xfer->len;
}

out:
if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false, false);
1698 | |
1699 | if (msg->status == -EINPROGRESS) |
1700 | msg->status = ret; |
1701 | |
1702 | if (msg->status && ctlr->handle_err) |
1703 | ctlr->handle_err(ctlr, msg); |
1704 | |
1705 | spi_finalize_current_message(ctlr); |
1706 | |
1707 | return ret; |
1708 | } |
1709 | |
1710 | /** |
1711 | * spi_finalize_current_transfer - report completion of a transfer |
1712 | * @ctlr: the controller reporting completion |
1713 | * |
1714 | * Called by SPI drivers using the core transfer_one_message() |
1715 | * implementation to notify it that the current interrupt driven |
1716 | * transfer has finished and the next one may be scheduled. |
1717 | */ |
1718 | void spi_finalize_current_transfer(struct spi_controller *ctlr) |
1719 | { |
1720 | complete(&ctlr->xfer_completion); |
1721 | } |
1722 | EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); |
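
/*
 * Illustrative sketch ("my_" names are hypothetical): a driver whose
 * transfer_one() returned a positive value to indicate an in-flight
 * transfer typically completes it from its interrupt handler:
 *
 *	static irqreturn_t my_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		... drain the FIFO and ack the hardware interrupt ...
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */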
1723 | |
1724 | static void spi_idle_runtime_pm(struct spi_controller *ctlr) |
1725 | { |
1726 | if (ctlr->auto_runtime_pm) { |
pm_runtime_mark_last_busy(ctlr->dev.parent);
pm_runtime_put_autosuspend(ctlr->dev.parent);
1729 | } |
1730 | } |
1731 | |
1732 | static int __spi_pump_transfer_message(struct spi_controller *ctlr, |
1733 | struct spi_message *msg, bool was_busy) |
1734 | { |
1735 | struct spi_transfer *xfer; |
1736 | int ret; |
1737 | |
1738 | if (!was_busy && ctlr->auto_runtime_pm) { |
ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
1743 | ret); |
1744 | |
1745 | msg->status = ret; |
1746 | spi_finalize_current_message(ctlr); |
1747 | |
1748 | return ret; |
1749 | } |
1750 | } |
1751 | |
1752 | if (!was_busy) |
trace_spi_controller_busy(ctlr);

if (!was_busy && ctlr->prepare_transfer_hardware) {
ret = ctlr->prepare_transfer_hardware(ctlr);
if (ret) {
dev_err(&ctlr->dev,
"failed to prepare transfer hardware: %d\n",
ret);

if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
1764 | |
1765 | msg->status = ret; |
1766 | spi_finalize_current_message(ctlr); |
1767 | |
1768 | return ret; |
1769 | } |
1770 | } |
1771 | |
1772 | trace_spi_message_start(msg); |
1773 | |
1774 | if (ctlr->prepare_message) { |
1775 | ret = ctlr->prepare_message(ctlr, msg); |
1776 | if (ret) { |
dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1778 | ret); |
1779 | msg->status = ret; |
1780 | spi_finalize_current_message(ctlr); |
1781 | return ret; |
1782 | } |
1783 | msg->prepared = true; |
1784 | } |
1785 | |
1786 | ret = spi_map_msg(ctlr, msg); |
1787 | if (ret) { |
1788 | msg->status = ret; |
1789 | spi_finalize_current_message(ctlr); |
1790 | return ret; |
1791 | } |
1792 | |
1793 | if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { |
1794 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1795 | xfer->ptp_sts_word_pre = 0; |
ptp_read_system_prets(xfer->ptp_sts);
1797 | } |
1798 | } |
1799 | |
1800 | /* |
* A driver's implementation of transfer_one_message() must arrange for
* spi_finalize_current_message() to get called. Most drivers will do
* this in the calling context, but some don't. For those cases, a
* completion is used to guarantee that this function does not return
* until spi_finalize_current_message() is done accessing
* ctlr->cur_msg.
* The following two flags make it possible to opportunistically skip the
* use of the completion, since using it involves expensive spin locks.
* In case of a race with the context that calls
* spi_finalize_current_message(), the completion will always be used,
* due to strict ordering of these flags using barriers.
1812 | */ |
1813 | WRITE_ONCE(ctlr->cur_msg_incomplete, true); |
1814 | WRITE_ONCE(ctlr->cur_msg_need_completion, false); |
reinit_completion(&ctlr->cur_msg_completion);
1816 | smp_wmb(); /* Make these available to spi_finalize_current_message() */ |
1817 | |
1818 | ret = ctlr->transfer_one_message(ctlr, msg); |
1819 | if (ret) { |
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
1822 | return ret; |
1823 | } |
1824 | |
1825 | WRITE_ONCE(ctlr->cur_msg_need_completion, true); |
1826 | smp_mb(); /* See spi_finalize_current_message()... */ |
1827 | if (READ_ONCE(ctlr->cur_msg_incomplete)) |
1828 | wait_for_completion(&ctlr->cur_msg_completion); |
1829 | |
1830 | return 0; |
1831 | } |
1832 | |
1833 | /** |
1834 | * __spi_pump_messages - function which processes SPI message queue |
1835 | * @ctlr: controller to process queue for |
1836 | * @in_kthread: true if we are in the context of the message pump thread |
1837 | * |
1838 | * This function checks if there is any SPI message in the queue that |
* needs processing and, if so, calls out to the driver to initialize hardware
1840 | * and transfer each message. |
1841 | * |
1842 | * Note that it is called both from the kthread itself and also from |
1843 | * inside spi_sync(); the queue extraction handling at the top of the |
1844 | * function should deal with this safely. |
1845 | */ |
1846 | static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) |
1847 | { |
1848 | struct spi_message *msg; |
1849 | bool was_busy = false; |
1850 | unsigned long flags; |
1851 | int ret; |
1852 | |
1853 | /* Take the I/O mutex */ |
1854 | mutex_lock(&ctlr->io_mutex); |
1855 | |
1856 | /* Lock queue */ |
1857 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
1858 | |
1859 | /* Make sure we are not already running a message */ |
1860 | if (ctlr->cur_msg) |
1861 | goto out_unlock; |
1862 | |
1863 | /* Check if the queue is idle */ |
if (list_empty(&ctlr->queue) || !ctlr->running) {
1865 | if (!ctlr->busy) |
1866 | goto out_unlock; |
1867 | |
1868 | /* Defer any non-atomic teardown to the thread */ |
1869 | if (!in_kthread) { |
1870 | if (!ctlr->dummy_rx && !ctlr->dummy_tx && |
1871 | !ctlr->unprepare_transfer_hardware) { |
1872 | spi_idle_runtime_pm(ctlr); |
1873 | ctlr->busy = false; |
1874 | ctlr->queue_empty = true; |
trace_spi_controller_idle(ctlr);
} else {
kthread_queue_work(ctlr->kworker,
&ctlr->pump_messages);
1879 | } |
1880 | goto out_unlock; |
1881 | } |
1882 | |
1883 | ctlr->busy = false; |
spin_unlock_irqrestore(&ctlr->queue_lock, flags);

kfree(ctlr->dummy_rx);
ctlr->dummy_rx = NULL;
kfree(ctlr->dummy_tx);
ctlr->dummy_tx = NULL;
if (ctlr->unprepare_transfer_hardware &&
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
spi_idle_runtime_pm(ctlr);
trace_spi_controller_idle(ctlr);
1896 | |
1897 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
1898 | ctlr->queue_empty = true; |
1899 | goto out_unlock; |
1900 | } |
1901 | |
1902 | /* Extract head of queue */ |
1903 | msg = list_first_entry(&ctlr->queue, struct spi_message, queue); |
1904 | ctlr->cur_msg = msg; |
1905 | |
list_del_init(&msg->queue);
if (ctlr->busy)
was_busy = true;
else
ctlr->busy = true;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);

ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

ctlr->cur_msg = NULL;
ctlr->fallback = false;

mutex_unlock(&ctlr->io_mutex);
1920 | |
1921 | /* Prod the scheduler in case transfer_one() was busy waiting */ |
1922 | if (!ret) |
1923 | cond_resched(); |
1924 | return; |
1925 | |
1926 | out_unlock: |
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
mutex_unlock(&ctlr->io_mutex);
1929 | } |
1930 | |
1931 | /** |
1932 | * spi_pump_messages - kthread work function which processes spi message queue |
1933 | * @work: pointer to kthread work struct contained in the controller struct |
1934 | */ |
1935 | static void spi_pump_messages(struct kthread_work *work) |
1936 | { |
1937 | struct spi_controller *ctlr = |
1938 | container_of(work, struct spi_controller, pump_messages); |
1939 | |
__spi_pump_messages(ctlr, true);
1941 | } |
1942 | |
1943 | /** |
1944 | * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp |
1945 | * @ctlr: Pointer to the spi_controller structure of the driver |
1946 | * @xfer: Pointer to the transfer being timestamped |
1947 | * @progress: How many words (not bytes) have been transferred so far |
1948 | * @irqs_off: If true, will disable IRQs and preemption for the duration of the |
* transfer, for less jitter in time measurement. Only compatible
* with PIO drivers. If true, it must be followed by a call to
* spi_take_timestamp_post or otherwise the system will crash.
* WARNING: for fully predictable results, the CPU frequency must
* also be under control (governor).
1954 | * |
1955 | * This is a helper for drivers to collect the beginning of the TX timestamp |
1956 | * for the requested byte from the SPI transfer. The frequency with which this |
1957 | * function must be called (once per word, once for the whole transfer, once |
1958 | * per batch of words etc) is arbitrary as long as the @tx buffer offset is |
1959 | * greater than or equal to the requested byte at the time of the call. The |
1960 | * timestamp is only taken once, at the first such call. It is assumed that |
1961 | * the driver advances its @tx buffer pointer monotonically. |
1962 | */ |
1963 | void spi_take_timestamp_pre(struct spi_controller *ctlr, |
1964 | struct spi_transfer *xfer, |
1965 | size_t progress, bool irqs_off) |
1966 | { |
1967 | if (!xfer->ptp_sts) |
1968 | return; |
1969 | |
1970 | if (xfer->timestamped) |
1971 | return; |
1972 | |
1973 | if (progress > xfer->ptp_sts_word_pre) |
1974 | return; |
1975 | |
1976 | /* Capture the resolution of the timestamp */ |
1977 | xfer->ptp_sts_word_pre = progress; |
1978 | |
1979 | if (irqs_off) { |
1980 | local_irq_save(ctlr->irq_flags); |
1981 | preempt_disable(); |
1982 | } |
1983 | |
ptp_read_system_prets(xfer->ptp_sts);
1985 | } |
1986 | EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); |
1987 | |
1988 | /** |
1989 | * spi_take_timestamp_post - helper to collect the end of the TX timestamp |
1990 | * @ctlr: Pointer to the spi_controller structure of the driver |
1991 | * @xfer: Pointer to the transfer being timestamped |
1992 | * @progress: How many words (not bytes) have been transferred so far |
1993 | * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. |
1994 | * |
1995 | * This is a helper for drivers to collect the end of the TX timestamp for |
1996 | * the requested byte from the SPI transfer. Can be called with an arbitrary |
1997 | * frequency: only the first call where @tx exceeds or is equal to the |
1998 | * requested word will be timestamped. |
1999 | */ |
2000 | void spi_take_timestamp_post(struct spi_controller *ctlr, |
2001 | struct spi_transfer *xfer, |
2002 | size_t progress, bool irqs_off) |
2003 | { |
2004 | if (!xfer->ptp_sts) |
2005 | return; |
2006 | |
2007 | if (xfer->timestamped) |
2008 | return; |
2009 | |
2010 | if (progress < xfer->ptp_sts_word_post) |
2011 | return; |
2012 | |
ptp_read_system_postts(xfer->ptp_sts);
2014 | |
2015 | if (irqs_off) { |
2016 | local_irq_restore(ctlr->irq_flags); |
2017 | preempt_enable(); |
2018 | } |
2019 | |
2020 | /* Capture the resolution of the timestamp */ |
2021 | xfer->ptp_sts_word_post = progress; |
2022 | |
2023 | xfer->timestamped = 1; |
2024 | } |
2025 | EXPORT_SYMBOL_GPL(spi_take_timestamp_post); |
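
/*
 * Illustrative sketch of a PIO driver's word-by-word TX loop using the two
 * helpers above ("my_" and "priv" names are hypothetical):
 *
 *	for (i = 0; i < xfer->len / priv->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, priv->irqs_off);
 *		my_write_tx_word(priv, xfer, i);
 *		spi_take_timestamp_post(ctlr, xfer, i, priv->irqs_off);
 *	}
 *
 * Only the calls that bracket the word selected through
 * xfer->ptp_sts_word_pre and xfer->ptp_sts_word_post actually record a
 * timestamp; all other iterations return early.
 */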
2026 | |
2027 | /** |
2028 | * spi_set_thread_rt - set the controller to pump at realtime priority |
2029 | * @ctlr: controller to boost priority of |
2030 | * |
2031 | * This can be called because the controller requested realtime priority |
2032 | * (by setting the ->rt value before calling spi_register_controller()) or |
2033 | * because a device on the bus said that its transfers needed realtime |
2034 | * priority. |
2035 | * |
2036 | * NOTE: at the moment if any device on a bus says it needs realtime then |
2037 | * the thread will be at realtime priority for all transfers on that |
2038 | * controller. If this eventually becomes a problem we may see if we can |
2039 | * find a way to boost the priority only temporarily during relevant |
2040 | * transfers. |
2041 | */ |
2042 | static void spi_set_thread_rt(struct spi_controller *ctlr) |
2043 | { |
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
sched_set_fifo(ctlr->kworker->task);
2047 | } |
2048 | |
2049 | static int spi_init_queue(struct spi_controller *ctlr) |
2050 | { |
2051 | ctlr->running = false; |
2052 | ctlr->busy = false; |
2053 | ctlr->queue_empty = true; |
2054 | |
ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
dev_err(&ctlr->dev, "failed to create message pump kworker\n");
return PTR_ERR(ctlr->kworker);
2059 | } |
2060 | |
2061 | kthread_init_work(&ctlr->pump_messages, spi_pump_messages); |
2062 | |
2063 | /* |
2064 | * Controller config will indicate if this controller should run the |
2065 | * message pump with high (realtime) priority to reduce the transfer |
2066 | * latency on the bus by minimising the delay between a transfer |
2067 | * request and the scheduling of the message pump thread. Without this |
2068 | * setting the message pump thread will remain at default priority. |
2069 | */ |
2070 | if (ctlr->rt) |
2071 | spi_set_thread_rt(ctlr); |
2072 | |
2073 | return 0; |
2074 | } |
2075 | |
2076 | /** |
2077 | * spi_get_next_queued_message() - called by driver to check for queued |
2078 | * messages |
2079 | * @ctlr: the controller to check for queued messages |
2080 | * |
2081 | * If there are more messages in the queue, the next message is returned from |
2082 | * this call. |
2083 | * |
2084 | * Return: the next message in the queue, else NULL if the queue is empty. |
2085 | */ |
2086 | struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) |
2087 | { |
2088 | struct spi_message *next; |
2089 | unsigned long flags; |
2090 | |
2091 | /* Get a pointer to the next message, if any */ |
2092 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
2093 | next = list_first_entry_or_null(&ctlr->queue, struct spi_message, |
2094 | queue); |
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2096 | |
2097 | return next; |
2098 | } |
2099 | EXPORT_SYMBOL_GPL(spi_get_next_queued_message); |
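
/*
 * Illustrative use (a sketch, not a requirement of the API): a driver's
 * transfer_one_message() can peek at the head of the queue, e.g. to decide
 * whether the hardware should stay configured for a follow-up message:
 *
 *	struct spi_message *next = spi_get_next_queued_message(ctlr);
 *	bool same_device = next && next->spi == msg->spi;
 */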
2100 | |
2101 | /* |
2102 | * __spi_unoptimize_message - shared implementation of spi_unoptimize_message() |
2103 | * and spi_maybe_unoptimize_message() |
2104 | * @msg: the message to unoptimize |
2105 | * |
2106 | * Peripheral drivers should use spi_unoptimize_message() and callers inside |
2107 | * core should use spi_maybe_unoptimize_message() rather than calling this |
2108 | * function directly. |
2109 | * |
2110 | * It is not valid to call this on a message that is not currently optimized. |
2111 | */ |
2112 | static void __spi_unoptimize_message(struct spi_message *msg) |
2113 | { |
2114 | struct spi_controller *ctlr = msg->spi->controller; |
2115 | |
2116 | if (ctlr->unoptimize_message) |
2117 | ctlr->unoptimize_message(msg); |
2118 | |
spi_res_release(ctlr, msg);
2120 | |
2121 | msg->optimized = false; |
2122 | msg->opt_state = NULL; |
2123 | } |
2124 | |
2125 | /* |
2126 | * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral |
2127 | * @msg: the message to unoptimize |
2128 | * |
2129 | * This function is used to unoptimize a message if and only if it was |
2130 | * optimized by the core (via spi_maybe_optimize_message()). |
2131 | */ |
2132 | static void spi_maybe_unoptimize_message(struct spi_message *msg) |
2133 | { |
2134 | if (!msg->pre_optimized && msg->optimized) |
2135 | __spi_unoptimize_message(msg); |
2136 | } |
2137 | |
2138 | /** |
2139 | * spi_finalize_current_message() - the current message is complete |
2140 | * @ctlr: the controller to return the message to |
2141 | * |
2142 | * Called by the driver to notify the core that the message in the front of the |
2143 | * queue is complete and can be removed from the queue. |
2144 | */ |
2145 | void spi_finalize_current_message(struct spi_controller *ctlr) |
2146 | { |
2147 | struct spi_transfer *xfer; |
2148 | struct spi_message *mesg; |
2149 | int ret; |
2150 | |
2151 | mesg = ctlr->cur_msg; |
2152 | |
2153 | if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { |
2154 | list_for_each_entry(xfer, &mesg->transfers, transfer_list) { |
ptp_read_system_postts(xfer->ptp_sts);
xfer->ptp_sts_word_post = xfer->len;
}
}

if (unlikely(ctlr->ptp_sts_supported))
list_for_each_entry(xfer, &mesg->transfers, transfer_list)
WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

spi_unmap_msg(ctlr, mesg);

if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
ret);
}
}

mesg->prepared = false;

spi_maybe_unoptimize_message(mesg);

WRITE_ONCE(ctlr->cur_msg_incomplete, false);
smp_mb(); /* See __spi_pump_transfer_message()... */
if (READ_ONCE(ctlr->cur_msg_need_completion))
complete(&ctlr->cur_msg_completion);

trace_spi_message_done(mesg);
2184 | |
2185 | mesg->state = NULL; |
2186 | if (mesg->complete) |
2187 | mesg->complete(mesg->context); |
2188 | } |
2189 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); |
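
/*
 * Illustrative sketch ("my_" names are hypothetical): a controller driver
 * providing its own transfer_one_message() must call
 * spi_finalize_current_message() whether the message succeeded or not:
 *
 *	static int my_transfer_one_message(struct spi_controller *ctlr,
 *					   struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = my_do_one_transfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */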
2190 | |
2191 | static int spi_start_queue(struct spi_controller *ctlr) |
2192 | { |
2193 | unsigned long flags; |
2194 | |
2195 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
2196 | |
2197 | if (ctlr->running || ctlr->busy) { |
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2199 | return -EBUSY; |
2200 | } |
2201 | |
2202 | ctlr->running = true; |
2203 | ctlr->cur_msg = NULL; |
spin_unlock_irqrestore(&ctlr->queue_lock, flags);

kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2207 | |
2208 | return 0; |
2209 | } |
2210 | |
2211 | static int spi_stop_queue(struct spi_controller *ctlr) |
2212 | { |
2213 | unsigned long flags; |
2214 | unsigned limit = 500; |
2215 | int ret = 0; |
2216 | |
2217 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
2218 | |
2219 | /* |
2220 | * This is a bit lame, but is optimized for the common execution path. |
2221 | * A wait_queue on the ctlr->busy could be used, but then the common |
2222 | * execution path (pump_messages) would be required to call wake_up or |
2223 | * friends on every SPI message. Do this instead. |
2224 | */ |
while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
usleep_range(10000, 11000);
spin_lock_irqsave(&ctlr->queue_lock, flags);
}

if (!list_empty(&ctlr->queue) || ctlr->busy)
ret = -EBUSY;
else
ctlr->running = false;

spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2237 | |
2238 | return ret; |
2239 | } |
2240 | |
2241 | static int spi_destroy_queue(struct spi_controller *ctlr) |
2242 | { |
2243 | int ret; |
2244 | |
2245 | ret = spi_stop_queue(ctlr); |
2246 | |
2247 | /* |
2248 | * kthread_flush_worker will block until all work is done. |
2249 | * If the reason that stop_queue timed out is that the work will never |
2250 | * finish, then it does no good to call flush/stop thread, so |
2251 | * return anyway. |
2252 | */ |
2253 | if (ret) { |
dev_err(&ctlr->dev, "problem destroying queue\n");
2255 | return ret; |
2256 | } |
2257 | |
kthread_destroy_worker(ctlr->kworker);
2259 | |
2260 | return 0; |
2261 | } |
2262 | |
2263 | static int __spi_queued_transfer(struct spi_device *spi, |
2264 | struct spi_message *msg, |
2265 | bool need_pump) |
2266 | { |
2267 | struct spi_controller *ctlr = spi->controller; |
2268 | unsigned long flags; |
2269 | |
2270 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
2271 | |
2272 | if (!ctlr->running) { |
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return -ESHUTDOWN;
}
msg->actual_length = 0;
msg->status = -EINPROGRESS;

list_add_tail(&msg->queue, &ctlr->queue);
ctlr->queue_empty = false;
if (!ctlr->busy && need_pump)
kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2285 | return 0; |
2286 | } |
2287 | |
2288 | /** |
2289 | * spi_queued_transfer - transfer function for queued transfers |
2290 | * @spi: SPI device which is requesting transfer |
* @msg: SPI message to be handled; it is queued onto the driver queue
2292 | * |
2293 | * Return: zero on success, else a negative error code. |
2294 | */ |
2295 | static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) |
2296 | { |
return __spi_queued_transfer(spi, msg, true);
2298 | } |
2299 | |
2300 | static int spi_controller_initialize_queue(struct spi_controller *ctlr) |
2301 | { |
2302 | int ret; |
2303 | |
2304 | ctlr->transfer = spi_queued_transfer; |
2305 | if (!ctlr->transfer_one_message) |
2306 | ctlr->transfer_one_message = spi_transfer_one_message; |
2307 | |
2308 | /* Initialize and start queue */ |
2309 | ret = spi_init_queue(ctlr); |
2310 | if (ret) { |
dev_err(&ctlr->dev, "problem initializing queue\n");
goto err_init_queue;
}
ctlr->queued = true;
ret = spi_start_queue(ctlr);
if (ret) {
dev_err(&ctlr->dev, "problem starting queue\n");
2318 | goto err_start_queue; |
2319 | } |
2320 | |
2321 | return 0; |
2322 | |
2323 | err_start_queue: |
2324 | spi_destroy_queue(ctlr); |
2325 | err_init_queue: |
2326 | return ret; |
2327 | } |
2328 | |
2329 | /** |
* spi_flush_queue - Send all pending messages in the queue from the caller's
2331 | * context |
2332 | * @ctlr: controller to process queue for |
2333 | * |
2334 | * This should be used when one wants to ensure all pending messages have been |
* sent before doing something. It is used by the spi-mem code to make sure SPI
2336 | * memory operations do not preempt regular SPI transfers that have been queued |
2337 | * before the spi-mem operation. |
2338 | */ |
2339 | void spi_flush_queue(struct spi_controller *ctlr) |
2340 | { |
2341 | if (ctlr->transfer == spi_queued_transfer) |
__spi_pump_messages(ctlr, false);
2343 | } |
2344 | |
2345 | /*-------------------------------------------------------------------------*/ |
2346 | |
2347 | #if defined(CONFIG_OF) |
2348 | static void of_spi_parse_dt_cs_delay(struct device_node *nc, |
2349 | struct spi_delay *delay, const char *prop) |
2350 | { |
2351 | u32 value; |
2352 | |
if (!of_property_read_u32(nc, prop, &value)) {
2354 | if (value > U16_MAX) { |
2355 | delay->value = DIV_ROUND_UP(value, 1000); |
2356 | delay->unit = SPI_DELAY_UNIT_USECS; |
2357 | } else { |
2358 | delay->value = value; |
2359 | delay->unit = SPI_DELAY_UNIT_NSECS; |
2360 | } |
2361 | } |
2362 | } |
2363 | |
2364 | static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, |
2365 | struct device_node *nc) |
2366 | { |
2367 | u32 value, cs[SPI_CS_CNT_MAX]; |
2368 | int rc, idx; |
2369 | |
2370 | /* Mode (clock phase/polarity/etc.) */ |
if (of_property_read_bool(nc, "spi-cpha"))
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
if (of_property_read_bool(nc, "spi-cs-high"))
spi->mode |= SPI_CS_HIGH;

/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2384 | switch (value) { |
2385 | case 0: |
2386 | spi->mode |= SPI_NO_TX; |
2387 | break; |
2388 | case 1: |
2389 | break; |
2390 | case 2: |
2391 | spi->mode |= SPI_TX_DUAL; |
2392 | break; |
2393 | case 4: |
2394 | spi->mode |= SPI_TX_QUAD; |
2395 | break; |
2396 | case 8: |
2397 | spi->mode |= SPI_TX_OCTAL; |
2398 | break; |
2399 | default: |
dev_warn(&ctlr->dev,
"spi-tx-bus-width %d not supported\n",
2402 | value); |
2403 | break; |
2404 | } |
2405 | } |
2406 | |
if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2408 | switch (value) { |
2409 | case 0: |
2410 | spi->mode |= SPI_NO_RX; |
2411 | break; |
2412 | case 1: |
2413 | break; |
2414 | case 2: |
2415 | spi->mode |= SPI_RX_DUAL; |
2416 | break; |
2417 | case 4: |
2418 | spi->mode |= SPI_RX_QUAD; |
2419 | break; |
2420 | case 8: |
2421 | spi->mode |= SPI_RX_OCTAL; |
2422 | break; |
2423 | default: |
dev_warn(&ctlr->dev,
"spi-rx-bus-width %d not supported\n",
2426 | value); |
2427 | break; |
2428 | } |
2429 | } |
2430 | |
2431 | if (spi_controller_is_slave(ctlr)) { |
if (!of_node_name_eq(nc, "slave")) {
dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
nc);
return -EINVAL;
}
return 0;
}

if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2442 | return -EINVAL; |
2443 | } |
2444 | |
2445 | spi_set_all_cs_unused(spi); |
2446 | |
2447 | /* Device address */ |
rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
SPI_CS_CNT_MAX);
if (rc < 0) {
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, rc);
return rc;
}
if (rc > ctlr->num_chipselect) {
dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
nc, rc);
return rc;
}
if ((of_property_read_bool(nc, "parallel-memories")) &&
(!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
return -EINVAL;
}
for (idx = 0; idx < rc; idx++)
spi_set_chipselect(spi, idx, cs[idx]);
2467 | |
2468 | /* |
2469 | * By default spi->chip_select[0] will hold the physical CS number, |
2470 | * so set bit 0 in spi->cs_index_mask. |
2471 | */ |
2472 | spi->cs_index_mask = BIT(0); |
2473 | |
2474 | /* Device speed */ |
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;

/* Device CS delays */
of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2482 | |
2483 | return 0; |
2484 | } |
2485 | |
2486 | static struct spi_device * |
2487 | of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) |
2488 | { |
2489 | struct spi_device *spi; |
2490 | int rc; |
2491 | |
2492 | /* Alloc an spi_device */ |
2493 | spi = spi_alloc_device(ctlr); |
2494 | if (!spi) { |
dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
rc = -ENOMEM;
goto err_out;
}

/* Select device driver */
rc = of_alias_from_compatible(nc, spi->modalias,
sizeof(spi->modalias));
if (rc < 0) {
dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
goto err_out;
}

rc = of_spi_parse_dt(ctlr, spi, nc);
if (rc)
goto err_out;

/* Store a pointer to the node in the device structure */
of_node_get(nc);

device_set_node(&spi->dev, of_fwnode_handle(nc));

/* Register the new device */
rc = spi_add_device(spi);
if (rc) {
dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
goto err_of_node_put;
}

return spi;

err_of_node_put:
of_node_put(nc);
err_out:
spi_dev_put(spi);
return ERR_PTR(rc);
2531 | } |
2532 | |
2533 | /** |
2534 | * of_register_spi_devices() - Register child devices onto the SPI bus |
2535 | * @ctlr: Pointer to spi_controller device |
2536 | * |
* Registers an spi_device for each child node of the controller node which
2538 | * represents a valid SPI slave. |
2539 | */ |
2540 | static void of_register_spi_devices(struct spi_controller *ctlr) |
2541 | { |
2542 | struct spi_device *spi; |
2543 | struct device_node *nc; |
2544 | |
2545 | for_each_available_child_of_node(ctlr->dev.of_node, nc) { |
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
spi = of_register_spi_device(ctlr, nc);
if (IS_ERR(spi)) {
dev_warn(&ctlr->dev,
"Failed to create SPI device for %pOF\n", nc);
of_node_clear_flag(nc, OF_POPULATED);
2553 | } |
2554 | } |
2555 | } |
2556 | #else |
2557 | static void of_register_spi_devices(struct spi_controller *ctlr) { } |
2558 | #endif |
2559 | |
2560 | /** |
2561 | * spi_new_ancillary_device() - Register ancillary SPI device |
2562 | * @spi: Pointer to the main SPI device registering the ancillary device |
2563 | * @chip_select: Chip Select of the ancillary device |
2564 | * |
2565 | * Register an ancillary SPI device; for example some chips have a chip-select |
2566 | * for normal device usage and another one for setup/firmware upload. |
2567 | * |
* This may only be called from the main SPI device's probe routine.
*
* Return: a pointer to the new spi_device on success, or ERR_PTR on failure
2571 | */ |
2572 | struct spi_device *spi_new_ancillary_device(struct spi_device *spi, |
2573 | u8 chip_select) |
2574 | { |
2575 | struct spi_controller *ctlr = spi->controller; |
2576 | struct spi_device *ancillary; |
2577 | int rc = 0; |
2578 | |
2579 | /* Alloc an spi_device */ |
2580 | ancillary = spi_alloc_device(ctlr); |
2581 | if (!ancillary) { |
2582 | rc = -ENOMEM; |
2583 | goto err_out; |
2584 | } |
2585 | |
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

/* Use provided chip-select for ancillary device */
spi_set_all_cs_unused(ancillary);
spi_set_chipselect(ancillary, 0, chip_select);

/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
/*
* By default spi->chip_select[0] will hold the physical CS number,
* so set bit 0 in spi->cs_index_mask.
*/
ancillary->cs_index_mask = BIT(0);

WARN_ON(!mutex_is_locked(&ctlr->add_lock));

/* Register the new device */
rc = __spi_add_device(ancillary);
if (rc) {
dev_err(&spi->dev, "failed to register ancillary device\n");
goto err_out;
}

return ancillary;

err_out:
spi_dev_put(ancillary);
return ERR_PTR(rc);
2615 | } |
2616 | EXPORT_SYMBOL_GPL(spi_new_ancillary_device); |
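
/*
 * Illustrative sketch ("my_" names are hypothetical): registering a second,
 * setup-only chip select from the main device's probe():
 *
 *	static int my_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *aux;
 *
 *		aux = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(aux))
 *			return PTR_ERR(aux);
 *		...
 *	}
 */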
2617 | |
2618 | #ifdef CONFIG_ACPI |
2619 | struct acpi_spi_lookup { |
2620 | struct spi_controller *ctlr; |
2621 | u32 max_speed_hz; |
2622 | u32 mode; |
2623 | int irq; |
2624 | u8 bits_per_word; |
2625 | u8 chip_select; |
2626 | int n; |
2627 | int index; |
2628 | }; |
2629 | |
2630 | static int acpi_spi_count(struct acpi_resource *ares, void *data) |
2631 | { |
2632 | struct acpi_resource_spi_serialbus *sb; |
2633 | int *count = data; |
2634 | |
2635 | if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) |
2636 | return 1; |
2637 | |
2638 | sb = &ares->data.spi_serial_bus; |
2639 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI) |
2640 | return 1; |
2641 | |
2642 | *count = *count + 1; |
2643 | |
2644 | return 1; |
2645 | } |
2646 | |
2647 | /** |
2648 | * acpi_spi_count_resources - Count the number of SpiSerialBus resources |
2649 | * @adev: ACPI device |
2650 | * |
2651 | * Return: the number of SpiSerialBus resources in the ACPI-device's |
2652 | * resource-list; or a negative error code. |
2653 | */ |
2654 | int acpi_spi_count_resources(struct acpi_device *adev) |
2655 | { |
2656 | LIST_HEAD(r); |
2657 | int count = 0; |
2658 | int ret; |
2659 | |
ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
if (ret < 0)
return ret;

acpi_dev_free_resource_list(&r);
2665 | |
2666 | return count; |
2667 | } |
2668 | EXPORT_SYMBOL_GPL(acpi_spi_count_resources); |
2669 | |
2670 | static void acpi_spi_parse_apple_properties(struct acpi_device *dev, |
2671 | struct acpi_spi_lookup *lookup) |
2672 | { |
2673 | const union acpi_object *obj; |
2674 | |
2675 | if (!x86_apple_machine) |
2676 | return; |
2677 | |
if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length >= 4)
lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8)
lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
lookup->mode |= SPI_LSB_FIRST;

if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
lookup->mode |= SPI_CPOL;

if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
lookup->mode |= SPI_CPHA;
2697 | } |
2698 | |
2699 | static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) |
2700 | { |
2701 | struct acpi_spi_lookup *lookup = data; |
2702 | struct spi_controller *ctlr = lookup->ctlr; |
2703 | |
2704 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { |
2705 | struct acpi_resource_spi_serialbus *sb; |
2706 | acpi_handle parent_handle; |
2707 | acpi_status status; |
2708 | |
2709 | sb = &ares->data.spi_serial_bus; |
2710 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { |
2711 | |
2712 | if (lookup->index != -1 && lookup->n++ != lookup->index) |
2713 | return 1; |
2714 | |
status = acpi_get_handle(NULL,
sb->resource_source.string_ptr,
&parent_handle);
2718 | |
2719 | if (ACPI_FAILURE(status)) |
2720 | return -ENODEV; |
2721 | |
2722 | if (ctlr) { |
2723 | if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle) |
2724 | return -ENODEV; |
2725 | } else { |
2726 | struct acpi_device *adev; |
2727 | |
adev = acpi_fetch_acpi_dev(parent_handle);
2729 | if (!adev) |
2730 | return -ENODEV; |
2731 | |
2732 | ctlr = acpi_spi_find_controller_by_adev(adev); |
2733 | if (!ctlr) |
2734 | return -EPROBE_DEFER; |
2735 | |
2736 | lookup->ctlr = ctlr; |
2737 | } |
2738 | |
2739 | /* |
2740 | * ACPI DeviceSelection numbering is handled by the |
2741 | * host controller driver in Windows and can vary |
2742 | * from driver to driver. In Linux we always expect |
2743 | * 0 .. max - 1 so we need to ask the driver to |
2744 | * translate between the two schemes. |
2745 | */ |
2746 | if (ctlr->fw_translate_cs) { |
2747 | int cs = ctlr->fw_translate_cs(ctlr, |
2748 | sb->device_selection); |
2749 | if (cs < 0) |
2750 | return cs; |
2751 | lookup->chip_select = cs; |
2752 | } else { |
2753 | lookup->chip_select = sb->device_selection; |
2754 | } |
2755 | |
2756 | lookup->max_speed_hz = sb->connection_speed; |
2757 | lookup->bits_per_word = sb->data_bit_length; |
2758 | |
2759 | if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) |
2760 | lookup->mode |= SPI_CPHA; |
2761 | if (sb->clock_polarity == ACPI_SPI_START_HIGH) |
2762 | lookup->mode |= SPI_CPOL; |
2763 | if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) |
2764 | lookup->mode |= SPI_CS_HIGH; |
2765 | } |
2766 | } else if (lookup->irq < 0) { |
2767 | struct resource r; |
2768 | |
if (acpi_dev_resource_interrupt(ares, 0, &r))
2770 | lookup->irq = r.start; |
2771 | } |
2772 | |
2773 | /* Always tell the ACPI core to skip this resource */ |
2774 | return 1; |
2775 | } |
2776 | |
2777 | /** |
2778 | * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information |
2779 | * @ctlr: controller to which the spi device belongs |
2780 | * @adev: ACPI Device for the spi device |
2781 | * @index: Index of the spi resource inside the ACPI Node |
2782 | * |
* This should be used to allocate a new SPI device from an ACPI device node.
* The caller is responsible for calling spi_add_device to register the SPI device.
*
* If ctlr is set to NULL, the controller for the SPI device will be looked up
2787 | * using the resource. |
2788 | * If index is set to -1, index is not used. |
2789 | * Note: If index is -1, ctlr must be set. |
2790 | * |
2791 | * Return: a pointer to the new device, or ERR_PTR on error. |
2792 | */ |
2793 | struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, |
2794 | struct acpi_device *adev, |
2795 | int index) |
2796 | { |
2797 | acpi_handle parent_handle = NULL; |
2798 | struct list_head resource_list; |
2799 | struct acpi_spi_lookup lookup = {}; |
2800 | struct spi_device *spi; |
2801 | int ret; |
2802 | |
2803 | if (!ctlr && index == -1) |
return ERR_PTR(-EINVAL);

lookup.ctlr = ctlr;
lookup.irq = -1;
lookup.index = index;
lookup.n = 0;

INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
acpi_spi_add_resource, &lookup);
acpi_dev_free_resource_list(&resource_list);

if (ret < 0)
/* Found SPI in _CRS but it points to another controller */
return ERR_PTR(ret);

if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
/* Apple does not use _CRS but nested devices for SPI slaves */
acpi_spi_parse_apple_properties(adev, &lookup);
}

if (!lookup.max_speed_hz)
return ERR_PTR(-ENODEV);

spi = spi_alloc_device(lookup.ctlr);
if (!spi) {
dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
dev_name(&adev->dev));
return ERR_PTR(-ENOMEM);
}

spi_set_all_cs_unused(spi);
spi_set_chipselect(spi, 0, lookup.chip_select);
2839 | |
2840 | ACPI_COMPANION_SET(&spi->dev, adev); |
2841 | spi->max_speed_hz = lookup.max_speed_hz; |
2842 | spi->mode |= lookup.mode; |
2843 | spi->irq = lookup.irq; |
2844 | spi->bits_per_word = lookup.bits_per_word; |
2845 | /* |
2846 | * By default spi->chip_select[0] will hold the physical CS number, |
2847 | * so set bit 0 in spi->cs_index_mask. |
2848 | */ |
2849 | spi->cs_index_mask = BIT(0); |
2850 | |
2851 | return spi; |
2852 | } |
2853 | EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); |
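
/*
 * Illustrative use (error handling abbreviated): a driver that found an
 * extra SpiSerialBus entry on its ACPI node can instantiate it as a
 * separate SPI device:
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 1);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */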
2854 | |
2855 | static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, |
2856 | struct acpi_device *adev) |
2857 | { |
2858 | struct spi_device *spi; |
2859 | |
if (acpi_bus_get_status(adev) || !adev->status.present ||
acpi_device_enumerated(adev))
return AE_OK;

spi = acpi_spi_device_alloc(ctlr, adev, -1);
if (IS_ERR(spi)) {
if (PTR_ERR(spi) == -ENOMEM)
return AE_NO_MEMORY;
else
return AE_OK;
}

acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));

if (spi->irq < 0)
spi->irq = acpi_dev_gpio_irq_get(adev, 0);

acpi_device_set_enumerated(adev);

adev->power.flags.ignore_parent = true;
if (spi_add_device(spi)) {
adev->power.flags.ignore_parent = false;
dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2884 | dev_name(&adev->dev)); |
2885 | spi_dev_put(spi); |
2886 | } |
2887 | |
2888 | return AE_OK; |
2889 | } |
2890 | |
2891 | static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, |
2892 | void *data, void **return_value) |
2893 | { |
2894 | struct acpi_device *adev = acpi_fetch_acpi_dev(handle); |
2895 | struct spi_controller *ctlr = data; |
2896 | |
2897 | if (!adev) |
2898 | return AE_OK; |
2899 | |
2900 | return acpi_register_spi_device(ctlr, adev); |
2901 | } |
2902 | |
2903 | #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 |
2904 | |
2905 | static void acpi_register_spi_devices(struct spi_controller *ctlr) |
2906 | { |
2907 | acpi_status status; |
2908 | acpi_handle handle; |
2909 | |
2910 | handle = ACPI_HANDLE(ctlr->dev.parent); |
2911 | if (!handle) |
2912 | return; |
2913 | |
2914 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
2915 | SPI_ACPI_ENUMERATE_MAX_DEPTH, |
acpi_spi_add_device, NULL, ctlr, NULL);
2917 | if (ACPI_FAILURE(status)) |
dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2919 | } |
2920 | #else |
2921 | static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} |
2922 | #endif /* CONFIG_ACPI */ |
2923 | |
2924 | static void spi_controller_release(struct device *dev) |
2925 | { |
2926 | struct spi_controller *ctlr; |
2927 | |
2928 | ctlr = container_of(dev, struct spi_controller, dev); |
kfree(ctlr);
}

static struct class spi_master_class = {
.name = "spi_master",
2934 | .dev_release = spi_controller_release, |
2935 | .dev_groups = spi_master_groups, |
2936 | }; |
2937 | |
2938 | #ifdef CONFIG_SPI_SLAVE |
2939 | /** |
2940 | * spi_slave_abort - abort the ongoing transfer request on an SPI slave |
2941 | * controller |
2942 | * @spi: device used for the current transfer |
2943 | */ |
2944 | int spi_slave_abort(struct spi_device *spi) |
2945 | { |
2946 | struct spi_controller *ctlr = spi->controller; |
2947 | |
2948 | if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) |
2949 | return ctlr->slave_abort(ctlr); |
2950 | |
2951 | return -ENOTSUPP; |
2952 | } |
2953 | EXPORT_SYMBOL_GPL(spi_slave_abort); |
2954 | |
2955 | int spi_target_abort(struct spi_device *spi) |
2956 | { |
2957 | struct spi_controller *ctlr = spi->controller; |
2958 | |
2959 | if (spi_controller_is_target(ctlr) && ctlr->target_abort) |
2960 | return ctlr->target_abort(ctlr); |
2961 | |
2962 | return -ENOTSUPP; |
2963 | } |
2964 | EXPORT_SYMBOL_GPL(spi_target_abort); |
2965 | |
2966 | static ssize_t slave_show(struct device *dev, struct device_attribute *attr, |
2967 | char *buf) |
2968 | { |
2969 | struct spi_controller *ctlr = container_of(dev, struct spi_controller, |
2970 | dev); |
2971 | struct device *child; |
2972 | |
child = device_find_any_child(&ctlr->dev);
return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2975 | } |
2976 | |
2977 | static ssize_t slave_store(struct device *dev, struct device_attribute *attr, |
2978 | const char *buf, size_t count) |
2979 | { |
2980 | struct spi_controller *ctlr = container_of(dev, struct spi_controller, |
2981 | dev); |
2982 | struct spi_device *spi; |
2983 | struct device *child; |
2984 | char name[32]; |
2985 | int rc; |
2986 | |
rc = sscanf(buf, "%31s", name);
if (rc != 1 || !name[0])
return -EINVAL;

child = device_find_any_child(&ctlr->dev);
if (child) {
/* Remove registered slave */
device_unregister(child);
put_device(child);
}

if (strcmp(name, "(null)")) {
2999 | /* Register new slave */ |
3000 | spi = spi_alloc_device(ctlr); |
3001 | if (!spi) |
3002 | return -ENOMEM; |
3003 | |
3004 | strscpy(spi->modalias, name, sizeof(spi->modalias)); |
3005 | |
3006 | rc = spi_add_device(spi); |
3007 | if (rc) { |
3008 | spi_dev_put(spi); |
3009 | return rc; |
3010 | } |
3011 | } |
3012 | |
3013 | return count; |
3014 | } |
3015 | |
3016 | static DEVICE_ATTR_RW(slave); |
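
/*
 * From userspace, a slave protocol handler is bound and unbound through
 * this attribute; for example, with a slave controller registered as bus 0
 * and the spi-slave-time protocol driver loaded:
 *
 *	# echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	# echo "(null)" > /sys/class/spi_slave/spi0/slave
 *
 * The first write registers a slave device bound to the named protocol
 * driver; writing "(null)" removes it again, matching slave_store() above.
 */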
3017 | |
3018 | static struct attribute *spi_slave_attrs[] = { |
3019 | &dev_attr_slave.attr, |
3020 | NULL, |
3021 | }; |
3022 | |
3023 | static const struct attribute_group spi_slave_group = { |
3024 | .attrs = spi_slave_attrs, |
3025 | }; |
3026 | |
3027 | static const struct attribute_group *spi_slave_groups[] = { |
3028 | &spi_controller_statistics_group, |
3029 | &spi_slave_group, |
3030 | NULL, |
3031 | }; |
3032 | |
3033 | static struct class spi_slave_class = { |
.name = "spi_slave",
3035 | .dev_release = spi_controller_release, |
3036 | .dev_groups = spi_slave_groups, |
3037 | }; |
3038 | #else |
3039 | extern struct class spi_slave_class; /* dummy */ |
3040 | #endif |
3041 | |
3042 | /** |
3043 | * __spi_alloc_controller - allocate an SPI master or slave controller |
3044 | * @dev: the controller, possibly using the platform_bus |
3045 | * @size: how much zeroed driver-private data to allocate; the pointer to this |
3046 | * memory is in the driver_data field of the returned device, accessible |
3047 | * with spi_controller_get_devdata(); the memory is cacheline aligned; |
3048 | * drivers granting DMA access to portions of their private data need to |
3049 | * round up @size using ALIGN(size, dma_get_cache_alignment()). |
3050 | * @slave: flag indicating whether to allocate an SPI master (false) or SPI |
3051 | * slave (true) controller |
3052 | * Context: can sleep |
3053 | * |
3054 | * This call is used only by SPI controller drivers, which are the |
3055 | * only ones directly touching chip registers. It's how they allocate |
3056 | * an spi_controller structure, prior to calling spi_register_controller(). |
3057 | * |
3058 | * This must be called from context that can sleep. |
3059 | * |
3060 | * The caller is responsible for assigning the bus number and initializing the |
3061 | * controller's methods before calling spi_register_controller(); and (after |
3062 | * errors adding the device) calling spi_controller_put() to prevent a memory |
3063 | * leak. |
3064 | * |
3065 | * Return: the SPI controller structure on success, else NULL. |
3066 | */ |
3067 | struct spi_controller *__spi_alloc_controller(struct device *dev, |
3068 | unsigned int size, bool slave) |
3069 | { |
3070 | struct spi_controller *ctlr; |
3071 | size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); |
3072 | |
3073 | if (!dev) |
3074 | return NULL; |
3075 | |
ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
if (!ctlr)
return NULL;

device_initialize(&ctlr->dev);
INIT_LIST_HEAD(&ctlr->queue);
3082 | spin_lock_init(&ctlr->queue_lock); |
3083 | spin_lock_init(&ctlr->bus_lock_spinlock); |
3084 | mutex_init(&ctlr->bus_lock_mutex); |
3085 | mutex_init(&ctlr->io_mutex); |
3086 | mutex_init(&ctlr->add_lock); |
3087 | ctlr->bus_num = -1; |
3088 | ctlr->num_chipselect = 1; |
3089 | ctlr->slave = slave; |
3090 | if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) |
3091 | ctlr->dev.class = &spi_slave_class; |
3092 | else |
3093 | ctlr->dev.class = &spi_master_class; |
3094 | ctlr->dev.parent = dev; |
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3097 | |
3098 | return ctlr; |
3099 | } |
3100 | EXPORT_SYMBOL_GPL(__spi_alloc_controller); |
3101 | |
3102 | static void devm_spi_release_controller(struct device *dev, void *ctlr) |
3103 | { |
spi_controller_put(*(struct spi_controller **)ctlr);
3105 | } |
3106 | |
3107 | /** |
3108 | * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() |
3109 | * @dev: physical device of SPI controller |
3110 | * @size: how much zeroed driver-private data to allocate |
3111 | * @slave: whether to allocate an SPI master (false) or SPI slave (true) |
3112 | * Context: can sleep |
3113 | * |
3114 | * Allocate an SPI controller and automatically release a reference on it |
3115 | * when @dev is unbound from its driver. Drivers are thus relieved from |
3116 | * having to call spi_controller_put(). |
3117 | * |
3118 | * The arguments to this function are identical to __spi_alloc_controller(). |
3119 | * |
3120 | * Return: the SPI controller structure on success, else NULL. |
3121 | */ |
3122 | struct spi_controller *__devm_spi_alloc_controller(struct device *dev, |
3123 | unsigned int size, |
3124 | bool slave) |
3125 | { |
3126 | struct spi_controller **ptr, *ctlr; |
3127 | |
3128 | ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), |
3129 | GFP_KERNEL); |
3130 | if (!ptr) |
3131 | return NULL; |
3132 | |
3133 | ctlr = __spi_alloc_controller(dev, size, slave); |
3134 | if (ctlr) { |
3135 | ctlr->devm_allocated = true; |
3136 | *ptr = ctlr; |
devres_add(dev, ptr);
} else {
devres_free(ptr);
3140 | } |
3141 | |
3142 | return ctlr; |
3143 | } |
3144 | EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); |
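
/*
 * Typical use through the devm_spi_alloc_master() wrapper from
 * <linux/spi/spi.h> (illustrative sketch; "my_" names are hypothetical):
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->transfer_one = my_transfer_one;
 *
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */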
3145 | |
3146 | /** |
3147 | * spi_get_gpio_descs() - grab chip select GPIOs for the master |
3148 | * @ctlr: The SPI master to grab GPIO descriptors for |
3149 | */ |
3150 | static int spi_get_gpio_descs(struct spi_controller *ctlr) |
3151 | { |
3152 | int nb, i; |
3153 | struct gpio_desc **cs; |
3154 | struct device *dev = &ctlr->dev; |
3155 | unsigned long native_cs_mask = 0; |
3156 | unsigned int num_cs_gpios = 0; |
3157 | |
nb = gpiod_count(dev, "cs");
if (nb < 0) {
/* No GPIOs at all is fine, else return the error */
if (nb == -ENOENT)
return 0;
return nb;
}

ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3169 | GFP_KERNEL); |
3170 | if (!cs) |
3171 | return -ENOMEM; |
3172 | ctlr->cs_gpiods = cs; |
3173 | |
3174 | for (i = 0; i < nb; i++) { |
3175 | /* |
3176 | * Most chipselects are active low, the inverted |
3177 | * semantics are handled by special quirks in gpiolib, |
3178 | * so initializing them GPIOD_OUT_LOW here means |
3179 | * "unasserted", in most cases this will drive the physical |
3180 | * line high. |
3181 | */ |
cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
GPIOD_OUT_LOW);
if (IS_ERR(cs[i]))
return PTR_ERR(cs[i]);
3186 | |
3187 | if (cs[i]) { |
3188 | /* |
3189 | * If we find a CS GPIO, name it after the device and |
3190 | * chip select line. |
3191 | */ |
3192 | char *gpioname; |
3193 | |
gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
dev_name(dev), i);
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
3199 | num_cs_gpios++; |
3200 | continue; |
3201 | } |
3202 | |
3203 | if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { |
dev_err(dev, "Invalid native chip select %d\n", i);
3205 | return -EINVAL; |
3206 | } |
3207 | native_cs_mask |= BIT(i); |
3208 | } |
3209 | |
3210 | ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; |
3211 | |
3212 | if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios && |
3213 | ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { |
dev_err(dev, "No unused native chip select available\n");
3215 | return -EINVAL; |
3216 | } |
3217 | |
3218 | return 0; |
3219 | } |
3220 | |
3221 | static int spi_controller_check_ops(struct spi_controller *ctlr) |
3222 | { |
3223 | /* |
3224 | * The controller may implement only the high-level SPI-memory like |
* operations if it does not support regular SPI transfers, and this is a
* valid use case.
* If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
* one of the ->transfer_xxx() methods be implemented.
3229 | */ |
3230 | if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { |
3231 | if (!ctlr->transfer && !ctlr->transfer_one && |
3232 | !ctlr->transfer_one_message) { |
3233 | return -EINVAL; |
3234 | } |
3235 | } |
3236 | |
3237 | return 0; |
3238 | } |
3239 | |
3240 | /* Allocate dynamic bus number using Linux idr */ |
3241 | static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end) |
3242 | { |
3243 | int id; |
3244 | |
3245 | mutex_lock(&board_lock); |
id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
3249 | return id == -ENOSPC ? -EBUSY : id; |
3250 | ctlr->bus_num = id; |
3251 | return 0; |
3252 | } |
3253 | |
3254 | /** |
3255 | * spi_register_controller - register SPI master or slave controller |
3256 | * @ctlr: initialized master, originally from spi_alloc_master() or |
3257 | * spi_alloc_slave() |
3258 | * Context: can sleep |
3259 | * |
3260 | * SPI controllers connect to their drivers using some non-SPI bus, |
3261 | * such as the platform bus. The final stage of probe() in that code |
3262 | * includes calling spi_register_controller() to hook up to this SPI bus glue. |
3263 | * |
3264 | * SPI controllers use board specific (often SOC specific) bus numbers, |
3265 | * and board-specific addressing for SPI devices combines those numbers |
3266 | * with chip select numbers. Since SPI does not directly support dynamic |
3267 | * device identification, boards need configuration tables telling which |
3268 | * chip is at which address. |
3269 | * |
3270 | * This must be called from context that can sleep. It returns zero on |
3271 | * success, else a negative error code (dropping the controller's refcount). |
3272 | * After a successful return, the caller is responsible for calling |
3273 | * spi_unregister_controller(). |
3274 | * |
3275 | * Return: zero on success, else a negative error code. |
3276 | */ |
3277 | int spi_register_controller(struct spi_controller *ctlr) |
3278 | { |
3279 | struct device *dev = ctlr->dev.parent; |
3280 | struct boardinfo *bi; |
3281 | int first_dynamic; |
3282 | int status; |
3283 | int idx; |
3284 | |
3285 | if (!dev) |
3286 | return -ENODEV; |
3287 | |
3288 | /* |
3289 | * Make sure all necessary hooks are implemented before registering |
3290 | * the SPI controller. |
3291 | */ |
3292 | status = spi_controller_check_ops(ctlr); |
3293 | if (status) |
3294 | return status; |
3295 | |
3296 | if (ctlr->bus_num < 0) |
		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3298 | if (ctlr->bus_num >= 0) { |
3299 | /* Devices with a fixed bus num must check-in with the num */ |
		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3301 | if (status) |
3302 | return status; |
3303 | } |
3304 | if (ctlr->bus_num < 0) { |
		first_dynamic = of_alias_get_highest_id("spi");
3306 | if (first_dynamic < 0) |
3307 | first_dynamic = 0; |
3308 | else |
3309 | first_dynamic++; |
3310 | |
		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3312 | if (status) |
3313 | return status; |
3314 | } |
3315 | ctlr->bus_lock_flag = 0; |
	init_completion(&ctlr->xfer_completion);
	init_completion(&ctlr->cur_msg_completion);
3318 | if (!ctlr->max_dma_len) |
3319 | ctlr->max_dma_len = INT_MAX; |
3320 | |
3321 | /* |
3322 | * Register the device, then userspace will see it. |
3323 | * Registration fails if the bus ID is in use. |
3324 | */ |
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3326 | |
3327 | if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) { |
3328 | status = spi_get_gpio_descs(ctlr); |
3329 | if (status) |
3330 | goto free_bus_id; |
3331 | /* |
3332 | * A controller using GPIO descriptors always |
3333 | * supports SPI_CS_HIGH if need be. |
3334 | */ |
3335 | ctlr->mode_bits |= SPI_CS_HIGH; |
3336 | } |
3337 | |
3338 | /* |
3339 | * Even if it's just one always-selected device, there must |
3340 | * be at least one chipselect. |
3341 | */ |
3342 | if (!ctlr->num_chipselect) { |
3343 | status = -EINVAL; |
3344 | goto free_bus_id; |
3345 | } |
3346 | |
3347 | /* Setting last_cs to SPI_INVALID_CS means no chip selected */ |
3348 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) |
3349 | ctlr->last_cs[idx] = SPI_INVALID_CS; |
3350 | |
	status = device_add(&ctlr->dev);
3352 | if (status < 0) |
3353 | goto free_bus_id; |
3354 | dev_dbg(dev, "registered %s %s\n" , |
3355 | spi_controller_is_slave(ctlr) ? "slave" : "master" , |
3356 | dev_name(&ctlr->dev)); |
3357 | |
3358 | /* |
3359 | * If we're using a queued driver, start the queue. Note that we don't |
3360 | * need the queueing logic if the driver is only supporting high-level |
3361 | * memory operations. |
3362 | */ |
3363 | if (ctlr->transfer) { |
3364 | dev_info(dev, "controller is unqueued, this is deprecated\n" ); |
3365 | } else if (ctlr->transfer_one || ctlr->transfer_one_message) { |
3366 | status = spi_controller_initialize_queue(ctlr); |
3367 | if (status) { |
			device_del(&ctlr->dev);
3369 | goto free_bus_id; |
3370 | } |
3371 | } |
3372 | /* Add statistics */ |
3373 | ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); |
3374 | if (!ctlr->pcpu_statistics) { |
3375 | dev_err(dev, "Error allocating per-cpu statistics\n" ); |
3376 | status = -ENOMEM; |
3377 | goto destroy_queue; |
3378 | } |
3379 | |
3380 | mutex_lock(&board_lock); |
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);
3385 | |
3386 | /* Register devices from the device tree and ACPI */ |
3387 | of_register_spi_devices(ctlr); |
3388 | acpi_register_spi_devices(ctlr); |
3389 | return status; |
3390 | |
3391 | destroy_queue: |
3392 | spi_destroy_queue(ctlr); |
3393 | free_bus_id: |
3394 | mutex_lock(&board_lock); |
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
3397 | return status; |
3398 | } |
3399 | EXPORT_SYMBOL_GPL(spi_register_controller); |
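
/*
 * Illustrative sketch only, not part of this file: a minimal probe/remove
 * pair built around spi_register_controller(). The "foo_*" names are
 * hypothetical; implementing transfer_one alone is enough to satisfy
 * spi_controller_check_ops().
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->transfer_one = foo_transfer_one;
 *		ctlr->set_cs = foo_set_cs;
 *		platform_set_drvdata(pdev, ctlr);
 *
 *		return spi_register_controller(ctlr);
 *	}
 *
 *	static void foo_remove(struct platform_device *pdev)
 *	{
 *		spi_unregister_controller(platform_get_drvdata(pdev));
 *	}
 */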
3400 | |
3401 | static void devm_spi_unregister(struct device *dev, void *res) |
3402 | { |
	spi_unregister_controller(*(struct spi_controller **)res);
3404 | } |
3405 | |
3406 | /** |
3407 | * devm_spi_register_controller - register managed SPI master or slave |
3408 | * controller |
3409 | * @dev: device managing SPI controller |
3410 | * @ctlr: initialized controller, originally from spi_alloc_master() or |
3411 | * spi_alloc_slave() |
3412 | * Context: can sleep |
3413 | * |
3414 | * Register a SPI device as with spi_register_controller() which will |
3415 | * automatically be unregistered and freed. |
3416 | * |
3417 | * Return: zero on success, else a negative error code. |
3418 | */ |
3419 | int devm_spi_register_controller(struct device *dev, |
3420 | struct spi_controller *ctlr) |
3421 | { |
3422 | struct spi_controller **ptr; |
3423 | int ret; |
3424 | |
3425 | ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); |
3426 | if (!ptr) |
3427 | return -ENOMEM; |
3428 | |
3429 | ret = spi_register_controller(ctlr); |
3430 | if (!ret) { |
3431 | *ptr = ctlr; |
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
3435 | } |
3436 | |
3437 | return ret; |
3438 | } |
3439 | EXPORT_SYMBOL_GPL(devm_spi_register_controller); |
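
/*
 * With the managed variant, the explicit unregister step disappears. A
 * hedged sketch of the probe tail, assuming the controller itself was
 * allocated with devm_spi_alloc_master():
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	...
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */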
3440 | |
3441 | static int __unregister(struct device *dev, void *null) |
3442 | { |
3443 | spi_unregister_device(to_spi_device(dev)); |
3444 | return 0; |
3445 | } |
3446 | |
3447 | /** |
3448 | * spi_unregister_controller - unregister SPI master or slave controller |
3449 | * @ctlr: the controller being unregistered |
3450 | * Context: can sleep |
3451 | * |
3452 | * This call is used only by SPI controller drivers, which are the |
3453 | * only ones directly touching chip registers. |
3454 | * |
3455 | * This must be called from context that can sleep. |
3456 | * |
3457 | * Note that this function also drops a reference to the controller. |
3458 | */ |
3459 | void spi_unregister_controller(struct spi_controller *ctlr) |
3460 | { |
3461 | struct spi_controller *found; |
3462 | int id = ctlr->bus_num; |
3463 | |
3464 | /* Prevent addition of new devices, unregister existing ones */ |
3465 | if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) |
3466 | mutex_lock(&ctlr->add_lock); |
3467 | |
	device_for_each_child(&ctlr->dev, NULL, __unregister);
3469 | |
3470 | /* First make sure that this controller was ever added */ |
3471 | mutex_lock(&board_lock); |
3472 | found = idr_find(&spi_master_idr, id); |
	mutex_unlock(&board_lock);
3474 | if (ctlr->queued) { |
3475 | if (spi_destroy_queue(ctlr)) |
3476 | dev_err(&ctlr->dev, "queue remove failed\n" ); |
3477 | } |
3478 | mutex_lock(&board_lock); |
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);
3481 | |
	device_del(&ctlr->dev);
3483 | |
3484 | /* Free bus id */ |
3485 | mutex_lock(&board_lock); |
3486 | if (found == ctlr) |
3487 | idr_remove(&spi_master_idr, id); |
	mutex_unlock(&board_lock);
3489 | |
3490 | if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) |
		mutex_unlock(&ctlr->add_lock);
3492 | |
3493 | /* |
3494 | * Release the last reference on the controller if its driver |
3495 | * has not yet been converted to devm_spi_alloc_master/slave(). |
3496 | */ |
3497 | if (!ctlr->devm_allocated) |
		put_device(&ctlr->dev);
3499 | } |
3500 | EXPORT_SYMBOL_GPL(spi_unregister_controller); |
3501 | |
3502 | static inline int __spi_check_suspended(const struct spi_controller *ctlr) |
3503 | { |
3504 | return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0; |
3505 | } |
3506 | |
3507 | static inline void __spi_mark_suspended(struct spi_controller *ctlr) |
3508 | { |
3509 | mutex_lock(&ctlr->bus_lock_mutex); |
3510 | ctlr->flags |= SPI_CONTROLLER_SUSPENDED; |
	mutex_unlock(&ctlr->bus_lock_mutex);
3512 | } |
3513 | |
3514 | static inline void __spi_mark_resumed(struct spi_controller *ctlr) |
3515 | { |
3516 | mutex_lock(&ctlr->bus_lock_mutex); |
3517 | ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED; |
	mutex_unlock(&ctlr->bus_lock_mutex);
3519 | } |
3520 | |
3521 | int spi_controller_suspend(struct spi_controller *ctlr) |
3522 | { |
3523 | int ret = 0; |
3524 | |
3525 | /* Basically no-ops for non-queued controllers */ |
3526 | if (ctlr->queued) { |
3527 | ret = spi_stop_queue(ctlr); |
3528 | if (ret) |
3529 | dev_err(&ctlr->dev, "queue stop failed\n" ); |
3530 | } |
3531 | |
3532 | __spi_mark_suspended(ctlr); |
3533 | return ret; |
3534 | } |
3535 | EXPORT_SYMBOL_GPL(spi_controller_suspend); |
3536 | |
3537 | int spi_controller_resume(struct spi_controller *ctlr) |
3538 | { |
3539 | int ret = 0; |
3540 | |
3541 | __spi_mark_resumed(ctlr); |
3542 | |
3543 | if (ctlr->queued) { |
3544 | ret = spi_start_queue(ctlr); |
3545 | if (ret) |
3546 | dev_err(&ctlr->dev, "queue restart failed\n" ); |
3547 | } |
3548 | return ret; |
3549 | } |
3550 | EXPORT_SYMBOL_GPL(spi_controller_resume); |
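
/*
 * Illustrative sketch with hypothetical "foo_*" names: controller drivers
 * typically call the two helpers above from their system PM callbacks so
 * that the queue is quiesced before the hardware loses power:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return spi_controller_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return spi_controller_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */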
3551 | |
3552 | /*-------------------------------------------------------------------------*/ |
3553 | |
3554 | /* Core methods for spi_message alterations */ |
3555 | |
3556 | static void __spi_replace_transfers_release(struct spi_controller *ctlr, |
3557 | struct spi_message *msg, |
3558 | void *res) |
3559 | { |
3560 | struct spi_replaced_transfers *rxfer = res; |
3561 | size_t i; |
3562 | |
3563 | /* Call extra callback if requested */ |
3564 | if (rxfer->release) |
3565 | rxfer->release(ctlr, msg, res); |
3566 | |
3567 | /* Insert replaced transfers back into the message */ |
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3569 | |
3570 | /* Remove the formerly inserted entries */ |
3571 | for (i = 0; i < rxfer->inserted; i++) |
		list_del(&rxfer->inserted_transfers[i].transfer_list);
3573 | } |
3574 | |
3575 | /** |
3576 | * spi_replace_transfers - replace transfers with several transfers |
3577 | * and register change with spi_message.resources |
3578 | * @msg: the spi_message we work upon |
3579 | * @xfer_first: the first spi_transfer we want to replace |
3580 | * @remove: number of transfers to remove |
3581 | * @insert: the number of transfers we want to insert instead |
3582 | * @release: extra release code necessary in some circumstances |
3583 | * @extradatasize: extra data to allocate (with alignment guarantees |
3584 | * of struct @spi_transfer) |
3585 | * @gfp: gfp flags |
3586 | * |
3587 | * Returns: pointer to @spi_replaced_transfers, |
 * or an ERR_PTR(...) in case of errors.
3589 | */ |
3590 | static struct spi_replaced_transfers *spi_replace_transfers( |
3591 | struct spi_message *msg, |
3592 | struct spi_transfer *xfer_first, |
3593 | size_t remove, |
3594 | size_t insert, |
3595 | spi_replaced_release_t release, |
					size_t extradatasize,
3597 | gfp_t gfp) |
3598 | { |
3599 | struct spi_replaced_transfers *rxfer; |
3600 | struct spi_transfer *xfer; |
3601 | size_t i; |
3602 | |
3603 | /* Allocate the structure using spi_res */ |
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3605 | struct_size(rxfer, inserted_transfers, insert) |
3606 | + extradatasize, |
3607 | gfp); |
3608 | if (!rxfer) |
		return ERR_PTR(-ENOMEM);
3610 | |
3611 | /* The release code to invoke before running the generic release */ |
3612 | rxfer->release = release; |
3613 | |
3614 | /* Assign extradata */ |
3615 | if (extradatasize) |
3616 | rxfer->extradata = |
3617 | &rxfer->inserted_transfers[insert]; |
3618 | |
3619 | /* Init the replaced_transfers list */ |
	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3621 | |
3622 | /* |
3623 | * Assign the list_entry after which we should reinsert |
3624 | * the @replaced_transfers - it may be spi_message.messages! |
3625 | */ |
3626 | rxfer->replaced_after = xfer_first->transfer_list.prev; |
3627 | |
3628 | /* Remove the requested number of transfers */ |
3629 | for (i = 0; i < remove; i++) { |
3630 | /* |
		 * If the entry after replaced_after is &msg->transfers,
3632 | * then we have been requested to remove more transfers |
3633 | * than are in the list. |
3634 | */ |
3635 | if (rxfer->replaced_after->next == &msg->transfers) { |
3636 | dev_err(&msg->spi->dev, |
3637 | "requested to remove more spi_transfers than are available\n" ); |
3638 | /* Insert replaced transfers back into the message */ |
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);
3641 | |
3642 | /* Free the spi_replace_transfer structure... */ |
			spi_res_free(rxfer);
3644 | |
3645 | /* ...and return with an error */ |
			return ERR_PTR(-EINVAL);
3647 | } |
3648 | |
3649 | /* |
3650 | * Remove the entry after replaced_after from list of |
3651 | * transfers and add it to list of replaced_transfers. |
3652 | */ |
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
3655 | } |
3656 | |
3657 | /* |
3658 | * Create copy of the given xfer with identical settings |
3659 | * based on the first transfer to get removed. |
3660 | */ |
3661 | for (i = 0; i < insert; i++) { |
3662 | /* We need to run in reverse order */ |
3663 | xfer = &rxfer->inserted_transfers[insert - 1 - i]; |
3664 | |
3665 | /* Copy all spi_transfer data */ |
3666 | memcpy(xfer, xfer_first, sizeof(*xfer)); |
3667 | |
3668 | /* Add to list */ |
		list_add(&xfer->transfer_list, rxfer->replaced_after);
3670 | |
3671 | /* Clear cs_change and delay for all but the last */ |
3672 | if (i) { |
3673 | xfer->cs_change = false; |
3674 | xfer->delay.value = 0; |
3675 | } |
3676 | } |
3677 | |
3678 | /* Set up inserted... */ |
3679 | rxfer->inserted = insert; |
3680 | |
3681 | /* ...and register it with spi_res/spi_message */ |
	spi_res_add(msg, rxfer);
3683 | |
3684 | return rxfer; |
3685 | } |
3686 | |
3687 | static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, |
3688 | struct spi_message *msg, |
3689 | struct spi_transfer **xferp, |
3690 | size_t maxsize) |
3691 | { |
3692 | struct spi_transfer *xfer = *xferp, *xfers; |
3693 | struct spi_replaced_transfers *srt; |
3694 | size_t offset; |
3695 | size_t count, i; |
3696 | |
3697 | /* Calculate how many we have to replace */ |
3698 | count = DIV_ROUND_UP(xfer->len, maxsize); |
3699 | |
3700 | /* Create replacement */ |
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
3704 | xfers = srt->inserted_transfers; |
3705 | |
3706 | /* |
3707 | * Now handle each of those newly inserted spi_transfers. |
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 *
	 * This also includes support for the deprecated
	 * spi_message.is_dma_mapped interface.
3715 | */ |
3716 | |
3717 | /* |
3718 | * The first transfer just needs the length modified, so we |
3719 | * run it outside the loop. |
3720 | */ |
3721 | xfers[0].len = min_t(size_t, maxsize, xfer[0].len); |
3722 | |
3723 | /* All the others need rx_buf/tx_buf also set */ |
3724 | for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { |
3725 | /* Update rx_buf, tx_buf and DMA */ |
3726 | if (xfers[i].rx_buf) |
3727 | xfers[i].rx_buf += offset; |
3728 | if (xfers[i].rx_dma) |
3729 | xfers[i].rx_dma += offset; |
3730 | if (xfers[i].tx_buf) |
3731 | xfers[i].tx_buf += offset; |
3732 | if (xfers[i].tx_dma) |
3733 | xfers[i].tx_dma += offset; |
3734 | |
3735 | /* Update length */ |
3736 | xfers[i].len = min(maxsize, xfers[i].len - offset); |
3737 | } |
3738 | |
3739 | /* |
3740 | * We set up xferp to the last entry we have inserted, |
3741 | * so that we skip those already split transfers. |
3742 | */ |
3743 | *xferp = &xfers[count - 1]; |
3744 | |
3745 | /* Increment statistics counters */ |
3746 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, |
3747 | transfers_split_maxsize); |
3748 | SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, |
3749 | transfers_split_maxsize); |
3750 | |
3751 | return 0; |
3752 | } |
3753 | |
3754 | /** |
3755 | * spi_split_transfers_maxsize - split spi transfers into multiple transfers |
3756 | * when an individual transfer exceeds a |
3757 | * certain size |
3758 | * @ctlr: the @spi_controller for this transfer |
3759 | * @msg: the @spi_message to transform |
 * @maxsize: the maximum length a transfer may have before it is split
3761 | * |
3762 | * This function allocates resources that are automatically freed during the |
3763 | * spi message unoptimize phase so this function should only be called from |
3764 | * optimize_message callbacks. |
3765 | * |
3766 | * Return: status of transformation |
3767 | */ |
3768 | int spi_split_transfers_maxsize(struct spi_controller *ctlr, |
3769 | struct spi_message *msg, |
3770 | size_t maxsize) |
3771 | { |
3772 | struct spi_transfer *xfer; |
3773 | int ret; |
3774 | |
3775 | /* |
3776 | * Iterate over the transfer_list, |
3777 | * but note that xfer is advanced to the last transfer inserted |
3778 | * to avoid checking sizes again unnecessarily (also xfer does |
3779 | * potentially belong to a different list by the time the |
3780 | * replacement has happened). |
3781 | */ |
3782 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
3783 | if (xfer->len > maxsize) { |
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
3786 | if (ret) |
3787 | return ret; |
3788 | } |
3789 | } |
3790 | |
3791 | return 0; |
3792 | } |
3793 | EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); |
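
/*
 * Illustrative sketch, limit chosen arbitrarily: a controller whose FIFO
 * or DMA engine cannot move more than 64 bytes per transfer could split
 * oversized transfers from its optimize_message() callback:
 *
 *	static int foo_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(msg->spi->controller,
 *						   msg, 64);
 *	}
 */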
3794 | |
3795 | |
3796 | /** |
3797 | * spi_split_transfers_maxwords - split SPI transfers into multiple transfers |
3798 | * when an individual transfer exceeds a |
3799 | * certain number of SPI words |
3800 | * @ctlr: the @spi_controller for this transfer |
3801 | * @msg: the @spi_message to transform |
3802 | * @maxwords: the number of words to limit each transfer to |
3803 | * |
3804 | * This function allocates resources that are automatically freed during the |
3805 | * spi message unoptimize phase so this function should only be called from |
3806 | * optimize_message callbacks. |
3807 | * |
3808 | * Return: status of transformation |
3809 | */ |
3810 | int spi_split_transfers_maxwords(struct spi_controller *ctlr, |
3811 | struct spi_message *msg, |
3812 | size_t maxwords) |
3813 | { |
3814 | struct spi_transfer *xfer; |
3815 | |
3816 | /* |
3817 | * Iterate over the transfer_list, |
3818 | * but note that xfer is advanced to the last transfer inserted |
3819 | * to avoid checking sizes again unnecessarily (also xfer does |
3820 | * potentially belong to a different list by the time the |
3821 | * replacement has happened). |
3822 | */ |
3823 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
3824 | size_t maxsize; |
3825 | int ret; |
3826 | |
3827 | maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word)); |
3828 | if (xfer->len > maxsize) { |
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
3831 | if (ret) |
3832 | return ret; |
3833 | } |
3834 | } |
3835 | |
3836 | return 0; |
3837 | } |
3838 | EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords); |
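
/*
 * Worked example of the per-transfer byte limit computed above, with
 * values chosen purely for illustration: for bits_per_word = 12,
 * BITS_TO_BYTES(12) = 2 and roundup_pow_of_two(2) = 2, so each word
 * occupies two bytes and maxwords = 16 gives maxsize = 32 bytes. For
 * bits_per_word = 24, BITS_TO_BYTES(24) = 3 rounds up to 4 bytes per
 * word, so maxwords = 16 gives maxsize = 64 bytes.
 */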
3839 | |
3840 | /*-------------------------------------------------------------------------*/ |
3841 | |
3842 | /* |
3843 | * Core methods for SPI controller protocol drivers. Some of the |
3844 | * other core methods are currently defined as inline functions. |
3845 | */ |
3846 | |
3847 | static int __spi_validate_bits_per_word(struct spi_controller *ctlr, |
3848 | u8 bits_per_word) |
3849 | { |
3850 | if (ctlr->bits_per_word_mask) { |
3851 | /* Only 32 bits fit in the mask */ |
3852 | if (bits_per_word > 32) |
3853 | return -EINVAL; |
3854 | if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) |
3855 | return -EINVAL; |
3856 | } |
3857 | |
3858 | return 0; |
3859 | } |
3860 | |
3861 | /** |
3862 | * spi_set_cs_timing - configure CS setup, hold, and inactive delays |
3863 | * @spi: the device that requires specific CS timing configuration |
3864 | * |
3865 | * Return: zero on success, else a negative error code. |
3866 | */ |
3867 | static int spi_set_cs_timing(struct spi_device *spi) |
3868 | { |
3869 | struct device *parent = spi->controller->dev.parent; |
3870 | int status = 0; |
3871 | |
	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3873 | if (spi->controller->auto_runtime_pm) { |
			status = pm_runtime_get_sync(parent);
			if (status < 0) {
				pm_runtime_put_noidle(parent);
				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3878 | status); |
3879 | return status; |
3880 | } |
3881 | |
3882 | status = spi->controller->set_cs_timing(spi); |
			pm_runtime_mark_last_busy(parent);
			pm_runtime_put_autosuspend(parent);
3885 | } else { |
3886 | status = spi->controller->set_cs_timing(spi); |
3887 | } |
3888 | } |
3889 | return status; |
3890 | } |
3891 | |
3892 | /** |
3893 | * spi_setup - setup SPI mode and clock rate |
3894 | * @spi: the device whose settings are being modified |
3895 | * Context: can sleep, and no requests are queued to the device |
3896 | * |
3897 | * SPI protocol drivers may need to update the transfer mode if the |
3898 | * device doesn't work with its default. They may likewise need |
3899 | * to update clock rates or word sizes from initial values. This function |
3900 | * changes those settings, and must be called from a context that can sleep. |
3901 | * Except for SPI_CS_HIGH, which takes effect immediately, the changes take |
3902 | * effect the next time the device is selected and data is transferred to |
3903 | * or from it. When this function returns, the SPI device is deselected. |
3904 | * |
3905 | * Note that this call will fail if the protocol driver specifies an option |
3906 | * that the underlying controller or its driver does not support. For |
3907 | * example, not all hardware supports wire transfers using nine bit words, |
3908 | * LSB-first wire encoding, or active-high chipselects. |
3909 | * |
3910 | * Return: zero on success, else a negative error code. |
3911 | */ |
3912 | int spi_setup(struct spi_device *spi) |
3913 | { |
3914 | unsigned bad_bits, ugly_bits; |
3915 | int status = 0; |
3916 | |
3917 | /* |
3918 | * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO |
3919 | * are set at the same time. |
3920 | */ |
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3927 | return -EINVAL; |
3928 | } |
3929 | /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */ |
3930 | if ((spi->mode & SPI_3WIRE) && (spi->mode & |
3931 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | |
3932 | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) |
3933 | return -EINVAL; |
3934 | /* |
3935 | * Help drivers fail *cleanly* when they need options |
3936 | * that aren't supported with their current controller. |
3937 | * SPI_CS_WORD has a fallback software implementation, |
3938 | * so it is ignored here. |
3939 | */ |
3940 | bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | |
3941 | SPI_NO_TX | SPI_NO_RX); |
3942 | ugly_bits = bad_bits & |
3943 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | |
3944 | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); |
3945 | if (ugly_bits) { |
3946 | dev_warn(&spi->dev, |
3947 | "setup: ignoring unsupported mode bits %x\n" , |
3948 | ugly_bits); |
3949 | spi->mode &= ~ugly_bits; |
3950 | bad_bits &= ~ugly_bits; |
3951 | } |
3952 | if (bad_bits) { |
3953 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n" , |
3954 | bad_bits); |
3955 | return -EINVAL; |
3956 | } |
3957 | |
3958 | if (!spi->bits_per_word) { |
3959 | spi->bits_per_word = 8; |
3960 | } else { |
3961 | /* |
3962 | * Some controllers may not support the default 8 bits-per-word |
3963 | * so only perform the check when this is explicitly provided. |
3964 | */ |
		status = __spi_validate_bits_per_word(spi->controller,
						      spi->bits_per_word);
3967 | if (status) |
3968 | return status; |
3969 | } |
3970 | |
3971 | if (spi->controller->max_speed_hz && |
3972 | (!spi->max_speed_hz || |
3973 | spi->max_speed_hz > spi->controller->max_speed_hz)) |
3974 | spi->max_speed_hz = spi->controller->max_speed_hz; |
3975 | |
3976 | mutex_lock(&spi->controller->io_mutex); |
3977 | |
3978 | if (spi->controller->setup) { |
3979 | status = spi->controller->setup(spi); |
3980 | if (status) { |
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3983 | status); |
3984 | return status; |
3985 | } |
3986 | } |
3987 | |
3988 | status = spi_set_cs_timing(spi); |
3989 | if (status) { |
		mutex_unlock(&spi->controller->io_mutex);
3991 | return status; |
3992 | } |
3993 | |
3994 | if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { |
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3996 | if (status < 0) { |
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3999 | status); |
4000 | return status; |
4001 | } |
4002 | |
4003 | /* |
4004 | * We do not want to return positive value from pm_runtime_get, |
4005 | * there are many instances of devices calling spi_setup() and |
4006 | * checking for a non-zero return value instead of a negative |
4007 | * return value. |
4008 | */ |
4009 | status = 0; |
4010 | |
		spi_set_cs(spi, false, true);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
4014 | } else { |
		spi_set_cs(spi, false, true);
4016 | } |
4017 | |
	mutex_unlock(&spi->controller->io_mutex);
4019 | |
4020 | if (spi->rt && !spi->controller->rt) { |
4021 | spi->controller->rt = true; |
		spi_set_thread_rt(spi->controller);
4023 | } |
4024 | |
4025 | trace_spi_setup(spi, status); |
4026 | |
4027 | dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n" , |
4028 | spi->mode & SPI_MODE_X_MASK, |
4029 | (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "" , |
4030 | (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "" , |
4031 | (spi->mode & SPI_3WIRE) ? "3wire, " : "" , |
4032 | (spi->mode & SPI_LOOP) ? "loopback, " : "" , |
4033 | spi->bits_per_word, spi->max_speed_hz, |
4034 | status); |
4035 | |
4036 | return status; |
4037 | } |
4038 | EXPORT_SYMBOL_GPL(spi_setup); |
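
/*
 * Illustrative sketch: a protocol driver adjusting the defaults before
 * talking to its chip (the mode, word size and speed values are
 * hypothetical):
 *
 *	spi->mode |= SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */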
4039 | |
4040 | static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, |
4041 | struct spi_device *spi) |
4042 | { |
4043 | int delay1, delay2; |
4044 | |
4045 | delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); |
4046 | if (delay1 < 0) |
4047 | return delay1; |
4048 | |
4049 | delay2 = spi_delay_to_ns(&spi->word_delay, xfer); |
4050 | if (delay2 < 0) |
4051 | return delay2; |
4052 | |
4053 | if (delay1 < delay2) |
4054 | memcpy(&xfer->word_delay, &spi->word_delay, |
4055 | sizeof(xfer->word_delay)); |
4056 | |
4057 | return 0; |
4058 | } |
4059 | |
4060 | static int __spi_validate(struct spi_device *spi, struct spi_message *message) |
4061 | { |
4062 | struct spi_controller *ctlr = spi->controller; |
4063 | struct spi_transfer *xfer; |
4064 | int w_size; |
4065 | |
	if (list_empty(&message->transfers))
4067 | return -EINVAL; |
4068 | |
4069 | message->spi = spi; |
4070 | |
4071 | /* |
4072 | * Half-duplex links include original MicroWire, and ones with |
4073 | * only one data pin like SPI_3WIRE (switches direction) or where |
4074 | * either MOSI or MISO is missing. They can also be caused by |
4075 | * software limitations. |
4076 | */ |
4077 | if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || |
4078 | (spi->mode & SPI_3WIRE)) { |
4079 | unsigned flags = ctlr->flags; |
4080 | |
4081 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
4082 | if (xfer->rx_buf && xfer->tx_buf) |
4083 | return -EINVAL; |
4084 | if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) |
4085 | return -EINVAL; |
4086 | if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) |
4087 | return -EINVAL; |
4088 | } |
4089 | } |
4090 | |
4091 | /* |
4092 | * Set transfer bits_per_word and max speed as spi device default if |
4093 | * it is not set for this transfer. |
4094 | * Set transfer tx_nbits and rx_nbits as single transfer default |
4095 | * (SPI_NBITS_SINGLE) if it is not set for this transfer. |
4096 | * Ensure transfer word_delay is at least as long as that required by |
4097 | * device itself. |
4098 | */ |
4099 | message->frame_length = 0; |
4100 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
4101 | xfer->effective_speed_hz = 0; |
4102 | message->frame_length += xfer->len; |
4103 | if (!xfer->bits_per_word) |
4104 | xfer->bits_per_word = spi->bits_per_word; |
4105 | |
4106 | if (!xfer->speed_hz) |
4107 | xfer->speed_hz = spi->max_speed_hz; |
4108 | |
4109 | if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) |
4110 | xfer->speed_hz = ctlr->max_speed_hz; |
4111 | |
		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4113 | return -EINVAL; |
4114 | |
4115 | /* |
4116 | * SPI transfer length should be multiple of SPI word size |
4117 | * where SPI word size should be power-of-two multiple. |
4118 | */ |
4119 | if (xfer->bits_per_word <= 8) |
4120 | w_size = 1; |
4121 | else if (xfer->bits_per_word <= 16) |
4122 | w_size = 2; |
4123 | else |
4124 | w_size = 4; |
4125 | |
4126 | /* No partial transfers accepted */ |
4127 | if (xfer->len % w_size) |
4128 | return -EINVAL; |
4129 | |
4130 | if (xfer->speed_hz && ctlr->min_speed_hz && |
4131 | xfer->speed_hz < ctlr->min_speed_hz) |
4132 | return -EINVAL; |
4133 | |
4134 | if (xfer->tx_buf && !xfer->tx_nbits) |
4135 | xfer->tx_nbits = SPI_NBITS_SINGLE; |
4136 | if (xfer->rx_buf && !xfer->rx_nbits) |
4137 | xfer->rx_nbits = SPI_NBITS_SINGLE; |
4138 | /* |
4139 | * Check transfer tx/rx_nbits: |
4140 | * 1. check the value matches one of single, dual and quad |
4141 | * 2. check tx/rx_nbits match the mode in spi_device |
4142 | */ |
4143 | if (xfer->tx_buf) { |
4144 | if (spi->mode & SPI_NO_TX) |
4145 | return -EINVAL; |
4146 | if (xfer->tx_nbits != SPI_NBITS_SINGLE && |
4147 | xfer->tx_nbits != SPI_NBITS_DUAL && |
4148 | xfer->tx_nbits != SPI_NBITS_QUAD) |
4149 | return -EINVAL; |
4150 | if ((xfer->tx_nbits == SPI_NBITS_DUAL) && |
4151 | !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) |
4152 | return -EINVAL; |
4153 | if ((xfer->tx_nbits == SPI_NBITS_QUAD) && |
4154 | !(spi->mode & SPI_TX_QUAD)) |
4155 | return -EINVAL; |
4156 | } |
4157 | /* Check transfer rx_nbits */ |
4158 | if (xfer->rx_buf) { |
4159 | if (spi->mode & SPI_NO_RX) |
4160 | return -EINVAL; |
4161 | if (xfer->rx_nbits != SPI_NBITS_SINGLE && |
4162 | xfer->rx_nbits != SPI_NBITS_DUAL && |
4163 | xfer->rx_nbits != SPI_NBITS_QUAD) |
4164 | return -EINVAL; |
4165 | if ((xfer->rx_nbits == SPI_NBITS_DUAL) && |
4166 | !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) |
4167 | return -EINVAL; |
4168 | if ((xfer->rx_nbits == SPI_NBITS_QUAD) && |
4169 | !(spi->mode & SPI_RX_QUAD)) |
4170 | return -EINVAL; |
4171 | } |
4172 | |
4173 | if (_spi_xfer_word_delay_update(xfer, spi)) |
4174 | return -EINVAL; |
4175 | } |
4176 | |
4177 | message->status = -EINPROGRESS; |
4178 | |
4179 | return 0; |
4180 | } |
4181 | |
4182 | /* |
4183 | * spi_split_transfers - generic handling of transfer splitting |
4184 | * @msg: the message to split |
4185 | * |
4186 | * Under certain conditions, a SPI controller may not support arbitrary |
4187 | * transfer sizes or other features required by a peripheral. This function |
4188 | * will split the transfers in the message into smaller transfers that are |
4189 | * supported by the controller. |
4190 | * |
4191 | * Controllers with special requirements not covered here can also split |
4192 | * transfers in the optimize_message() callback. |
4193 | * |
4194 | * Context: can sleep |
4195 | * Return: zero on success, else a negative error code |
4196 | */ |
4197 | static int spi_split_transfers(struct spi_message *msg) |
4198 | { |
4199 | struct spi_controller *ctlr = msg->spi->controller; |
4200 | struct spi_transfer *xfer; |
4201 | int ret; |
4202 | |
4203 | /* |
4204 | * If an SPI controller does not support toggling the CS line on each |
4205 | * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO |
4206 | * for the CS line, we can emulate the CS-per-word hardware function by |
4207 | * splitting transfers into one-word transfers and ensuring that |
4208 | * cs_change is set for each transfer. |
4209 | */ |
4210 | if ((msg->spi->mode & SPI_CS_WORD) && |
	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4212 | ret = spi_split_transfers_maxwords(ctlr, msg, 1); |
4213 | if (ret) |
4214 | return ret; |
4215 | |
4216 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
4217 | /* Don't change cs_change on the last entry in the list */ |
			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4219 | break; |
4220 | |
4221 | xfer->cs_change = 1; |
4222 | } |
4223 | } else { |
4224 | ret = spi_split_transfers_maxsize(ctlr, msg, |
						  spi_max_transfer_size(msg->spi));
4226 | if (ret) |
4227 | return ret; |
4228 | } |
4229 | |
4230 | return 0; |
4231 | } |
4232 | |
4233 | /* |
4234 | * __spi_optimize_message - shared implementation for spi_optimize_message() |
4235 | * and spi_maybe_optimize_message() |
4236 | * @spi: the device that will be used for the message |
4237 | * @msg: the message to optimize |
4238 | * |
4239 | * Peripheral drivers will call spi_optimize_message() and the spi core will |
4240 | * call spi_maybe_optimize_message() instead of calling this directly. |
4241 | * |
4242 | * It is not valid to call this on a message that has already been optimized. |
4243 | * |
4244 | * Return: zero on success, else a negative error code |
4245 | */ |
4246 | static int __spi_optimize_message(struct spi_device *spi, |
4247 | struct spi_message *msg) |
4248 | { |
4249 | struct spi_controller *ctlr = spi->controller; |
4250 | int ret; |
4251 | |
	ret = __spi_validate(spi, msg);
4253 | if (ret) |
4254 | return ret; |
4255 | |
4256 | ret = spi_split_transfers(msg); |
4257 | if (ret) |
4258 | return ret; |
4259 | |
4260 | if (ctlr->optimize_message) { |
4261 | ret = ctlr->optimize_message(msg); |
4262 | if (ret) { |
			spi_res_release(ctlr, msg);
4264 | return ret; |
4265 | } |
4266 | } |
4267 | |
4268 | msg->optimized = true; |
4269 | |
4270 | return 0; |
4271 | } |
4272 | |
4273 | /* |
4274 | * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized |
4275 | * @spi: the device that will be used for the message |
4276 | * @msg: the message to optimize |
4277 | * Return: zero on success, else a negative error code |
4278 | */ |
4279 | static int spi_maybe_optimize_message(struct spi_device *spi, |
4280 | struct spi_message *msg) |
4281 | { |
4282 | if (msg->pre_optimized) |
4283 | return 0; |
4284 | |
4285 | return __spi_optimize_message(spi, msg); |
4286 | } |
4287 | |
4288 | /** |
4289 | * spi_optimize_message - do any one-time validation and setup for a SPI message |
4290 | * @spi: the device that will be used for the message |
4291 | * @msg: the message to optimize |
4292 | * |
4293 | * Peripheral drivers that reuse the same message repeatedly may call this to |
4294 | * perform as much message prep as possible once, rather than repeating it each |
4295 | * time a message transfer is performed to improve throughput and reduce CPU |
4296 | * usage. |
4297 | * |
4298 | * Once a message has been optimized, it cannot be modified with the exception |
4299 | * of updating the contents of any xfer->tx_buf (the pointer can't be changed, |
4300 | * only the data in the memory it points to). |
4301 | * |
4302 | * Calls to this function must be balanced with calls to spi_unoptimize_message() |
4303 | * to avoid leaking resources. |
4304 | * |
4305 | * Context: can sleep |
4306 | * Return: zero on success, else a negative error code |
4307 | */ |
4308 | int spi_optimize_message(struct spi_device *spi, struct spi_message *msg) |
4309 | { |
4310 | int ret; |
4311 | |
4312 | ret = __spi_optimize_message(spi, msg); |
4313 | if (ret) |
4314 | return ret; |
4315 | |
4316 | /* |
4317 | * This flag indicates that the peripheral driver called spi_optimize_message() |
4318 | * and therefore we shouldn't unoptimize message automatically when finalizing |
4319 | * the message but rather wait until spi_unoptimize_message() is called |
4320 | * by the peripheral driver. |
4321 | */ |
4322 | msg->pre_optimized = true; |
4323 | |
4324 | return 0; |
4325 | } |
4326 | EXPORT_SYMBOL_GPL(spi_optimize_message); |
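
/*
 * Illustrative sketch, assuming a hypothetical driver-private "priv"
 * structure: a driver that submits the same message layout repeatedly can
 * pay the validation and splitting cost once at init time:
 *
 *	spi_message_init_with_transfers(&priv->msg, priv->xfers,
 *					ARRAY_SIZE(priv->xfers));
 *	ret = spi_optimize_message(spi, &priv->msg);
 *
 * and must then balance it with spi_unoptimize_message(&priv->msg) in its
 * teardown path.
 */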
4327 | |
4328 | /** |
4329 | * spi_unoptimize_message - releases any resources allocated by spi_optimize_message() |
4330 | * @msg: the message to unoptimize |
4331 | * |
4332 | * Calls to this function must be balanced with calls to spi_optimize_message(). |
4333 | * |
4334 | * Context: can sleep |
4335 | */ |
4336 | void spi_unoptimize_message(struct spi_message *msg) |
4337 | { |
4338 | __spi_unoptimize_message(msg); |
4339 | msg->pre_optimized = false; |
4340 | } |
4341 | EXPORT_SYMBOL_GPL(spi_unoptimize_message); |
4342 | |
4343 | static int __spi_async(struct spi_device *spi, struct spi_message *message) |
4344 | { |
4345 | struct spi_controller *ctlr = spi->controller; |
4346 | struct spi_transfer *xfer; |
4347 | |
4348 | /* |
4349 | * Some controllers do not support doing regular SPI transfers. Return |
4350 | * ENOTSUPP when this is the case. |
4351 | */ |
4352 | if (!ctlr->transfer) |
4353 | return -ENOTSUPP; |
4354 | |
4355 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); |
4356 | SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); |
4357 | |
	trace_spi_message_submit(message);
4359 | |
4360 | if (!ctlr->ptp_sts_supported) { |
4361 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
4362 | xfer->ptp_sts_word_pre = 0; |
			ptp_read_system_prets(xfer->ptp_sts);
4364 | } |
4365 | } |
4366 | |
4367 | return ctlr->transfer(spi, message); |
4368 | } |
4369 | |
4370 | /** |
4371 | * spi_async - asynchronous SPI transfer |
4372 | * @spi: device with which data will be exchanged |
4373 | * @message: describes the data transfers, including completion callback |
4374 | * Context: any (IRQs may be blocked, etc) |
4375 | * |
4376 | * This call may be used in_irq and other contexts which can't sleep, |
4377 | * as well as from task contexts which can sleep. |
4378 | * |
4379 | * The completion callback is invoked in a context which can't sleep. |
4380 | * Before that invocation, the value of message->status is undefined. |
4381 | * When the callback is issued, message->status holds either zero (to |
4382 | * indicate complete success) or a negative error code. After that |
4383 | * callback returns, the driver which issued the transfer request may |
4384 | * deallocate the associated memory; it's no longer in use by any SPI |
4385 | * core or controller driver code. |
4386 | * |
4387 | * Note that although all messages to a spi_device are handled in |
4388 | * FIFO order, messages may go to different devices in other orders. |
4389 | * Some device might be higher priority, or have various "hard" access |
4390 | * time requirements, for example. |
4391 | * |
4392 | * On detection of any fault during the transfer, processing of |
4393 | * the entire message is aborted, and the device is deselected. |
4394 | * Until returning from the associated message completion callback, |
4395 | * no other spi_message queued to that device will be processed. |
4396 | * (This rule applies equally to all the synchronous transfer calls, |
4397 | * which are wrappers around this core asynchronous primitive.) |
4398 | * |
4399 | * Return: zero on success, else a negative error code. |
4400 | */ |
4401 | int spi_async(struct spi_device *spi, struct spi_message *message) |
4402 | { |
4403 | struct spi_controller *ctlr = spi->controller; |
4404 | int ret; |
4405 | unsigned long flags; |
4406 | |
	ret = spi_maybe_optimize_message(spi, message);
4408 | if (ret) |
4409 | return ret; |
4410 | |
4411 | spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); |
4412 | |
4413 | if (ctlr->bus_lock_flag) |
4414 | ret = -EBUSY; |
4415 | else |
4416 | ret = __spi_async(spi, message); |
4417 | |
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4419 | |
	spi_maybe_unoptimize_message(message);
4421 | |
4422 | return ret; |
4423 | } |
4424 | EXPORT_SYMBOL_GPL(spi_async); |
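
/*
 * Illustrative sketch with a hypothetical "foo_msg_done" callback: waiting
 * on a private completion, which mirrors what __spi_sync() below does
 * internally:
 *
 *	static void foo_msg_done(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	msg->complete = foo_msg_done;
 *	msg->context = &done;
 *	ret = spi_async(spi, msg);
 *	if (!ret)
 *		wait_for_completion(&done);
 */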
4425 | |
4426 | static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) |
4427 | { |
4428 | bool was_busy; |
4429 | int ret; |
4430 | |
4431 | mutex_lock(&ctlr->io_mutex); |
4432 | |
4433 | was_busy = ctlr->busy; |
4434 | |
4435 | ctlr->cur_msg = msg; |
4436 | ret = __spi_pump_transfer_message(ctlr, msg, was_busy); |
4437 | if (ret) |
4438 | dev_err(&ctlr->dev, "noqueue transfer failed\n" ); |
4439 | ctlr->cur_msg = NULL; |
4440 | ctlr->fallback = false; |
4441 | |
4442 | if (!was_busy) { |
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
4447 | if (ctlr->unprepare_transfer_hardware && |
4448 | ctlr->unprepare_transfer_hardware(ctlr)) |
4449 | dev_err(&ctlr->dev, |
4450 | "failed to unprepare transfer hardware\n" ); |
4451 | spi_idle_runtime_pm(ctlr); |
4452 | } |
4453 | |
	mutex_unlock(&ctlr->io_mutex);
4455 | } |
4456 | |
4457 | /*-------------------------------------------------------------------------*/ |
4458 | |
4459 | /* |
4460 | * Utility methods for SPI protocol drivers, layered on |
4461 | * top of the core. Some other utility methods are defined as |
4462 | * inline functions. |
4463 | */ |
4464 | |
4465 | static void spi_complete(void *arg) |
4466 | { |
4467 | complete(arg); |
4468 | } |
4469 | |
4470 | static int __spi_sync(struct spi_device *spi, struct spi_message *message) |
4471 | { |
4472 | DECLARE_COMPLETION_ONSTACK(done); |
4473 | unsigned long flags; |
4474 | int status; |
4475 | struct spi_controller *ctlr = spi->controller; |
4476 | |
4477 | if (__spi_check_suspended(ctlr)) { |
4478 | dev_warn_once(&spi->dev, "Attempted to sync while suspend\n" ); |
4479 | return -ESHUTDOWN; |
4480 | } |
4481 | |
	status = spi_maybe_optimize_message(spi, message);
4483 | if (status) |
4484 | return status; |
4485 | |
4486 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); |
4487 | SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); |
4488 | |
4489 | /* |
4490 | * Checking queue_empty here only guarantees async/sync message |
4491 | * ordering when coming from the same context. It does not need to |
4492 | * guard against reentrancy from a different context. The io_mutex |
4493 | * will catch those cases. |
4494 | */ |
4495 | if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) { |
4496 | message->actual_length = 0; |
4497 | message->status = -EINPROGRESS; |
4498 | |
		trace_spi_message_submit(message);
4500 | |
4501 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate); |
4502 | SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate); |
4503 | |
		__spi_transfer_message_noqueue(ctlr, message);
4505 | |
4506 | return message->status; |
4507 | } |
4508 | |
4509 | /* |
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until it
	 * is completed.
4514 | */ |
4515 | message->complete = spi_complete; |
4516 | message->context = &done; |
4517 | |
4518 | spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); |
4519 | status = __spi_async(spi, message); |
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4521 | |
4522 | if (status == 0) { |
4523 | wait_for_completion(&done); |
4524 | status = message->status; |
4525 | } |
4526 | message->context = NULL; |
4527 | |
4528 | return status; |
4529 | } |
4530 | |
4531 | /** |
4532 | * spi_sync - blocking/synchronous SPI data transfers |
4533 | * @spi: device with which data will be exchanged |
4534 | * @message: describes the data transfers |
4535 | * Context: can sleep |
4536 | * |
4537 | * This call may only be used from a context that may sleep. The sleep |
4538 | * is non-interruptible, and has no timeout. Low-overhead controller |
4539 | * drivers may DMA directly into and out of the message buffers. |
4540 | * |
4541 | * Note that the SPI device's chip select is active during the message, |
4542 | * and then is normally disabled between messages. Drivers for some |
4543 | * frequently-used devices may want to minimize costs of selecting a chip, |
4544 | * by leaving it selected in anticipation that the next message will go |
4545 | * to the same chip. (That may increase power usage.) |
4546 | * |
4547 | * Also, the caller is guaranteeing that the memory associated with the |
4548 | * message will not be freed before this call returns. |
4549 | * |
4550 | * Return: zero on success, else a negative error code. |
4551 | */ |
4552 | int spi_sync(struct spi_device *spi, struct spi_message *message) |
4553 | { |
4554 | int ret; |
4555 | |
4556 | mutex_lock(&spi->controller->bus_lock_mutex); |
4557 | ret = __spi_sync(spi, message); |
	mutex_unlock(&spi->controller->bus_lock_mutex);
4559 | |
4560 | return ret; |
4561 | } |
4562 | EXPORT_SYMBOL_GPL(spi_sync); |
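
/*
 * Illustrative sketch: a synchronous command/response exchange built from
 * two transfers. The "cmd" and "resp" buffers are hypothetical and must
 * stay valid until spi_sync() returns:
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd, .len = sizeof(cmd) },
 *		{ .rx_buf = resp, .len = sizeof(resp) },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */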
4563 | |
4564 | /** |
4565 | * spi_sync_locked - version of spi_sync with exclusive bus usage |
4566 | * @spi: device with which data will be exchanged |
4567 | * @message: describes the data transfers |
4568 | * Context: can sleep |
4569 | * |
4570 | * This call may only be used from a context that may sleep. The sleep |
4571 | * is non-interruptible, and has no timeout. Low-overhead controller |
4572 | * drivers may DMA directly into and out of the message buffers. |
4573 | * |
4574 | * This call should be used by drivers that require exclusive access to the |
4575 | * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must |
4576 | * be released by a spi_bus_unlock call when the exclusive access is over. |
4577 | * |
4578 | * Return: zero on success, else a negative error code. |
4579 | */ |
4580 | int spi_sync_locked(struct spi_device *spi, struct spi_message *message) |
4581 | { |
4582 | return __spi_sync(spi, message); |
4583 | } |
4584 | EXPORT_SYMBOL_GPL(spi_sync_locked); |
4585 | |
4586 | /** |
4587 | * spi_bus_lock - obtain a lock for exclusive SPI bus usage |
4588 | * @ctlr: SPI bus master that should be locked for exclusive bus access |
4589 | * Context: can sleep |
4590 | * |
4591 | * This call may only be used from a context that may sleep. The sleep |
4592 | * is non-interruptible, and has no timeout. |
4593 | * |
4594 | * This call should be used by drivers that require exclusive access to the |
4595 | * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the |
4596 | * exclusive access is over. Data transfer must be done by spi_sync_locked |
4597 | * and spi_async_locked calls when the SPI bus lock is held. |
4598 | * |
4599 | * Return: always zero. |
4600 | */ |
4601 | int spi_bus_lock(struct spi_controller *ctlr) |
4602 | { |
4603 | unsigned long flags; |
4604 | |
4605 | mutex_lock(&ctlr->bus_lock_mutex); |
4606 | |
4607 | spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); |
4608 | ctlr->bus_lock_flag = 1; |
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4610 | |
4611 | /* Mutex remains locked until spi_bus_unlock() is called */ |
4612 | |
4613 | return 0; |
4614 | } |
4615 | EXPORT_SYMBOL_GPL(spi_bus_lock); |
4616 | |
4617 | /** |
4618 | * spi_bus_unlock - release the lock for exclusive SPI bus usage |
4619 | * @ctlr: SPI bus master that was locked for exclusive bus access |
4620 | * Context: can sleep |
4621 | * |
4622 | * This call may only be used from a context that may sleep. The sleep |
4623 | * is non-interruptible, and has no timeout. |
4624 | * |
4625 | * This call releases an SPI bus lock previously obtained by an spi_bus_lock |
4626 | * call. |
4627 | * |
4628 | * Return: always zero. |
4629 | */ |
4630 | int spi_bus_unlock(struct spi_controller *ctlr) |
4631 | { |
4632 | ctlr->bus_lock_flag = 0; |
4633 | |
	mutex_unlock(&ctlr->bus_lock_mutex);
4635 | |
4636 | return 0; |
4637 | } |
4638 | EXPORT_SYMBOL_GPL(spi_bus_unlock); |
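
/*
 * Illustrative sketch: issuing two messages back to back with no other
 * device allowed onto the bus in between:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */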
4639 | |
4640 | /* Portable code must never pass more than 32 bytes */ |
4641 | #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) |
4642 | |
4643 | static u8 *buf; |
4644 | |
4645 | /** |
4646 | * spi_write_then_read - SPI synchronous write followed by read |
4647 | * @spi: device with which data will be exchanged |
4648 | * @txbuf: data to be written (need not be DMA-safe) |
4649 | * @n_tx: size of txbuf, in bytes |
4650 | * @rxbuf: buffer into which data will be read (need not be DMA-safe) |
4651 | * @n_rx: size of rxbuf, in bytes |
4652 | * Context: can sleep |
4653 | * |
4654 | * This performs a half duplex MicroWire style transaction with the |
4655 | * device, sending txbuf and then reading rxbuf. The return value |
4656 | * is zero for success, else a negative errno status code. |
4657 | * This call may only be used from a context that may sleep. |
4658 | * |
4659 | * Parameters to this routine are always copied using a small buffer. |
4660 | * Performance-sensitive or bulk transfer code should instead use |
4661 | * spi_{async,sync}() calls with DMA-safe buffers. |
4662 | * |
4663 | * Return: zero on success, else a negative error code. |
4664 | */ |
4665 | int spi_write_then_read(struct spi_device *spi, |
4666 | const void *txbuf, unsigned n_tx, |
4667 | void *rxbuf, unsigned n_rx) |
4668 | { |
4669 | static DEFINE_MUTEX(lock); |
4670 | |
4671 | int status; |
4672 | struct spi_message message; |
4673 | struct spi_transfer x[2]; |
4674 | u8 *local_buf; |
4675 | |
4676 | /* |
4677 | * Use preallocated DMA-safe buffer if we can. We can't avoid |
	 * copying here (as a pure convenience thing), but we can
4679 | * keep heap costs out of the hot path unless someone else is |
4680 | * using the pre-allocated buffer or the transfer is too large. |
4681 | */ |
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4683 | local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), |
4684 | GFP_KERNEL | GFP_DMA); |
4685 | if (!local_buf) |
4686 | return -ENOMEM; |
4687 | } else { |
4688 | local_buf = buf; |
4689 | } |
4690 | |
	spi_message_init(&message);
4692 | memset(x, 0, sizeof(x)); |
4693 | if (n_tx) { |
4694 | x[0].len = n_tx; |
		spi_message_add_tail(&x[0], &message);
4696 | } |
4697 | if (n_rx) { |
4698 | x[1].len = n_rx; |
		spi_message_add_tail(&x[1], &message);
4700 | } |
4701 | |
4702 | memcpy(local_buf, txbuf, n_tx); |
4703 | x[0].tx_buf = local_buf; |
4704 | x[1].rx_buf = local_buf + n_tx; |
4705 | |
4706 | /* Do the I/O */ |
4707 | status = spi_sync(spi, &message); |
4708 | if (status == 0) |
4709 | memcpy(rxbuf, x[1].rx_buf, n_rx); |
4710 | |
4711 | if (x[0].tx_buf == buf) |
		mutex_unlock(&lock);
4713 | else |
		kfree(local_buf);
4715 | |
4716 | return status; |
4717 | } |
4718 | EXPORT_SYMBOL_GPL(spi_write_then_read); |
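
/*
 * Illustrative sketch, command byte hypothetical: reading a two-byte ID
 * register with plain stack buffers, which is exactly the case the bounce
 * buffer above exists for:
 *
 *	u8 cmd = 0x9f;
 *	u8 id[2];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */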
4719 | |
4720 | /*-------------------------------------------------------------------------*/ |
4721 | |
4722 | #if IS_ENABLED(CONFIG_OF_DYNAMIC) |
4723 | /* Must call put_device() when done with returned spi_device device */ |
4724 | static struct spi_device *of_find_spi_device_by_node(struct device_node *node) |
4725 | { |
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4727 | |
4728 | return dev ? to_spi_device(dev) : NULL; |
4729 | } |
4730 | |
/* SPI controllers are not on the spi_bus, so we find them another way */
4732 | static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) |
4733 | { |
4734 | struct device *dev; |
4735 | |
	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
4739 | if (!dev) |
4740 | return NULL; |
4741 | |
4742 | /* Reference got in class_find_device */ |
4743 | return container_of(dev, struct spi_controller, dev); |
4744 | } |
4745 | |
4746 | static int of_spi_notify(struct notifier_block *nb, unsigned long action, |
4747 | void *arg) |
4748 | { |
4749 | struct of_reconfig_data *rd = arg; |
4750 | struct spi_controller *ctlr; |
4751 | struct spi_device *spi; |
4752 | |
4753 | switch (of_reconfig_get_state_change(action, arg)) { |
4754 | case OF_RECONFIG_CHANGE_ADD: |
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4756 | if (ctlr == NULL) |
4757 | return NOTIFY_OK; /* Not for us */ |
4758 | |
		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
4761 | return NOTIFY_OK; |
4762 | } |
4763 | |
4764 | /* |
4765 | * Clear the flag before adding the device so that fw_devlink |
4766 | * doesn't skip adding consumers to this device. |
4767 | */ |
4768 | rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; |
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);
4771 | |
		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
4777 | } |
4778 | break; |
4779 | |
4780 | case OF_RECONFIG_CHANGE_REMOVE: |
4781 | /* Already depopulated? */ |
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4783 | return NOTIFY_OK; |
4784 | |
4785 | /* Find our device by node */ |
		spi = of_find_spi_device_by_node(rd->dn);
4787 | if (spi == NULL) |
4788 | return NOTIFY_OK; /* No? not meant for us */ |
4789 | |
4790 | /* Unregister takes one ref away */ |
4791 | spi_unregister_device(spi); |
4792 | |
4793 | /* And put the reference of the find */ |
		put_device(&spi->dev);
4795 | break; |
4796 | } |
4797 | |
4798 | return NOTIFY_OK; |
4799 | } |
4800 | |
4801 | static struct notifier_block spi_of_notifier = { |
4802 | .notifier_call = of_spi_notify, |
4803 | }; |
4804 | #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ |
4805 | extern struct notifier_block spi_of_notifier; |
4806 | #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ |
4807 | |
4808 | #if IS_ENABLED(CONFIG_ACPI) |
4809 | static int spi_acpi_controller_match(struct device *dev, const void *data) |
4810 | { |
4811 | return ACPI_COMPANION(dev->parent) == data; |
4812 | } |
4813 | |
4814 | struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) |
4815 | { |
4816 | struct device *dev; |
4817 | |
	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
4823 | if (!dev) |
4824 | return NULL; |
4825 | |
4826 | return container_of(dev, struct spi_controller, dev); |
4827 | } |
4828 | EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev); |
4829 | |
4830 | static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) |
4831 | { |
4832 | struct device *dev; |
4833 | |
	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4835 | return to_spi_device(dev); |
4836 | } |
4837 | |
4838 | static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, |
4839 | void *arg) |
4840 | { |
4841 | struct acpi_device *adev = arg; |
4842 | struct spi_controller *ctlr; |
4843 | struct spi_device *spi; |
4844 | |
4845 | switch (value) { |
4846 | case ACPI_RECONFIG_DEVICE_ADD: |
4847 | ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev)); |
4848 | if (!ctlr) |
4849 | break; |
4850 | |
4851 | acpi_register_spi_device(ctlr, adev); |
		put_device(&ctlr->dev);
4853 | break; |
4854 | case ACPI_RECONFIG_DEVICE_REMOVE: |
4855 | if (!acpi_device_enumerated(adev)) |
4856 | break; |
4857 | |
4858 | spi = acpi_spi_find_device_by_adev(adev); |
4859 | if (!spi) |
4860 | break; |
4861 | |
4862 | spi_unregister_device(spi); |
		put_device(&spi->dev);
4864 | break; |
4865 | } |
4866 | |
4867 | return NOTIFY_OK; |
4868 | } |
4869 | |
4870 | static struct notifier_block spi_acpi_notifier = { |
4871 | .notifier_call = acpi_spi_notify, |
4872 | }; |
4873 | #else |
4874 | extern struct notifier_block spi_acpi_notifier; |
4875 | #endif |
4876 | |
4877 | static int __init spi_init(void) |
4878 | { |
4879 | int status; |
4880 | |
4881 | buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); |
4882 | if (!buf) { |
4883 | status = -ENOMEM; |
4884 | goto err0; |
4885 | } |
4886 | |
	status = bus_register(&spi_bus_type);
4888 | if (status < 0) |
4889 | goto err1; |
4890 | |
	status = class_register(&spi_master_class);
4892 | if (status < 0) |
4893 | goto err2; |
4894 | |
4895 | if (IS_ENABLED(CONFIG_SPI_SLAVE)) { |
		status = class_register(&spi_slave_class);
4897 | if (status < 0) |
4898 | goto err3; |
4899 | } |
4900 | |
4901 | if (IS_ENABLED(CONFIG_OF_DYNAMIC)) |
4902 | WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); |
4903 | if (IS_ENABLED(CONFIG_ACPI)) |
4904 | WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); |
4905 | |
4906 | return 0; |
4907 | |
4908 | err3: |
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
4914 | buf = NULL; |
4915 | err0: |
4916 | return status; |
4917 | } |
4918 | |
4919 | /* |
4920 | * A board_info is normally registered in arch_initcall(), |
4921 | * but even essential drivers wait till later. |
4922 | * |
4923 | * REVISIT only boardinfo really needs static linking. The rest (device and |
4924 | * driver registration) _could_ be dynamically linked (modular) ... Costs |
4925 | * include needing to have boardinfo data structures be much more public. |
4926 | */ |
4927 | postcore_initcall(spi_init); |
4928 | |