1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * NVEC: NVIDIA compliant embedded controller interface |
4 | * |
5 | * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net> |
6 | * |
7 | * Authors: Pierre-Hugues Husson <phhusson@free.fr> |
8 | * Ilya Petrov <ilya.muromec@gmail.com> |
9 | * Marc Dietrich <marvin24@gmx.de> |
10 | * Julian Andres Klode <jak@jak-linux.org> |
11 | */ |
12 | |
13 | #include <linux/kernel.h> |
14 | #include <linux/module.h> |
15 | #include <linux/atomic.h> |
16 | #include <linux/clk.h> |
17 | #include <linux/completion.h> |
18 | #include <linux/delay.h> |
19 | #include <linux/err.h> |
20 | #include <linux/gpio/consumer.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/io.h> |
23 | #include <linux/irq.h> |
24 | #include <linux/of.h> |
25 | #include <linux/list.h> |
26 | #include <linux/mfd/core.h> |
27 | #include <linux/mutex.h> |
28 | #include <linux/notifier.h> |
29 | #include <linux/slab.h> |
30 | #include <linux/spinlock.h> |
31 | #include <linux/workqueue.h> |
32 | |
33 | #include "nvec.h" |
34 | |
/* Tegra I2C controller register offsets, relative to nvec->base */
#define I2C_CNFG 0x00
#define I2C_CNFG_PACKET_MODE_EN BIT(10)
#define I2C_CNFG_NEW_MASTER_SFM BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12

#define I2C_SL_CNFG 0x20
#define I2C_SL_NEWSL BIT(2)
#define I2C_SL_NACK BIT(1)
#define I2C_SL_RESP BIT(0)
/* Bits reported in the I2C_SL_STATUS register (see nvec_interrupt()) */
#define I2C_SL_IRQ BIT(3)
#define END_TRANS BIT(4)
#define RCVD BIT(2)
#define RNW BIT(1)

#define I2C_SL_RCVD 0x24
#define I2C_SL_STATUS 0x28
#define I2C_SL_ADDR1 0x2c
#define I2C_SL_ADDR2 0x30
#define I2C_SL_DELAY_COUNT 0x3c
54 | |
55 | /** |
56 | * enum nvec_msg_category - Message categories for nvec_msg_alloc() |
57 | * @NVEC_MSG_RX: The message is an incoming message (from EC) |
58 | * @NVEC_MSG_TX: The message is an outgoing message (to EC) |
59 | */ |
60 | enum nvec_msg_category { |
61 | NVEC_MSG_RX, |
62 | NVEC_MSG_TX, |
63 | }; |
64 | |
65 | enum nvec_sleep_subcmds { |
66 | GLOBAL_EVENTS, |
67 | AP_PWR_DOWN, |
68 | AP_SUSPEND, |
69 | }; |
70 | |
#define CNF_EVENT_REPORTING 0x01	/* NVEC_SYS subcommand, see tegra_nvec_probe() */
#define GET_FIRMWARE_VERSION 0x15	/* NVEC_CNTL subcommand */
#define LID_SWITCH BIT(1)		/* event mask bit for the lid switch */
#define PWR_BUTTON BIT(15)		/* event mask bit for the power button */

/* Chip used by nvec_power_off(); assigned in tegra_nvec_probe() */
static struct nvec_chip *nvec_power_handle;
77 | |
78 | static const struct mfd_cell nvec_devices[] = { |
79 | { |
80 | .name = "nvec-kbd" , |
81 | }, |
82 | { |
83 | .name = "nvec-mouse" , |
84 | }, |
85 | { |
86 | .name = "nvec-power" , |
87 | .id = 0, |
88 | }, |
89 | { |
90 | .name = "nvec-power" , |
91 | .id = 1, |
92 | }, |
93 | { |
94 | .name = "nvec-paz00" , |
95 | }, |
96 | }; |
97 | |
98 | /** |
99 | * nvec_register_notifier - Register a notifier with nvec |
100 | * @nvec: A &struct nvec_chip |
101 | * @nb: The notifier block to register |
102 | * @events: Unused |
103 | * |
104 | * Registers a notifier with @nvec. The notifier will be added to an atomic |
105 | * notifier chain that is called for all received messages except those that |
106 | * correspond to a request initiated by nvec_write_sync(). |
107 | */ |
108 | int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, |
109 | unsigned int events) |
110 | { |
111 | return atomic_notifier_chain_register(nh: &nvec->notifier_list, nb); |
112 | } |
113 | EXPORT_SYMBOL_GPL(nvec_register_notifier); |
114 | |
115 | /** |
116 | * nvec_unregister_notifier - Unregister a notifier with nvec |
117 | * @nvec: A &struct nvec_chip |
118 | * @nb: The notifier block to unregister |
119 | * |
120 | * Unregisters a notifier with @nvec. The notifier will be removed from the |
121 | * atomic notifier chain. |
122 | */ |
123 | int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb) |
124 | { |
125 | return atomic_notifier_chain_unregister(nh: &nvec->notifier_list, nb); |
126 | } |
127 | EXPORT_SYMBOL_GPL(nvec_unregister_notifier); |
128 | |
129 | /* |
130 | * nvec_status_notifier - The final notifier |
131 | * |
132 | * Prints a message about control events not handled in the notifier |
133 | * chain. |
134 | */ |
135 | static int nvec_status_notifier(struct notifier_block *nb, |
136 | unsigned long event_type, void *data) |
137 | { |
138 | struct nvec_chip *nvec = container_of(nb, struct nvec_chip, |
139 | nvec_status_notifier); |
140 | unsigned char *msg = data; |
141 | |
142 | if (event_type != NVEC_CNTL) |
143 | return NOTIFY_DONE; |
144 | |
145 | dev_warn(nvec->dev, "unhandled msg type %ld\n" , event_type); |
146 | print_hex_dump(KERN_WARNING, prefix_str: "payload: " , prefix_type: DUMP_PREFIX_NONE, rowsize: 16, groupsize: 1, |
147 | buf: msg, len: msg[1] + 2, ascii: true); |
148 | |
149 | return NOTIFY_OK; |
150 | } |
151 | |
152 | /** |
153 | * nvec_msg_alloc: |
154 | * @nvec: A &struct nvec_chip |
155 | * @category: Pool category, see &enum nvec_msg_category |
156 | * |
157 | * Allocate a single &struct nvec_msg object from the message pool of |
158 | * @nvec. The result shall be passed to nvec_msg_free() if no longer |
159 | * used. |
160 | * |
161 | * Outgoing messages are placed in the upper 75% of the pool, keeping the |
162 | * lower 25% available for RX buffers only. The reason is to prevent a |
163 | * situation where all buffers are full and a message is thus endlessly |
164 | * retried because the response could never be processed. |
165 | */ |
166 | static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec, |
167 | enum nvec_msg_category category) |
168 | { |
169 | int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0; |
170 | |
171 | for (; i < NVEC_POOL_SIZE; i++) { |
172 | if (atomic_xchg(v: &nvec->msg_pool[i].used, new: 1) == 0) { |
173 | dev_vdbg(nvec->dev, "INFO: Allocate %i\n" , i); |
174 | return &nvec->msg_pool[i]; |
175 | } |
176 | } |
177 | |
178 | dev_err(nvec->dev, "could not allocate %s buffer\n" , |
179 | (category == NVEC_MSG_TX) ? "TX" : "RX" ); |
180 | |
181 | return NULL; |
182 | } |
183 | |
184 | /** |
185 | * nvec_msg_free: |
186 | * @nvec: A &struct nvec_chip |
187 | * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec) |
188 | * |
189 | * Free the given message |
190 | */ |
191 | void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) |
192 | { |
193 | if (msg != &nvec->tx_scratch) |
194 | dev_vdbg(nvec->dev, "INFO: Free %ti\n" , msg - nvec->msg_pool); |
195 | atomic_set(v: &msg->used, i: 0); |
196 | } |
197 | EXPORT_SYMBOL_GPL(nvec_msg_free); |
198 | |
199 | /** |
200 | * nvec_msg_is_event - Return %true if @msg is an event |
201 | * @msg: A message |
202 | */ |
203 | static bool nvec_msg_is_event(struct nvec_msg *msg) |
204 | { |
205 | return msg->data[0] >> 7; |
206 | } |
207 | |
208 | /** |
209 | * nvec_msg_size - Get the size of a message |
210 | * @msg: The message to get the size for |
211 | * |
212 | * This only works for received messages, not for outgoing messages. |
213 | */ |
214 | static size_t nvec_msg_size(struct nvec_msg *msg) |
215 | { |
216 | bool is_event = nvec_msg_is_event(msg); |
217 | int event_length = (msg->data[0] & 0x60) >> 5; |
218 | |
219 | /* for variable size, payload size in byte 1 + count (1) + cmd (1) */ |
220 | if (!is_event || event_length == NVEC_VAR_SIZE) |
221 | return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0; |
222 | else if (event_length == NVEC_2BYTES) |
223 | return 2; |
224 | else if (event_length == NVEC_3BYTES) |
225 | return 3; |
226 | return 0; |
227 | } |
228 | |
229 | /** |
230 | * nvec_gpio_set_value - Set the GPIO value |
231 | * @nvec: A &struct nvec_chip |
232 | * @value: The value to write (0 or 1) |
233 | * |
234 | * Like gpio_set_value(), but generating debugging information |
235 | */ |
236 | static void nvec_gpio_set_value(struct nvec_chip *nvec, int value) |
237 | { |
238 | dev_dbg(nvec->dev, "GPIO changed from %u to %u\n" , |
239 | gpiod_get_value(nvec->gpiod), value); |
240 | gpiod_set_value(desc: nvec->gpiod, value); |
241 | } |
242 | |
243 | /** |
244 | * nvec_write_async - Asynchronously write a message to NVEC |
245 | * @nvec: An nvec_chip instance |
246 | * @data: The message data, starting with the request type |
247 | * @size: The size of @data |
248 | * |
249 | * Queue a single message to be transferred to the embedded controller |
250 | * and return immediately. |
251 | * |
252 | * Returns: 0 on success, a negative error code on failure. If a failure |
253 | * occurred, the nvec driver may print an error. |
254 | */ |
255 | int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data, |
256 | short size) |
257 | { |
258 | struct nvec_msg *msg; |
259 | unsigned long flags; |
260 | |
261 | msg = nvec_msg_alloc(nvec, category: NVEC_MSG_TX); |
262 | |
263 | if (!msg) |
264 | return -ENOMEM; |
265 | |
266 | msg->data[0] = size; |
267 | memcpy(msg->data + 1, data, size); |
268 | msg->size = size + 1; |
269 | |
270 | spin_lock_irqsave(&nvec->tx_lock, flags); |
271 | list_add_tail(new: &msg->node, head: &nvec->tx_data); |
272 | spin_unlock_irqrestore(lock: &nvec->tx_lock, flags); |
273 | |
274 | schedule_work(work: &nvec->tx_work); |
275 | |
276 | return 0; |
277 | } |
278 | EXPORT_SYMBOL(nvec_write_async); |
279 | |
280 | /** |
281 | * nvec_write_sync - Write a message to nvec and read the response |
282 | * @nvec: An &struct nvec_chip |
283 | * @data: The data to write |
284 | * @size: The size of @data |
285 | * @msg: The response message received |
286 | * |
287 | * This is similar to nvec_write_async(), but waits for the |
288 | * request to be answered before returning. This function |
289 | * uses a mutex and can thus not be called from e.g. |
290 | * interrupt handlers. |
291 | * |
292 | * Returns: 0 on success, a negative error code on failure. |
293 | * The response message is returned in @msg. Shall be freed |
294 | * with nvec_msg_free() once no longer used. |
295 | * |
296 | */ |
297 | int nvec_write_sync(struct nvec_chip *nvec, |
298 | const unsigned char *data, short size, |
299 | struct nvec_msg **msg) |
300 | { |
301 | mutex_lock(&nvec->sync_write_mutex); |
302 | |
303 | *msg = NULL; |
304 | nvec->sync_write_pending = (data[1] << 8) + data[0]; |
305 | |
306 | if (nvec_write_async(nvec, data, size) < 0) { |
307 | mutex_unlock(lock: &nvec->sync_write_mutex); |
308 | return -ENOMEM; |
309 | } |
310 | |
311 | dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n" , |
312 | nvec->sync_write_pending); |
313 | if (!(wait_for_completion_timeout(x: &nvec->sync_write, |
314 | timeout: msecs_to_jiffies(m: 2000)))) { |
315 | dev_warn(nvec->dev, |
316 | "timeout waiting for sync write to complete\n" ); |
317 | mutex_unlock(lock: &nvec->sync_write_mutex); |
318 | return -ETIMEDOUT; |
319 | } |
320 | |
321 | dev_dbg(nvec->dev, "nvec_sync_write: pong!\n" ); |
322 | |
323 | *msg = nvec->last_sync_msg; |
324 | |
325 | mutex_unlock(lock: &nvec->sync_write_mutex); |
326 | |
327 | return 0; |
328 | } |
329 | EXPORT_SYMBOL(nvec_write_sync); |
330 | |
331 | /** |
332 | * nvec_toggle_global_events - enables or disables global event reporting |
333 | * @nvec: nvec handle |
334 | * @state: true for enable, false for disable |
335 | * |
336 | * This switches on/off global event reports by the embedded controller. |
337 | */ |
338 | static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state) |
339 | { |
340 | unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state }; |
341 | |
342 | nvec_write_async(nvec, global_events, 3); |
343 | } |
344 | |
345 | /** |
346 | * nvec_event_mask - fill the command string with event bitfield |
347 | * @ev: points to event command string |
348 | * @mask: bit to insert into the event mask |
349 | * |
350 | * Configure event command expects a 32 bit bitfield which describes |
351 | * which events to enable. The bitfield has the following structure |
352 | * (from highest byte to lowest): |
353 | * system state bits 7-0 |
354 | * system state bits 15-8 |
355 | * oem system state bits 7-0 |
356 | * oem system state bits 15-8 |
357 | */ |
358 | static void nvec_event_mask(char *ev, u32 mask) |
359 | { |
360 | ev[3] = mask >> 16 & 0xff; |
361 | ev[4] = mask >> 24 & 0xff; |
362 | ev[5] = mask >> 0 & 0xff; |
363 | ev[6] = mask >> 8 & 0xff; |
364 | } |
365 | |
366 | /** |
367 | * nvec_request_master - Process outgoing messages |
368 | * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip) |
369 | * |
370 | * Processes all outgoing requests by sending the request and awaiting the |
371 | * response, then continuing with the next request. Once a request has a |
372 | * matching response, it will be freed and removed from the list. |
373 | */ |
374 | static void nvec_request_master(struct work_struct *work) |
375 | { |
376 | struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work); |
377 | unsigned long flags; |
378 | long err; |
379 | struct nvec_msg *msg; |
380 | |
381 | spin_lock_irqsave(&nvec->tx_lock, flags); |
382 | while (!list_empty(head: &nvec->tx_data)) { |
383 | msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node); |
384 | spin_unlock_irqrestore(lock: &nvec->tx_lock, flags); |
385 | nvec_gpio_set_value(nvec, value: 0); |
386 | err = wait_for_completion_interruptible_timeout(x: &nvec->ec_transfer, |
387 | timeout: msecs_to_jiffies(m: 5000)); |
388 | |
389 | if (err == 0) { |
390 | dev_warn(nvec->dev, "timeout waiting for ec transfer\n" ); |
391 | nvec_gpio_set_value(nvec, value: 1); |
392 | msg->pos = 0; |
393 | } |
394 | |
395 | spin_lock_irqsave(&nvec->tx_lock, flags); |
396 | |
397 | if (err > 0) { |
398 | list_del_init(entry: &msg->node); |
399 | nvec_msg_free(nvec, msg); |
400 | } |
401 | } |
402 | spin_unlock_irqrestore(lock: &nvec->tx_lock, flags); |
403 | } |
404 | |
405 | /** |
406 | * parse_msg - Print some information and call the notifiers on an RX message |
407 | * @nvec: A &struct nvec_chip |
408 | * @msg: A message received by @nvec |
409 | * |
410 | * Paarse some pieces of the message and then call the chain of notifiers |
411 | * registered via nvec_register_notifier. |
412 | */ |
413 | static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg) |
414 | { |
415 | if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) { |
416 | dev_err(nvec->dev, "ec responded %*ph\n" , 4, msg->data); |
417 | return -EINVAL; |
418 | } |
419 | |
420 | if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5) |
421 | print_hex_dump(KERN_WARNING, prefix_str: "ec system event " , |
422 | prefix_type: DUMP_PREFIX_NONE, rowsize: 16, groupsize: 1, buf: msg->data, |
423 | len: msg->data[1] + 2, ascii: true); |
424 | |
425 | atomic_notifier_call_chain(nh: &nvec->notifier_list, val: msg->data[0] & 0x8f, |
426 | v: msg->data); |
427 | |
428 | return 0; |
429 | } |
430 | |
431 | /** |
432 | * nvec_dispatch - Process messages received from the EC |
433 | * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip) |
434 | * |
435 | * Process messages previously received from the EC and put into the RX |
436 | * queue of the &struct nvec_chip instance associated with @work. |
437 | */ |
438 | static void nvec_dispatch(struct work_struct *work) |
439 | { |
440 | struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work); |
441 | unsigned long flags; |
442 | struct nvec_msg *msg; |
443 | |
444 | spin_lock_irqsave(&nvec->rx_lock, flags); |
445 | while (!list_empty(head: &nvec->rx_data)) { |
446 | msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node); |
447 | list_del_init(entry: &msg->node); |
448 | spin_unlock_irqrestore(lock: &nvec->rx_lock, flags); |
449 | |
450 | if (nvec->sync_write_pending == |
451 | (msg->data[2] << 8) + msg->data[0]) { |
452 | dev_dbg(nvec->dev, "sync write completed!\n" ); |
453 | nvec->sync_write_pending = 0; |
454 | nvec->last_sync_msg = msg; |
455 | complete(&nvec->sync_write); |
456 | } else { |
457 | parse_msg(nvec, msg); |
458 | nvec_msg_free(nvec, msg); |
459 | } |
460 | spin_lock_irqsave(&nvec->rx_lock, flags); |
461 | } |
462 | spin_unlock_irqrestore(lock: &nvec->rx_lock, flags); |
463 | } |
464 | |
465 | /** |
466 | * nvec_tx_completed - Complete the current transfer |
467 | * @nvec: A &struct nvec_chip |
468 | * |
469 | * This is called when we have received an END_TRANS on a TX transfer. |
470 | */ |
471 | static void nvec_tx_completed(struct nvec_chip *nvec) |
472 | { |
473 | /* We got an END_TRANS, let's skip this, maybe there's an event */ |
474 | if (nvec->tx->pos != nvec->tx->size) { |
475 | dev_err(nvec->dev, "premature END_TRANS, resending\n" ); |
476 | nvec->tx->pos = 0; |
477 | nvec_gpio_set_value(nvec, value: 0); |
478 | } else { |
479 | nvec->state = 0; |
480 | } |
481 | } |
482 | |
483 | /** |
484 | * nvec_rx_completed - Complete the current transfer |
485 | * @nvec: A &struct nvec_chip |
486 | * |
487 | * This is called when we have received an END_TRANS on a RX transfer. |
488 | */ |
489 | static void nvec_rx_completed(struct nvec_chip *nvec) |
490 | { |
491 | if (nvec->rx->pos != nvec_msg_size(msg: nvec->rx)) { |
492 | dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n" , |
493 | (uint)nvec_msg_size(nvec->rx), |
494 | (uint)nvec->rx->pos); |
495 | |
496 | nvec_msg_free(nvec, nvec->rx); |
497 | nvec->state = 0; |
498 | |
499 | /* Battery quirk - Often incomplete, and likes to crash */ |
500 | if (nvec->rx->data[0] == NVEC_BAT) |
501 | complete(&nvec->ec_transfer); |
502 | |
503 | return; |
504 | } |
505 | |
506 | spin_lock(lock: &nvec->rx_lock); |
507 | |
508 | /* |
509 | * Add the received data to the work list and move the ring buffer |
510 | * pointer to the next entry. |
511 | */ |
512 | list_add_tail(new: &nvec->rx->node, head: &nvec->rx_data); |
513 | |
514 | spin_unlock(lock: &nvec->rx_lock); |
515 | |
516 | nvec->state = 0; |
517 | |
518 | if (!nvec_msg_is_event(msg: nvec->rx)) |
519 | complete(&nvec->ec_transfer); |
520 | |
521 | schedule_work(work: &nvec->rx_work); |
522 | } |
523 | |
524 | /** |
525 | * nvec_invalid_flags - Send an error message about invalid flags and jump |
526 | * @nvec: The nvec device |
527 | * @status: The status flags |
528 | * @reset: Whether we shall jump to state 0. |
529 | */ |
530 | static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status, |
531 | bool reset) |
532 | { |
533 | dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n" , |
534 | status, nvec->state); |
535 | if (reset) |
536 | nvec->state = 0; |
537 | } |
538 | |
539 | /** |
540 | * nvec_tx_set - Set the message to transfer (nvec->tx) |
541 | * @nvec: A &struct nvec_chip |
542 | * |
543 | * Gets the first entry from the tx_data list of @nvec and sets the |
544 | * tx member to it. If the tx_data list is empty, this uses the |
545 | * tx_scratch message to send a no operation message. |
546 | */ |
547 | static void nvec_tx_set(struct nvec_chip *nvec) |
548 | { |
549 | spin_lock(lock: &nvec->tx_lock); |
550 | if (list_empty(head: &nvec->tx_data)) { |
551 | dev_err(nvec->dev, "empty tx - sending no-op\n" ); |
552 | memcpy(nvec->tx_scratch.data, "\x02\x07\x02" , 3); |
553 | nvec->tx_scratch.size = 3; |
554 | nvec->tx_scratch.pos = 0; |
555 | nvec->tx = &nvec->tx_scratch; |
556 | list_add_tail(new: &nvec->tx->node, head: &nvec->tx_data); |
557 | } else { |
558 | nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg, |
559 | node); |
560 | nvec->tx->pos = 0; |
561 | } |
562 | spin_unlock(lock: &nvec->tx_lock); |
563 | |
564 | dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n" , |
565 | (uint)nvec->tx->size, nvec->tx->data[1]); |
566 | } |
567 | |
568 | /** |
569 | * nvec_interrupt - Interrupt handler |
570 | * @irq: The IRQ |
571 | * @dev: The nvec device |
572 | * |
573 | * Interrupt handler that fills our RX buffers and empties our TX |
574 | * buffers. This uses a finite state machine with ridiculous amounts |
575 | * of error checking, in order to be fairly reliable. |
576 | */ |
577 | static irqreturn_t nvec_interrupt(int irq, void *dev) |
578 | { |
579 | unsigned long status; |
580 | unsigned int received = 0; |
581 | unsigned char to_send = 0xff; |
582 | const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW; |
583 | struct nvec_chip *nvec = dev; |
584 | unsigned int state = nvec->state; |
585 | |
586 | status = readl(addr: nvec->base + I2C_SL_STATUS); |
587 | |
588 | /* Filter out some errors */ |
589 | if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) { |
590 | dev_err(nvec->dev, "unexpected irq mask %lx\n" , status); |
591 | return IRQ_HANDLED; |
592 | } |
593 | if ((status & I2C_SL_IRQ) == 0) { |
594 | dev_err(nvec->dev, "Spurious IRQ\n" ); |
595 | return IRQ_HANDLED; |
596 | } |
597 | |
598 | /* The EC did not request a read, so it send us something, read it */ |
599 | if ((status & RNW) == 0) { |
600 | received = readl(addr: nvec->base + I2C_SL_RCVD); |
601 | if (status & RCVD) |
602 | writel(val: 0, addr: nvec->base + I2C_SL_RCVD); |
603 | } |
604 | |
605 | if (status == (I2C_SL_IRQ | RCVD)) |
606 | nvec->state = 0; |
607 | |
608 | switch (nvec->state) { |
609 | case 0: /* Verify that its a transfer start, the rest later */ |
610 | if (status != (I2C_SL_IRQ | RCVD)) |
611 | nvec_invalid_flags(nvec, status, reset: false); |
612 | break; |
613 | case 1: /* command byte */ |
614 | if (status != I2C_SL_IRQ) { |
615 | nvec_invalid_flags(nvec, status, reset: true); |
616 | } else { |
617 | nvec->rx = nvec_msg_alloc(nvec, category: NVEC_MSG_RX); |
618 | /* Should not happen in a normal world */ |
619 | if (unlikely(!nvec->rx)) { |
620 | nvec->state = 0; |
621 | break; |
622 | } |
623 | nvec->rx->data[0] = received; |
624 | nvec->rx->pos = 1; |
625 | nvec->state = 2; |
626 | } |
627 | break; |
628 | case 2: /* first byte after command */ |
629 | if (status == (I2C_SL_IRQ | RNW | RCVD)) { |
630 | udelay(33); |
631 | if (nvec->rx->data[0] != 0x01) { |
632 | dev_err(nvec->dev, |
633 | "Read without prior read command\n" ); |
634 | nvec->state = 0; |
635 | break; |
636 | } |
637 | nvec_msg_free(nvec, nvec->rx); |
638 | nvec->state = 3; |
639 | nvec_tx_set(nvec); |
640 | to_send = nvec->tx->data[0]; |
641 | nvec->tx->pos = 1; |
642 | } else if (status == (I2C_SL_IRQ)) { |
643 | nvec->rx->data[1] = received; |
644 | nvec->rx->pos = 2; |
645 | nvec->state = 4; |
646 | } else { |
647 | nvec_invalid_flags(nvec, status, reset: true); |
648 | } |
649 | break; |
650 | case 3: /* EC does a block read, we transmit data */ |
651 | if (status & END_TRANS) { |
652 | nvec_tx_completed(nvec); |
653 | } else if ((status & RNW) == 0 || (status & RCVD)) { |
654 | nvec_invalid_flags(nvec, status, reset: true); |
655 | } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) { |
656 | to_send = nvec->tx->data[nvec->tx->pos++]; |
657 | } else { |
658 | dev_err(nvec->dev, |
659 | "tx buffer underflow on %p (%u > %u)\n" , |
660 | nvec->tx, |
661 | (uint)(nvec->tx ? nvec->tx->pos : 0), |
662 | (uint)(nvec->tx ? nvec->tx->size : 0)); |
663 | nvec->state = 0; |
664 | } |
665 | break; |
666 | case 4: /* EC does some write, we read the data */ |
667 | if ((status & (END_TRANS | RNW)) == END_TRANS) |
668 | nvec_rx_completed(nvec); |
669 | else if (status & (RNW | RCVD)) |
670 | nvec_invalid_flags(nvec, status, reset: true); |
671 | else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE) |
672 | nvec->rx->data[nvec->rx->pos++] = received; |
673 | else |
674 | dev_err(nvec->dev, |
675 | "RX buffer overflow on %p: Trying to write byte %u of %u\n" , |
676 | nvec->rx, nvec->rx ? nvec->rx->pos : 0, |
677 | NVEC_MSG_SIZE); |
678 | break; |
679 | default: |
680 | nvec->state = 0; |
681 | } |
682 | |
683 | /* If we are told that a new transfer starts, verify it */ |
684 | if ((status & (RCVD | RNW)) == RCVD) { |
685 | if (received != nvec->i2c_addr) |
686 | dev_err(nvec->dev, |
687 | "received address 0x%02x, expected 0x%02x\n" , |
688 | received, nvec->i2c_addr); |
689 | nvec->state = 1; |
690 | } |
691 | |
692 | /* Send data if requested, but not on end of transmission */ |
693 | if ((status & (RNW | END_TRANS)) == RNW) |
694 | writel(val: to_send, addr: nvec->base + I2C_SL_RCVD); |
695 | |
696 | /* If we have send the first byte */ |
697 | if (status == (I2C_SL_IRQ | RNW | RCVD)) |
698 | nvec_gpio_set_value(nvec, value: 1); |
699 | |
700 | dev_dbg(nvec->dev, |
701 | "Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n" , |
702 | (status & RNW) == 0 ? "received" : "R=" , |
703 | received, |
704 | (status & (RNW | END_TRANS)) ? "sent" : "S=" , |
705 | to_send, |
706 | state, |
707 | status & END_TRANS ? " END_TRANS" : "" , |
708 | status & RCVD ? " RCVD" : "" , |
709 | status & RNW ? " RNW" : "" ); |
710 | |
711 | /* |
712 | * TODO: replace the udelay with a read back after each writel above |
713 | * in order to work around a hardware issue, see i2c-tegra.c |
714 | * |
715 | * Unfortunately, this change causes an intialisation issue with the |
716 | * touchpad, which needs to be fixed first. |
717 | */ |
718 | udelay(100); |
719 | |
720 | return IRQ_HANDLED; |
721 | } |
722 | |
723 | static void tegra_init_i2c_slave(struct nvec_chip *nvec) |
724 | { |
725 | u32 val; |
726 | |
727 | clk_prepare_enable(clk: nvec->i2c_clk); |
728 | |
729 | reset_control_assert(rstc: nvec->rst); |
730 | udelay(2); |
731 | reset_control_deassert(rstc: nvec->rst); |
732 | |
733 | val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN | |
734 | (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT); |
735 | writel(val, addr: nvec->base + I2C_CNFG); |
736 | |
737 | clk_set_rate(clk: nvec->i2c_clk, rate: 8 * 80000); |
738 | |
739 | writel(I2C_SL_NEWSL, addr: nvec->base + I2C_SL_CNFG); |
740 | writel(val: 0x1E, addr: nvec->base + I2C_SL_DELAY_COUNT); |
741 | |
742 | writel(val: nvec->i2c_addr >> 1, addr: nvec->base + I2C_SL_ADDR1); |
743 | writel(val: 0, addr: nvec->base + I2C_SL_ADDR2); |
744 | |
745 | enable_irq(irq: nvec->irq); |
746 | } |
747 | |
#ifdef CONFIG_PM_SLEEP
/* Quiesce the I2C slave for suspend: mask the IRQ, NACK the EC, gate the clock */
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif
756 | |
757 | static void nvec_power_off(void) |
758 | { |
759 | char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN }; |
760 | |
761 | nvec_toggle_global_events(nvec: nvec_power_handle, state: false); |
762 | nvec_write_async(nvec_power_handle, ap_pwr_down, 2); |
763 | } |
764 | |
765 | static int tegra_nvec_probe(struct platform_device *pdev) |
766 | { |
767 | int err, ret; |
768 | struct clk *i2c_clk; |
769 | struct device *dev = &pdev->dev; |
770 | struct nvec_chip *nvec; |
771 | struct nvec_msg *msg; |
772 | void __iomem *base; |
773 | char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION }, |
774 | unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 }, |
775 | enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true }; |
776 | |
777 | if (!dev->of_node) { |
778 | dev_err(dev, "must be instantiated using device tree\n" ); |
779 | return -ENODEV; |
780 | } |
781 | |
782 | nvec = devm_kzalloc(dev, size: sizeof(struct nvec_chip), GFP_KERNEL); |
783 | if (!nvec) |
784 | return -ENOMEM; |
785 | |
786 | platform_set_drvdata(pdev, data: nvec); |
787 | nvec->dev = dev; |
788 | |
789 | if (of_property_read_u32(np: dev->of_node, propname: "slave-addr" , out_value: &nvec->i2c_addr)) { |
790 | dev_err(dev, "no i2c address specified" ); |
791 | return -ENODEV; |
792 | } |
793 | |
794 | base = devm_platform_ioremap_resource(pdev, index: 0); |
795 | if (IS_ERR(ptr: base)) |
796 | return PTR_ERR(ptr: base); |
797 | |
798 | nvec->irq = platform_get_irq(pdev, 0); |
799 | if (nvec->irq < 0) |
800 | return -ENODEV; |
801 | |
802 | i2c_clk = devm_clk_get(dev, id: "div-clk" ); |
803 | if (IS_ERR(ptr: i2c_clk)) { |
804 | dev_err(dev, "failed to get controller clock\n" ); |
805 | return -ENODEV; |
806 | } |
807 | |
808 | nvec->rst = devm_reset_control_get_exclusive(dev, id: "i2c" ); |
809 | if (IS_ERR(ptr: nvec->rst)) { |
810 | dev_err(dev, "failed to get controller reset\n" ); |
811 | return PTR_ERR(ptr: nvec->rst); |
812 | } |
813 | |
814 | nvec->base = base; |
815 | nvec->i2c_clk = i2c_clk; |
816 | nvec->rx = &nvec->msg_pool[0]; |
817 | |
818 | ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list); |
819 | |
820 | init_completion(x: &nvec->sync_write); |
821 | init_completion(x: &nvec->ec_transfer); |
822 | mutex_init(&nvec->sync_write_mutex); |
823 | spin_lock_init(&nvec->tx_lock); |
824 | spin_lock_init(&nvec->rx_lock); |
825 | INIT_LIST_HEAD(list: &nvec->rx_data); |
826 | INIT_LIST_HEAD(list: &nvec->tx_data); |
827 | INIT_WORK(&nvec->rx_work, nvec_dispatch); |
828 | INIT_WORK(&nvec->tx_work, nvec_request_master); |
829 | |
830 | nvec->gpiod = devm_gpiod_get(dev, con_id: "request" , flags: GPIOD_OUT_HIGH); |
831 | if (IS_ERR(ptr: nvec->gpiod)) { |
832 | dev_err(dev, "couldn't request gpio\n" ); |
833 | return PTR_ERR(ptr: nvec->gpiod); |
834 | } |
835 | |
836 | err = devm_request_irq(dev, irq: nvec->irq, handler: nvec_interrupt, irqflags: 0, |
837 | devname: "nvec" , dev_id: nvec); |
838 | if (err) { |
839 | dev_err(dev, "couldn't request irq\n" ); |
840 | return -ENODEV; |
841 | } |
842 | disable_irq(irq: nvec->irq); |
843 | |
844 | tegra_init_i2c_slave(nvec); |
845 | |
846 | /* enable event reporting */ |
847 | nvec_toggle_global_events(nvec, state: true); |
848 | |
849 | nvec->nvec_status_notifier.notifier_call = nvec_status_notifier; |
850 | nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0); |
851 | |
852 | nvec_power_handle = nvec; |
853 | pm_power_off = nvec_power_off; |
854 | |
855 | /* Get Firmware Version */ |
856 | err = nvec_write_sync(nvec, get_firmware_version, 2, &msg); |
857 | |
858 | if (!err) { |
859 | dev_warn(dev, |
860 | "ec firmware version %02x.%02x.%02x / %02x\n" , |
861 | msg->data[4], msg->data[5], |
862 | msg->data[6], msg->data[7]); |
863 | |
864 | nvec_msg_free(nvec, msg); |
865 | } |
866 | |
867 | ret = mfd_add_devices(parent: dev, id: 0, cells: nvec_devices, |
868 | ARRAY_SIZE(nvec_devices), NULL, irq_base: 0, NULL); |
869 | if (ret) |
870 | dev_err(dev, "error adding subdevices\n" ); |
871 | |
872 | /* unmute speakers? */ |
873 | nvec_write_async(nvec, unmute_speakers, 4); |
874 | |
875 | /* enable lid switch event */ |
876 | nvec_event_mask(ev: enable_event, LID_SWITCH); |
877 | nvec_write_async(nvec, enable_event, 7); |
878 | |
879 | /* enable power button event */ |
880 | nvec_event_mask(ev: enable_event, PWR_BUTTON); |
881 | nvec_write_async(nvec, enable_event, 7); |
882 | |
883 | return 0; |
884 | } |
885 | |
886 | static void tegra_nvec_remove(struct platform_device *pdev) |
887 | { |
888 | struct nvec_chip *nvec = platform_get_drvdata(pdev); |
889 | |
890 | nvec_toggle_global_events(nvec, state: false); |
891 | mfd_remove_devices(parent: nvec->dev); |
892 | nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier); |
893 | cancel_work_sync(work: &nvec->rx_work); |
894 | cancel_work_sync(work: &nvec->tx_work); |
895 | /* FIXME: needs check whether nvec is responsible for power off */ |
896 | pm_power_off = NULL; |
897 | } |
898 | |
899 | #ifdef CONFIG_PM_SLEEP |
900 | static int nvec_suspend(struct device *dev) |
901 | { |
902 | int err; |
903 | struct nvec_chip *nvec = dev_get_drvdata(dev); |
904 | struct nvec_msg *msg; |
905 | char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND }; |
906 | |
907 | dev_dbg(nvec->dev, "suspending\n" ); |
908 | |
909 | /* keep these sync or you'll break suspend */ |
910 | nvec_toggle_global_events(nvec, state: false); |
911 | |
912 | err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg); |
913 | if (!err) |
914 | nvec_msg_free(nvec, msg); |
915 | |
916 | nvec_disable_i2c_slave(nvec); |
917 | |
918 | return 0; |
919 | } |
920 | |
921 | static int nvec_resume(struct device *dev) |
922 | { |
923 | struct nvec_chip *nvec = dev_get_drvdata(dev); |
924 | |
925 | dev_dbg(nvec->dev, "resuming\n" ); |
926 | tegra_init_i2c_slave(nvec); |
927 | nvec_toggle_global_events(nvec, state: true); |
928 | |
929 | return 0; |
930 | } |
931 | #endif |
932 | |
933 | static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume); |
934 | |
935 | /* Match table for of_platform binding */ |
936 | static const struct of_device_id nvidia_nvec_of_match[] = { |
937 | { .compatible = "nvidia,nvec" , }, |
938 | {}, |
939 | }; |
940 | MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match); |
941 | |
942 | static struct platform_driver nvec_device_driver = { |
943 | .probe = tegra_nvec_probe, |
944 | .remove_new = tegra_nvec_remove, |
945 | .driver = { |
946 | .name = "nvec" , |
947 | .pm = &nvec_pm_ops, |
948 | .of_match_table = nvidia_nvec_of_match, |
949 | } |
950 | }; |
951 | |
952 | module_platform_driver(nvec_device_driver); |
953 | |
954 | MODULE_ALIAS("platform:nvec" ); |
955 | MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface" ); |
956 | MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>" ); |
957 | MODULE_LICENSE("GPL" ); |
958 | |