1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * UCSI driver for Cypress CCGx Type-C controller |
4 | * |
5 | * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved. |
6 | * Author: Ajay Gupta <ajayg@nvidia.com> |
7 | * |
8 | * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c |
9 | */ |
10 | #include <linux/acpi.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/firmware.h> |
13 | #include <linux/i2c.h> |
14 | #include <linux/module.h> |
15 | #include <linux/pci.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/pm.h> |
18 | #include <linux/pm_runtime.h> |
19 | #include <linux/usb/typec_dp.h> |
20 | |
21 | #include <asm/unaligned.h> |
22 | #include "ucsi.h" |
23 | |
/* Firmware image the CCGx controller is currently running from. */
enum enum_fw_mode {
	BOOT, /* bootloader */
	FW1, /* FW partition-1 (contains secondary fw) */
	FW2, /* FW partition-2 (contains primary fw) */
	FW_INVALID,
};
30 | |
31 | #define CCGX_RAB_DEVICE_MODE 0x0000 |
32 | #define CCGX_RAB_INTR_REG 0x0006 |
33 | #define DEV_INT BIT(0) |
34 | #define PORT0_INT BIT(1) |
35 | #define PORT1_INT BIT(2) |
36 | #define UCSI_READ_INT BIT(7) |
37 | #define CCGX_RAB_JUMP_TO_BOOT 0x0007 |
38 | #define TO_BOOT 'J' |
39 | #define TO_ALT_FW 'A' |
40 | #define CCGX_RAB_RESET_REQ 0x0008 |
41 | #define RESET_SIG 'R' |
42 | #define CMD_RESET_I2C 0x0 |
43 | #define CMD_RESET_DEV 0x1 |
44 | #define CCGX_RAB_ENTER_FLASHING 0x000A |
45 | #define FLASH_ENTER_SIG 'P' |
46 | #define CCGX_RAB_VALIDATE_FW 0x000B |
47 | #define CCGX_RAB_FLASH_ROW_RW 0x000C |
48 | #define FLASH_SIG 'F' |
49 | #define FLASH_RD_CMD 0x0 |
50 | #define FLASH_WR_CMD 0x1 |
51 | #define FLASH_FWCT1_WR_CMD 0x2 |
52 | #define FLASH_FWCT2_WR_CMD 0x3 |
53 | #define FLASH_FWCT_SIG_WR_CMD 0x4 |
54 | #define CCGX_RAB_READ_ALL_VER 0x0010 |
55 | #define CCGX_RAB_READ_FW2_VER 0x0020 |
56 | #define CCGX_RAB_UCSI_CONTROL 0x0039 |
57 | #define CCGX_RAB_UCSI_CONTROL_START BIT(0) |
58 | #define CCGX_RAB_UCSI_CONTROL_STOP BIT(1) |
59 | #define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff)) |
60 | #define REG_FLASH_RW_MEM 0x0200 |
61 | #define DEV_REG_IDX CCGX_RAB_DEVICE_MODE |
62 | #define CCGX_RAB_PDPORT_ENABLE 0x002C |
63 | #define PDPORT_1 BIT(0) |
64 | #define PDPORT_2 BIT(1) |
65 | #define CCGX_RAB_RESPONSE 0x007E |
66 | #define ASYNC_EVENT BIT(7) |
67 | |
68 | /* CCGx events & async msg codes */ |
69 | #define RESET_COMPLETE 0x80 |
70 | #define EVENT_INDEX RESET_COMPLETE |
71 | #define PORT_CONNECT_DET 0x84 |
72 | #define PORT_DISCONNECT_DET 0x85 |
73 | #define ROLE_SWAP_COMPELETE 0x87 |
74 | |
75 | /* ccg firmware */ |
76 | #define CYACD_LINE_SIZE 527 |
77 | #define CCG4_ROW_SIZE 256 |
78 | #define FW1_METADATA_ROW 0x1FF |
79 | #define FW2_METADATA_ROW 0x1FE |
80 | #define FW_CFG_TABLE_SIG_SIZE 256 |
81 | |
82 | static int secondary_fw_min_ver = 41; |
83 | |
/* Which image must be flashed, and which image performs the flashing. */
enum enum_flash_mode {
	SECONDARY_BL, /* update secondary using bootloader */
	PRIMARY, /* update primary using secondary */
	SECONDARY, /* update secondary using primary */
	FLASH_NOT_NEEDED, /* update not required */
	FLASH_INVALID,
};
91 | |
/* Firmware file names, indexed to match enum enum_flash_mode order. */
static const char * const ccg_fw_names[] = {
	"ccg_boot.cyacd",
	"ccg_primary.cyacd",
	"ccg_secondary.cyacd"
};
97 | |
/* Layout of the CCGX_RAB_DEVICE_MODE register block, read over I2C. */
struct ccg_dev_info {
#define CCG_DEVINFO_FWMODE_SHIFT (0)
#define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
#define CCG_DEVINFO_PDPORTS_SHIFT (2)
#define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
	u8 mode;	/* fw mode + PD port count, see masks above */
	u8 bl_mode;
	__le16 silicon_id;
	__le16 bl_last_row;
} __packed;
108 | |
/* On-wire version layout: 16-bit build, 8-bit patch, packed major/minor. */
struct version_format {
	__le16 build;
	u8 patch;
	u8 ver;		/* major in high nibble, minor in low nibble */
#define CCG_VERSION_PATCH(x) ((x) << 16)
#define CCG_VERSION(x) ((x) << 24)
#define CCG_VERSION_MIN_SHIFT (0)
#define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
#define CCG_VERSION_MAJ_SHIFT (4)
#define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
} __packed;
120 | |
121 | /* |
122 | * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue |
123 | * of missing interrupt when a device is connected for runtime resume |
124 | */ |
125 | #define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v') |
126 | #define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10)) |
127 | |
128 | /* Firmware for Tegra doesn't support UCSI ALT command, built |
129 | * for NVIDIA has known issue of reporting wrong capability info |
130 | */ |
131 | #define CCG_FW_BUILD_NVIDIA_TEGRA (('g' << 8) | 'n') |
132 | |
133 | /* Altmode offset for NVIDIA Function Test Board (FTB) */ |
134 | #define NVIDIA_FTB_DP_OFFSET (2) |
135 | #define NVIDIA_FTB_DBG_OFFSET (3) |
136 | |
/* Version pair reported per firmware image: bootloader base + application. */
struct version_info {
	struct version_format base;
	struct version_format app;
};
141 | |
/*
 * Firmware configuration table (FWCT) appended to signed .cyacd images;
 * identity must read 'F','W','C','T' (little-endian) for a signed image.
 */
struct fw_config_table {
	u32 identity;
	u16 table_size;
	u8 fwct_version;
	u8 is_key_change;
	u8 guid[16];
	struct version_format base;
	struct version_format app;
	u8 primary_fw_digest[32];
	u32 key_exp_length;
	u8 key_modulus[256];
	u8 key_exp[4];
};
155 | |
/* CCGx response codes (read from CCGX_RAB_RESPONSE after a command) */
enum ccg_resp_code {
	CMD_NO_RESP = 0x00,
	CMD_SUCCESS = 0x02,
	FLASH_DATA_AVAILABLE = 0x03,
	CMD_INVALID = 0x05,
	FLASH_UPDATE_FAIL = 0x07,
	INVALID_FW = 0x08,
	INVALID_ARG = 0x09,
	CMD_NOT_SUPPORT = 0x0A,
	TRANSACTION_FAIL = 0x0C,
	PD_CMD_FAIL = 0x0D,
	UNDEF_ERROR = 0x0F,
	INVALID_RESP = 0x10,
};
171 | |
172 | #define CCG_EVENT_MAX (EVENT_INDEX + 43) |
173 | |
/* One HPI command: register address, payload, length and post-write delay. */
struct ccg_cmd {
	u16 reg;	/* CCGX_RAB_* register address */
	u32 data;	/* payload, written little-endian from &data */
	int len;	/* number of payload bytes (<= 4) */
	u32 delay; /* ms delay for cmd timeout */
};
180 | |
/* Response read from CCGX_RAB_RESPONSE: code (enum ccg_resp_code or event). */
struct ccg_resp {
	u8 code;
	u8 length;
};
185 | |
/* Bookkeeping entry used to merge duplicate DP altmodes into one. */
struct ucsi_ccg_altmode {
	u16 svid;
	u32 mid;	/* merged mode VDO (ORed for duplicate DP modes) */
	u8 linked_idx;	/* index of the corresponding entry in the other table */
	u8 active_idx;	/* original-table index currently entered */
#define UCSI_MULTI_DP_INDEX (0xff)
	bool checked;	/* already consumed by the dedup pass */
} __packed;
194 | |
#define CCGX_MESSAGE_IN_MAX 4
/* Cached copy of the UCSI CCI and MESSAGE_IN registers, filled in ISR. */
struct op_region {
	__le32 cci;
	__le32 message_in[CCGX_MESSAGE_IN_MAX];
};
200 | |
/* Per-device state for one CCGx UCSI controller on I2C. */
struct ucsi_ccg {
	struct device *dev;
	struct ucsi *ucsi;
	struct i2c_client *client;

	struct ccg_dev_info info;
	/* version info for boot, primary and secondary */
	struct version_info version[FW2 + 1];
	u32 fw_version;
	/* CCG HPI communication flags */
	unsigned long flags;
#define RESET_PENDING 0
#define DEV_CMD_PENDING 1
	struct ccg_resp dev_resp;	/* last response read from the device */
	u8 cmd_resp;			/* response code for the pending command */
	int port_num;
	int irq;
	struct work_struct work;
	struct mutex lock; /* to sync between user and driver thread */

	/* fw build with vendor information */
	u16 fw_build;
	struct work_struct pm_work;

	/* signalled by the IRQ handler when a sync_write completes */
	struct completion complete;

	u64 last_cmd_sent;		/* UCSI control value of last command */
	bool has_multiple_dp;		/* duplicate DP altmodes were merged */
	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];

	/*
	 * This spinlock protects op_data which includes CCI and MESSAGE_IN that
	 * will be updated in ISR
	 */
	spinlock_t op_lock;
	struct op_region op_data;
};
239 | |
240 | static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) |
241 | { |
242 | struct i2c_client *client = uc->client; |
243 | const struct i2c_adapter_quirks *quirks = client->adapter->quirks; |
244 | unsigned char buf[2]; |
245 | struct i2c_msg msgs[] = { |
246 | { |
247 | .addr = client->addr, |
248 | .flags = 0x0, |
249 | .len = sizeof(buf), |
250 | .buf = buf, |
251 | }, |
252 | { |
253 | .addr = client->addr, |
254 | .flags = I2C_M_RD, |
255 | .buf = data, |
256 | }, |
257 | }; |
258 | u32 rlen, rem_len = len, max_read_len = len; |
259 | int status; |
260 | |
261 | /* check any max_read_len limitation on i2c adapter */ |
262 | if (quirks && quirks->max_read_len) |
263 | max_read_len = quirks->max_read_len; |
264 | |
265 | pm_runtime_get_sync(dev: uc->dev); |
266 | while (rem_len > 0) { |
267 | msgs[1].buf = &data[len - rem_len]; |
268 | rlen = min_t(u16, rem_len, max_read_len); |
269 | msgs[1].len = rlen; |
270 | put_unaligned_le16(val: rab, p: buf); |
271 | status = i2c_transfer(adap: client->adapter, msgs, ARRAY_SIZE(msgs)); |
272 | if (status < 0) { |
273 | dev_err(uc->dev, "i2c_transfer failed %d\n" , status); |
274 | pm_runtime_put_sync(dev: uc->dev); |
275 | return status; |
276 | } |
277 | rab += rlen; |
278 | rem_len -= rlen; |
279 | } |
280 | |
281 | pm_runtime_put_sync(dev: uc->dev); |
282 | return 0; |
283 | } |
284 | |
285 | static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len) |
286 | { |
287 | struct i2c_client *client = uc->client; |
288 | unsigned char *buf; |
289 | struct i2c_msg msgs[] = { |
290 | { |
291 | .addr = client->addr, |
292 | .flags = 0x0, |
293 | } |
294 | }; |
295 | int status; |
296 | |
297 | buf = kzalloc(size: len + sizeof(rab), GFP_KERNEL); |
298 | if (!buf) |
299 | return -ENOMEM; |
300 | |
301 | put_unaligned_le16(val: rab, p: buf); |
302 | memcpy(buf + sizeof(rab), data, len); |
303 | |
304 | msgs[0].len = len + sizeof(rab); |
305 | msgs[0].buf = buf; |
306 | |
307 | pm_runtime_get_sync(dev: uc->dev); |
308 | status = i2c_transfer(adap: client->adapter, msgs, ARRAY_SIZE(msgs)); |
309 | if (status < 0) { |
310 | dev_err(uc->dev, "i2c_transfer failed %d\n" , status); |
311 | pm_runtime_put_sync(dev: uc->dev); |
312 | kfree(objp: buf); |
313 | return status; |
314 | } |
315 | |
316 | pm_runtime_put_sync(dev: uc->dev); |
317 | kfree(objp: buf); |
318 | return 0; |
319 | } |
320 | |
321 | static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci) |
322 | { |
323 | u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN); |
324 | struct op_region *data = &uc->op_data; |
325 | unsigned char *buf; |
326 | size_t size = sizeof(data->message_in); |
327 | |
328 | buf = kzalloc(size, GFP_ATOMIC); |
329 | if (!buf) |
330 | return -ENOMEM; |
331 | if (UCSI_CCI_LENGTH(cci)) { |
332 | int ret = ccg_read(uc, rab: reg, data: (void *)buf, len: size); |
333 | |
334 | if (ret) { |
335 | kfree(objp: buf); |
336 | return ret; |
337 | } |
338 | } |
339 | |
340 | spin_lock(lock: &uc->op_lock); |
341 | data->cci = cpu_to_le32(cci); |
342 | if (UCSI_CCI_LENGTH(cci)) |
343 | memcpy(&data->message_in, buf, size); |
344 | spin_unlock(lock: &uc->op_lock); |
345 | kfree(objp: buf); |
346 | return 0; |
347 | } |
348 | |
349 | static int ucsi_ccg_init(struct ucsi_ccg *uc) |
350 | { |
351 | unsigned int count = 10; |
352 | u8 data; |
353 | int status; |
354 | |
355 | spin_lock_init(&uc->op_lock); |
356 | |
357 | data = CCGX_RAB_UCSI_CONTROL_STOP; |
358 | status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, data: &data, len: sizeof(data)); |
359 | if (status < 0) |
360 | return status; |
361 | |
362 | data = CCGX_RAB_UCSI_CONTROL_START; |
363 | status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, data: &data, len: sizeof(data)); |
364 | if (status < 0) |
365 | return status; |
366 | |
367 | /* |
368 | * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control |
369 | * register write will push response which must be cleared. |
370 | */ |
371 | do { |
372 | status = ccg_read(uc, CCGX_RAB_INTR_REG, data: &data, len: sizeof(data)); |
373 | if (status < 0) |
374 | return status; |
375 | |
376 | if (!(data & DEV_INT)) |
377 | return 0; |
378 | |
379 | status = ccg_write(uc, CCGX_RAB_INTR_REG, data: &data, len: sizeof(data)); |
380 | if (status < 0) |
381 | return status; |
382 | |
383 | usleep_range(min: 10000, max: 11000); |
384 | } while (--count); |
385 | |
386 | return -ETIMEDOUT; |
387 | } |
388 | |
389 | static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data) |
390 | { |
391 | u8 cam, new_cam; |
392 | |
393 | cam = data[0]; |
394 | new_cam = uc->orig[cam].linked_idx; |
395 | uc->updated[new_cam].active_idx = cam; |
396 | data[0] = new_cam; |
397 | } |
398 | |
/*
 * ucsi_ccg_update_altmodes() - merge duplicate DP altmodes into one entry.
 *
 * Copies the connector's altmode list from @orig into uc->orig, then builds
 * a deduplicated list in uc->updated / @updated where DP altmodes sharing an
 * SVID are collapsed into a single entry with their mode VDOs ORed together.
 * Index cross-links (linked_idx) are recorded in both directions so later
 * commands can translate between the two tables.
 *
 * Returns true if at least one duplicate DP pair was merged.
 */
static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
				     struct ucsi_altmode *orig,
				     struct ucsi_altmode *updated)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_ccg_altmode *alt, *new_alt;
	int i, j, k = 0;
	bool found = false;

	alt = uc->orig;
	new_alt = uc->updated;
	memset(uc->updated, 0, sizeof(uc->updated));

	/*
	 * Copy original connector altmodes to new structure.
	 * We need this before second loop since second loop
	 * checks for duplicate altmodes.
	 */
	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		alt[i].svid = orig[i].svid;
		alt[i].mid = orig[i].mid;
		if (!alt[i].svid)
			break;	/* svid == 0 terminates the list */
	}

	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		if (!alt[i].svid)
			break;

		/* already checked and considered */
		if (alt[i].checked)
			continue;

		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
			/* Found Non DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
			updated[k].svid = new_alt[k].svid;
			updated[k].mid = new_alt[k].mid;
			k++;
			continue;
		}

		/* DP altmode: fold any later DP modes with the same SVID in. */
		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
			if (alt[i].svid != alt[j].svid ||
			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
				continue;
			} else {
				/* Found duplicate DP mode */
				new_alt[k].svid = alt[i].svid;
				new_alt[k].mid |= alt[i].mid | alt[j].mid;
				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
				alt[i].linked_idx = k;
				alt[j].linked_idx = k;
				alt[j].checked = true;
				found = true;
			}
		}
		/*
		 * NOTE(review): "found" is never cleared once set, so a later
		 * DP altmode with no duplicate still takes this branch and
		 * skips the single-mode bookkeeping — TODO confirm intended.
		 */
		if (found) {
			uc->has_multiple_dp = true;
		} else {
			/* Didn't find any duplicate DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
		}
		updated[k].svid = new_alt[k].svid;
		updated[k].mid = new_alt[k].mid;
		k++;
	}
	return found;
}
474 | |
/*
 * ucsi_ccg_update_set_new_cam_cmd() - rewrite the CAM index in a
 * SET_NEW_CAM command from the merged altmode table back to the device's
 * original table before the command is sent.
 */
static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
					    struct ucsi_connector *con,
					    u64 *cmd)
{
	struct ucsi_ccg_altmode *new_port, *port;
	struct typec_altmode *alt = NULL;
	u8 new_cam, cam, pin;
	bool enter_new_mode;
	int i, j, k = 0xff;

	port = uc->orig;
	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
	new_port = &uc->updated[new_cam];
	cam = new_port->linked_idx;
	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);

	/*
	 * If CAM is UCSI_MULTI_DP_INDEX then this is DP altmode
	 * with multiple DP mode. Find out CAM for best pin assignment
	 * among all DP mode. Prioritize pin E->D->C after making sure
	 * the partner supports that pin.
	 */
	if (cam == UCSI_MULTI_DP_INDEX) {
		if (enter_new_mode) {
			/* Find the partner altmode matching the target SVID. */
			for (i = 0; con->partner_altmode[i]; i++) {
				alt = con->partner_altmode[i];
				if (alt->svid == new_port->svid)
					break;
			}
			/*
			 * alt will always be non NULL since this is
			 * UCSI_SET_NEW_CAM command and so there will be
			 * at least one con->partner_altmode[i] with svid
			 * matching with new_port->svid.
			 */
			for (j = 0; port[j].svid; j++) {
				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
				if (alt && port[j].svid == alt->svid &&
				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
					/* prioritize pin E->D->C */
					if (k == 0xff || (k != 0xff && pin >
					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
					   ) {
						k = j;
					}
				}
			}
			cam = k;
			new_port->active_idx = cam;
		} else {
			/* Exiting: use whichever mode was entered before. */
			cam = new_port->active_idx;
		}
	}
	/* Patch the translated CAM index back into the command word. */
	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
}
531 | |
532 | /* |
533 | * Change the order of vdo values of NVIDIA test device FTB |
534 | * (Function Test Board) which reports altmode list with vdo=0x3 |
535 | * first and then vdo=0x. Current logic to assign mode value is |
536 | * based on order in altmode list and it causes a mismatch of CON |
537 | * and SOP altmodes since NVIDIA GPU connector has order of vdo=0x1 |
538 | * first and then vdo=0x3 |
539 | */ |
540 | static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc, |
541 | struct ucsi_altmode *alt) |
542 | { |
543 | switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) { |
544 | case NVIDIA_FTB_DP_OFFSET: |
545 | if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO) |
546 | alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO | |
547 | DP_CAP_DP_SIGNALLING(0) | DP_CAP_USB | |
548 | DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E)); |
549 | break; |
550 | case NVIDIA_FTB_DBG_OFFSET: |
551 | if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO) |
552 | alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO; |
553 | break; |
554 | default: |
555 | break; |
556 | } |
557 | } |
558 | |
559 | static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset, |
560 | void *val, size_t val_len) |
561 | { |
562 | struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi); |
563 | u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset); |
564 | struct ucsi_capability *cap; |
565 | struct ucsi_altmode *alt; |
566 | int ret = 0; |
567 | |
568 | if (offset == UCSI_CCI) { |
569 | spin_lock(lock: &uc->op_lock); |
570 | memcpy(val, &(uc->op_data).cci, val_len); |
571 | spin_unlock(lock: &uc->op_lock); |
572 | } else if (offset == UCSI_MESSAGE_IN) { |
573 | spin_lock(lock: &uc->op_lock); |
574 | memcpy(val, &(uc->op_data).message_in, val_len); |
575 | spin_unlock(lock: &uc->op_lock); |
576 | } else { |
577 | ret = ccg_read(uc, rab: reg, data: val, len: val_len); |
578 | } |
579 | |
580 | if (ret) |
581 | return ret; |
582 | |
583 | if (offset != UCSI_MESSAGE_IN) |
584 | return ret; |
585 | |
586 | switch (UCSI_COMMAND(uc->last_cmd_sent)) { |
587 | case UCSI_GET_CURRENT_CAM: |
588 | if (uc->has_multiple_dp) |
589 | ucsi_ccg_update_get_current_cam_cmd(uc, data: (u8 *)val); |
590 | break; |
591 | case UCSI_GET_ALTERNATE_MODES: |
592 | if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) == |
593 | UCSI_RECIPIENT_SOP) { |
594 | alt = val; |
595 | if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID) |
596 | ucsi_ccg_nvidia_altmode(uc, alt); |
597 | } |
598 | break; |
599 | case UCSI_GET_CAPABILITY: |
600 | if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) { |
601 | cap = val; |
602 | cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS; |
603 | } |
604 | break; |
605 | default: |
606 | break; |
607 | } |
608 | uc->last_cmd_sent = 0; |
609 | |
610 | return ret; |
611 | } |
612 | |
613 | static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset, |
614 | const void *val, size_t val_len) |
615 | { |
616 | struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi); |
617 | u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset); |
618 | |
619 | /* |
620 | * UCSI may read CCI instantly after async_write, |
621 | * clear CCI to avoid caller getting wrong data before we get CCI from ISR |
622 | */ |
623 | spin_lock(lock: &uc->op_lock); |
624 | uc->op_data.cci = 0; |
625 | spin_unlock(lock: &uc->op_lock); |
626 | |
627 | return ccg_write(uc, rab: reg, data: val, len: val_len); |
628 | } |
629 | |
630 | static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset, |
631 | const void *val, size_t val_len) |
632 | { |
633 | struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi); |
634 | struct ucsi_connector *con; |
635 | int con_index; |
636 | int ret; |
637 | |
638 | mutex_lock(&uc->lock); |
639 | pm_runtime_get_sync(dev: uc->dev); |
640 | set_bit(DEV_CMD_PENDING, addr: &uc->flags); |
641 | |
642 | if (offset == UCSI_CONTROL && val_len == sizeof(uc->last_cmd_sent)) { |
643 | uc->last_cmd_sent = *(u64 *)val; |
644 | |
645 | if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM && |
646 | uc->has_multiple_dp) { |
647 | con_index = (uc->last_cmd_sent >> 16) & |
648 | UCSI_CMD_CONNECTOR_MASK; |
649 | con = &uc->ucsi->connector[con_index - 1]; |
650 | ucsi_ccg_update_set_new_cam_cmd(uc, con, cmd: (u64 *)val); |
651 | } |
652 | } |
653 | |
654 | ret = ucsi_ccg_async_write(ucsi, offset, val, val_len); |
655 | if (ret) |
656 | goto err_clear_bit; |
657 | |
658 | if (!wait_for_completion_timeout(x: &uc->complete, timeout: msecs_to_jiffies(m: 5000))) |
659 | ret = -ETIMEDOUT; |
660 | |
661 | err_clear_bit: |
662 | clear_bit(DEV_CMD_PENDING, addr: &uc->flags); |
663 | pm_runtime_put_sync(dev: uc->dev); |
664 | mutex_unlock(lock: &uc->lock); |
665 | |
666 | return ret; |
667 | } |
668 | |
/* UCSI transport operations backed by the CCGx HPI register interface. */
static const struct ucsi_operations ucsi_ccg_ops = {
	.read = ucsi_ccg_read,
	.sync_write = ucsi_ccg_sync_write,
	.async_write = ucsi_ccg_async_write,
	.update_altmodes = ucsi_ccg_update_altmodes
};
675 | |
676 | static irqreturn_t ccg_irq_handler(int irq, void *data) |
677 | { |
678 | u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI); |
679 | struct ucsi_ccg *uc = data; |
680 | u8 intr_reg; |
681 | u32 cci = 0; |
682 | int ret = 0; |
683 | |
684 | ret = ccg_read(uc, CCGX_RAB_INTR_REG, data: &intr_reg, len: sizeof(intr_reg)); |
685 | if (ret) |
686 | return ret; |
687 | |
688 | if (!intr_reg) |
689 | return IRQ_HANDLED; |
690 | else if (!(intr_reg & UCSI_READ_INT)) |
691 | goto err_clear_irq; |
692 | |
693 | ret = ccg_read(uc, rab: reg, data: (void *)&cci, len: sizeof(cci)); |
694 | if (ret) |
695 | goto err_clear_irq; |
696 | |
697 | if (UCSI_CCI_CONNECTOR(cci)) |
698 | ucsi_connector_change(ucsi: uc->ucsi, UCSI_CCI_CONNECTOR(cci)); |
699 | |
700 | /* |
701 | * As per CCGx UCSI interface guide, copy CCI and MESSAGE_IN |
702 | * to the OpRegion before clear the UCSI interrupt |
703 | */ |
704 | ret = ccg_op_region_update(uc, cci); |
705 | if (ret) |
706 | goto err_clear_irq; |
707 | |
708 | err_clear_irq: |
709 | ccg_write(uc, CCGX_RAB_INTR_REG, data: &intr_reg, len: sizeof(intr_reg)); |
710 | |
711 | if (!ret && test_bit(DEV_CMD_PENDING, &uc->flags) && |
712 | cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) |
713 | complete(&uc->complete); |
714 | |
715 | return IRQ_HANDLED; |
716 | } |
717 | |
718 | static int ccg_request_irq(struct ucsi_ccg *uc) |
719 | { |
720 | unsigned long flags = IRQF_ONESHOT; |
721 | |
722 | if (!dev_fwnode(uc->dev)) |
723 | flags |= IRQF_TRIGGER_HIGH; |
724 | |
725 | return request_threaded_irq(irq: uc->irq, NULL, thread_fn: ccg_irq_handler, flags, name: dev_name(dev: uc->dev), dev: uc); |
726 | } |
727 | |
728 | static void ccg_pm_workaround_work(struct work_struct *pm_work) |
729 | { |
730 | ccg_irq_handler(irq: 0, container_of(pm_work, struct ucsi_ccg, pm_work)); |
731 | } |
732 | |
733 | static int get_fw_info(struct ucsi_ccg *uc) |
734 | { |
735 | int err; |
736 | |
737 | err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, data: (u8 *)(&uc->version), |
738 | len: sizeof(uc->version)); |
739 | if (err < 0) |
740 | return err; |
741 | |
742 | uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) | |
743 | CCG_VERSION_PATCH(uc->version[FW2].app.patch); |
744 | |
745 | err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, data: (u8 *)(&uc->info), |
746 | len: sizeof(uc->info)); |
747 | if (err < 0) |
748 | return err; |
749 | |
750 | return 0; |
751 | } |
752 | |
753 | static inline bool invalid_async_evt(int code) |
754 | { |
755 | return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX); |
756 | } |
757 | |
758 | static void ccg_process_response(struct ucsi_ccg *uc) |
759 | { |
760 | struct device *dev = uc->dev; |
761 | |
762 | if (uc->dev_resp.code & ASYNC_EVENT) { |
763 | if (uc->dev_resp.code == RESET_COMPLETE) { |
764 | if (test_bit(RESET_PENDING, &uc->flags)) |
765 | uc->cmd_resp = uc->dev_resp.code; |
766 | get_fw_info(uc); |
767 | } |
768 | if (invalid_async_evt(code: uc->dev_resp.code)) |
769 | dev_err(dev, "invalid async evt %d\n" , |
770 | uc->dev_resp.code); |
771 | } else { |
772 | if (test_bit(DEV_CMD_PENDING, &uc->flags)) { |
773 | uc->cmd_resp = uc->dev_resp.code; |
774 | clear_bit(DEV_CMD_PENDING, addr: &uc->flags); |
775 | } else { |
776 | dev_err(dev, "dev resp 0x%04x but no cmd pending\n" , |
777 | uc->dev_resp.code); |
778 | } |
779 | } |
780 | } |
781 | |
782 | static int ccg_read_response(struct ucsi_ccg *uc) |
783 | { |
784 | unsigned long target = jiffies + msecs_to_jiffies(m: 1000); |
785 | struct device *dev = uc->dev; |
786 | u8 intval; |
787 | int status; |
788 | |
789 | /* wait for interrupt status to get updated */ |
790 | do { |
791 | status = ccg_read(uc, CCGX_RAB_INTR_REG, data: &intval, |
792 | len: sizeof(intval)); |
793 | if (status < 0) |
794 | return status; |
795 | |
796 | if (intval & DEV_INT) |
797 | break; |
798 | usleep_range(min: 500, max: 600); |
799 | } while (time_is_after_jiffies(target)); |
800 | |
801 | if (time_is_before_jiffies(target)) { |
802 | dev_err(dev, "response timeout error\n" ); |
803 | return -ETIME; |
804 | } |
805 | |
806 | status = ccg_read(uc, CCGX_RAB_RESPONSE, data: (u8 *)&uc->dev_resp, |
807 | len: sizeof(uc->dev_resp)); |
808 | if (status < 0) |
809 | return status; |
810 | |
811 | status = ccg_write(uc, CCGX_RAB_INTR_REG, data: &intval, len: sizeof(intval)); |
812 | if (status < 0) |
813 | return status; |
814 | |
815 | return 0; |
816 | } |
817 | |
818 | /* Caller must hold uc->lock */ |
819 | static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd) |
820 | { |
821 | struct device *dev = uc->dev; |
822 | int ret; |
823 | |
824 | switch (cmd->reg & 0xF000) { |
825 | case DEV_REG_IDX: |
826 | set_bit(DEV_CMD_PENDING, addr: &uc->flags); |
827 | break; |
828 | default: |
829 | dev_err(dev, "invalid cmd register\n" ); |
830 | break; |
831 | } |
832 | |
833 | ret = ccg_write(uc, rab: cmd->reg, data: (u8 *)&cmd->data, len: cmd->len); |
834 | if (ret < 0) |
835 | return ret; |
836 | |
837 | msleep(msecs: cmd->delay); |
838 | |
839 | ret = ccg_read_response(uc); |
840 | if (ret < 0) { |
841 | dev_err(dev, "response read error\n" ); |
842 | switch (cmd->reg & 0xF000) { |
843 | case DEV_REG_IDX: |
844 | clear_bit(DEV_CMD_PENDING, addr: &uc->flags); |
845 | break; |
846 | default: |
847 | dev_err(dev, "invalid cmd register\n" ); |
848 | break; |
849 | } |
850 | return -EIO; |
851 | } |
852 | ccg_process_response(uc); |
853 | |
854 | return uc->cmd_resp; |
855 | } |
856 | |
857 | static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc) |
858 | { |
859 | struct ccg_cmd cmd; |
860 | int ret; |
861 | |
862 | cmd.reg = CCGX_RAB_ENTER_FLASHING; |
863 | cmd.data = FLASH_ENTER_SIG; |
864 | cmd.len = 1; |
865 | cmd.delay = 50; |
866 | |
867 | mutex_lock(&uc->lock); |
868 | |
869 | ret = ccg_send_command(uc, cmd: &cmd); |
870 | |
871 | mutex_unlock(lock: &uc->lock); |
872 | |
873 | if (ret != CMD_SUCCESS) { |
874 | dev_err(uc->dev, "enter flashing failed ret=%d\n" , ret); |
875 | return ret; |
876 | } |
877 | |
878 | return 0; |
879 | } |
880 | |
881 | static int ccg_cmd_reset(struct ucsi_ccg *uc) |
882 | { |
883 | struct ccg_cmd cmd; |
884 | u8 *p; |
885 | int ret; |
886 | |
887 | p = (u8 *)&cmd.data; |
888 | cmd.reg = CCGX_RAB_RESET_REQ; |
889 | p[0] = RESET_SIG; |
890 | p[1] = CMD_RESET_DEV; |
891 | cmd.len = 2; |
892 | cmd.delay = 5000; |
893 | |
894 | mutex_lock(&uc->lock); |
895 | |
896 | set_bit(RESET_PENDING, addr: &uc->flags); |
897 | |
898 | ret = ccg_send_command(uc, cmd: &cmd); |
899 | if (ret != RESET_COMPLETE) |
900 | goto err_clear_flag; |
901 | |
902 | ret = 0; |
903 | |
904 | err_clear_flag: |
905 | clear_bit(RESET_PENDING, addr: &uc->flags); |
906 | |
907 | mutex_unlock(lock: &uc->lock); |
908 | |
909 | return ret; |
910 | } |
911 | |
912 | static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable) |
913 | { |
914 | struct ccg_cmd cmd; |
915 | int ret; |
916 | |
917 | cmd.reg = CCGX_RAB_PDPORT_ENABLE; |
918 | if (enable) |
919 | cmd.data = (uc->port_num == 1) ? |
920 | PDPORT_1 : (PDPORT_1 | PDPORT_2); |
921 | else |
922 | cmd.data = 0x0; |
923 | cmd.len = 1; |
924 | cmd.delay = 10; |
925 | |
926 | mutex_lock(&uc->lock); |
927 | |
928 | ret = ccg_send_command(uc, cmd: &cmd); |
929 | |
930 | mutex_unlock(lock: &uc->lock); |
931 | |
932 | if (ret != CMD_SUCCESS) { |
933 | dev_err(uc->dev, "port control failed ret=%d\n" , ret); |
934 | return ret; |
935 | } |
936 | return 0; |
937 | } |
938 | |
939 | static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode) |
940 | { |
941 | struct ccg_cmd cmd; |
942 | int ret; |
943 | |
944 | cmd.reg = CCGX_RAB_JUMP_TO_BOOT; |
945 | |
946 | if (bl_mode) |
947 | cmd.data = TO_BOOT; |
948 | else |
949 | cmd.data = TO_ALT_FW; |
950 | |
951 | cmd.len = 1; |
952 | cmd.delay = 100; |
953 | |
954 | mutex_lock(&uc->lock); |
955 | |
956 | set_bit(RESET_PENDING, addr: &uc->flags); |
957 | |
958 | ret = ccg_send_command(uc, cmd: &cmd); |
959 | if (ret != RESET_COMPLETE) |
960 | goto err_clear_flag; |
961 | |
962 | ret = 0; |
963 | |
964 | err_clear_flag: |
965 | clear_bit(RESET_PENDING, addr: &uc->flags); |
966 | |
967 | mutex_unlock(lock: &uc->lock); |
968 | |
969 | return ret; |
970 | } |
971 | |
972 | static int |
973 | ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row, |
974 | const void *data, u8 fcmd) |
975 | { |
976 | struct i2c_client *client = uc->client; |
977 | struct ccg_cmd cmd; |
978 | u8 buf[CCG4_ROW_SIZE + 2]; |
979 | u8 *p; |
980 | int ret; |
981 | |
982 | /* Copy the data into the flash read/write memory. */ |
983 | put_unaligned_le16(REG_FLASH_RW_MEM, p: buf); |
984 | |
985 | memcpy(buf + 2, data, CCG4_ROW_SIZE); |
986 | |
987 | mutex_lock(&uc->lock); |
988 | |
989 | ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2); |
990 | if (ret != CCG4_ROW_SIZE + 2) { |
991 | dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n" , ret); |
992 | mutex_unlock(lock: &uc->lock); |
993 | return ret < 0 ? ret : -EIO; |
994 | } |
995 | |
996 | /* Use the FLASH_ROW_READ_WRITE register to trigger */ |
997 | /* writing of data to the desired flash row */ |
998 | p = (u8 *)&cmd.data; |
999 | cmd.reg = CCGX_RAB_FLASH_ROW_RW; |
1000 | p[0] = FLASH_SIG; |
1001 | p[1] = fcmd; |
1002 | put_unaligned_le16(val: row, p: &p[2]); |
1003 | cmd.len = 4; |
1004 | cmd.delay = 50; |
1005 | if (fcmd == FLASH_FWCT_SIG_WR_CMD) |
1006 | cmd.delay += 400; |
1007 | if (row == 510) |
1008 | cmd.delay += 220; |
1009 | ret = ccg_send_command(uc, cmd: &cmd); |
1010 | |
1011 | mutex_unlock(lock: &uc->lock); |
1012 | |
1013 | if (ret != CMD_SUCCESS) { |
1014 | dev_err(uc->dev, "write flash row failed ret=%d\n" , ret); |
1015 | return ret; |
1016 | } |
1017 | |
1018 | return 0; |
1019 | } |
1020 | |
1021 | static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid) |
1022 | { |
1023 | struct ccg_cmd cmd; |
1024 | int ret; |
1025 | |
1026 | cmd.reg = CCGX_RAB_VALIDATE_FW; |
1027 | cmd.data = fwid; |
1028 | cmd.len = 1; |
1029 | cmd.delay = 500; |
1030 | |
1031 | mutex_lock(&uc->lock); |
1032 | |
1033 | ret = ccg_send_command(uc, cmd: &cmd); |
1034 | |
1035 | mutex_unlock(lock: &uc->lock); |
1036 | |
1037 | if (ret != CMD_SUCCESS) |
1038 | return ret; |
1039 | |
1040 | return 0; |
1041 | } |
1042 | |
1043 | static bool ccg_check_vendor_version(struct ucsi_ccg *uc, |
1044 | struct version_format *app, |
1045 | struct fw_config_table *fw_cfg) |
1046 | { |
1047 | struct device *dev = uc->dev; |
1048 | |
1049 | /* Check if the fw build is for supported vendors */ |
1050 | if (le16_to_cpu(app->build) != uc->fw_build) { |
1051 | dev_info(dev, "current fw is not from supported vendor\n" ); |
1052 | return false; |
1053 | } |
1054 | |
1055 | /* Check if the new fw build is for supported vendors */ |
1056 | if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) { |
1057 | dev_info(dev, "new fw is not from supported vendor\n" ); |
1058 | return false; |
1059 | } |
1060 | return true; |
1061 | } |
1062 | |
1063 | static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name, |
1064 | struct version_format *app) |
1065 | { |
1066 | const struct firmware *fw = NULL; |
1067 | struct device *dev = uc->dev; |
1068 | struct fw_config_table fw_cfg; |
1069 | u32 cur_version, new_version; |
1070 | bool is_later = false; |
1071 | |
1072 | if (request_firmware(fw: &fw, name: fw_name, device: dev) != 0) { |
1073 | dev_err(dev, "error: Failed to open cyacd file %s\n" , fw_name); |
1074 | return false; |
1075 | } |
1076 | |
1077 | /* |
1078 | * check if signed fw |
1079 | * last part of fw image is fw cfg table and signature |
1080 | */ |
1081 | if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE) |
1082 | goto out_release_firmware; |
1083 | |
1084 | memcpy((uint8_t *)&fw_cfg, fw->data + fw->size - |
1085 | sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg)); |
1086 | |
1087 | if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) { |
1088 | dev_info(dev, "not a signed image\n" ); |
1089 | goto out_release_firmware; |
1090 | } |
1091 | |
1092 | /* compare input version with FWCT version */ |
1093 | cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) | |
1094 | CCG_VERSION(app->ver); |
1095 | |
1096 | new_version = le16_to_cpu(fw_cfg.app.build) | |
1097 | CCG_VERSION_PATCH(fw_cfg.app.patch) | |
1098 | CCG_VERSION(fw_cfg.app.ver); |
1099 | |
1100 | if (!ccg_check_vendor_version(uc, app, fw_cfg: &fw_cfg)) |
1101 | goto out_release_firmware; |
1102 | |
1103 | if (new_version > cur_version) |
1104 | is_later = true; |
1105 | |
1106 | out_release_firmware: |
1107 | release_firmware(fw); |
1108 | return is_later; |
1109 | } |
1110 | |
1111 | static int ccg_fw_update_needed(struct ucsi_ccg *uc, |
1112 | enum enum_flash_mode *mode) |
1113 | { |
1114 | struct device *dev = uc->dev; |
1115 | int err; |
1116 | struct version_info version[3]; |
1117 | |
1118 | err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, data: (u8 *)(&uc->info), |
1119 | len: sizeof(uc->info)); |
1120 | if (err) { |
1121 | dev_err(dev, "read device mode failed\n" ); |
1122 | return err; |
1123 | } |
1124 | |
1125 | err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, data: (u8 *)version, |
1126 | len: sizeof(version)); |
1127 | if (err) { |
1128 | dev_err(dev, "read device mode failed\n" ); |
1129 | return err; |
1130 | } |
1131 | |
1132 | if (memcmp(p: &version[FW1], q: "\0\0\0\0\0\0\0\0" , |
1133 | size: sizeof(struct version_info)) == 0) { |
1134 | dev_info(dev, "secondary fw is not flashed\n" ); |
1135 | *mode = SECONDARY_BL; |
1136 | } else if (le16_to_cpu(version[FW1].base.build) < |
1137 | secondary_fw_min_ver) { |
1138 | dev_info(dev, "secondary fw version is too low (< %d)\n" , |
1139 | secondary_fw_min_ver); |
1140 | *mode = SECONDARY; |
1141 | } else if (memcmp(p: &version[FW2], q: "\0\0\0\0\0\0\0\0" , |
1142 | size: sizeof(struct version_info)) == 0) { |
1143 | dev_info(dev, "primary fw is not flashed\n" ); |
1144 | *mode = PRIMARY; |
1145 | } else if (ccg_check_fw_version(uc, fw_name: ccg_fw_names[PRIMARY], |
1146 | app: &version[FW2].app)) { |
1147 | dev_info(dev, "found primary fw with later version\n" ); |
1148 | *mode = PRIMARY; |
1149 | } else { |
1150 | dev_info(dev, "secondary and primary fw are the latest\n" ); |
1151 | *mode = FLASH_NOT_NEEDED; |
1152 | } |
1153 | return 0; |
1154 | } |
1155 | |
1156 | static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode) |
1157 | { |
1158 | struct device *dev = uc->dev; |
1159 | const struct firmware *fw = NULL; |
1160 | const char *p, *s; |
1161 | const char *eof; |
1162 | int err, row, len, line_sz, line_cnt = 0; |
1163 | unsigned long start_time = jiffies; |
1164 | struct fw_config_table fw_cfg; |
1165 | u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE]; |
1166 | u8 *wr_buf; |
1167 | |
1168 | err = request_firmware(fw: &fw, name: ccg_fw_names[mode], device: dev); |
1169 | if (err) { |
1170 | dev_err(dev, "request %s failed err=%d\n" , |
1171 | ccg_fw_names[mode], err); |
1172 | return err; |
1173 | } |
1174 | |
1175 | if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >> |
1176 | CCG_DEVINFO_FWMODE_SHIFT) == FW2) { |
1177 | err = ccg_cmd_port_control(uc, enable: false); |
1178 | if (err < 0) |
1179 | goto release_fw; |
1180 | err = ccg_cmd_jump_boot_mode(uc, bl_mode: 0); |
1181 | if (err < 0) |
1182 | goto release_fw; |
1183 | } |
1184 | |
1185 | eof = fw->data + fw->size; |
1186 | |
1187 | /* |
1188 | * check if signed fw |
1189 | * last part of fw image is fw cfg table and signature |
1190 | */ |
1191 | if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig)) |
1192 | goto not_signed_fw; |
1193 | |
1194 | memcpy((uint8_t *)&fw_cfg, fw->data + fw->size - |
1195 | sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg)); |
1196 | |
1197 | if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) { |
1198 | dev_info(dev, "not a signed image\n" ); |
1199 | goto not_signed_fw; |
1200 | } |
1201 | eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig); |
1202 | |
1203 | memcpy((uint8_t *)&fw_cfg_sig, |
1204 | fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig)); |
1205 | |
1206 | /* flash fw config table and signature first */ |
1207 | err = ccg_cmd_write_flash_row(uc, row: 0, data: (u8 *)&fw_cfg, |
1208 | FLASH_FWCT1_WR_CMD); |
1209 | if (err) |
1210 | goto release_fw; |
1211 | |
1212 | err = ccg_cmd_write_flash_row(uc, row: 0, data: (u8 *)&fw_cfg + CCG4_ROW_SIZE, |
1213 | FLASH_FWCT2_WR_CMD); |
1214 | if (err) |
1215 | goto release_fw; |
1216 | |
1217 | err = ccg_cmd_write_flash_row(uc, row: 0, data: &fw_cfg_sig, |
1218 | FLASH_FWCT_SIG_WR_CMD); |
1219 | if (err) |
1220 | goto release_fw; |
1221 | |
1222 | not_signed_fw: |
1223 | wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL); |
1224 | if (!wr_buf) { |
1225 | err = -ENOMEM; |
1226 | goto release_fw; |
1227 | } |
1228 | |
1229 | err = ccg_cmd_enter_flashing(uc); |
1230 | if (err) |
1231 | goto release_mem; |
1232 | |
1233 | /***************************************************************** |
1234 | * CCG firmware image (.cyacd) file line format |
1235 | * |
1236 | * :00rrrrllll[dd....]cc/r/n |
1237 | * |
1238 | * :00 header |
1239 | * rrrr is row number to flash (4 char) |
1240 | * llll is data len to flash (4 char) |
1241 | * dd is a data field represents one byte of data (512 char) |
1242 | * cc is checksum (2 char) |
1243 | * \r\n newline |
1244 | * |
1245 | * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527 |
1246 | * |
1247 | *****************************************************************/ |
1248 | |
1249 | p = strnchr(fw->data, fw->size, ':'); |
1250 | while (p < eof) { |
1251 | s = strnchr(p + 1, eof - p - 1, ':'); |
1252 | |
1253 | if (!s) |
1254 | s = eof; |
1255 | |
1256 | line_sz = s - p; |
1257 | |
1258 | if (line_sz != CYACD_LINE_SIZE) { |
1259 | dev_err(dev, "Bad FW format line_sz=%d\n" , line_sz); |
1260 | err = -EINVAL; |
1261 | goto release_mem; |
1262 | } |
1263 | |
1264 | if (hex2bin(dst: wr_buf, src: p + 3, CCG4_ROW_SIZE + 4)) { |
1265 | err = -EINVAL; |
1266 | goto release_mem; |
1267 | } |
1268 | |
1269 | row = get_unaligned_be16(p: wr_buf); |
1270 | len = get_unaligned_be16(p: &wr_buf[2]); |
1271 | |
1272 | if (len != CCG4_ROW_SIZE) { |
1273 | err = -EINVAL; |
1274 | goto release_mem; |
1275 | } |
1276 | |
1277 | err = ccg_cmd_write_flash_row(uc, row, data: wr_buf + 4, |
1278 | FLASH_WR_CMD); |
1279 | if (err) |
1280 | goto release_mem; |
1281 | |
1282 | line_cnt++; |
1283 | p = s; |
1284 | } |
1285 | |
1286 | dev_info(dev, "total %d row flashed. time: %dms\n" , |
1287 | line_cnt, jiffies_to_msecs(jiffies - start_time)); |
1288 | |
1289 | err = ccg_cmd_validate_fw(uc, fwid: (mode == PRIMARY) ? FW2 : FW1); |
1290 | if (err) |
1291 | dev_err(dev, "%s validation failed err=%d\n" , |
1292 | (mode == PRIMARY) ? "FW2" : "FW1" , err); |
1293 | else |
1294 | dev_info(dev, "%s validated\n" , |
1295 | (mode == PRIMARY) ? "FW2" : "FW1" ); |
1296 | |
1297 | err = ccg_cmd_port_control(uc, enable: false); |
1298 | if (err < 0) |
1299 | goto release_mem; |
1300 | |
1301 | err = ccg_cmd_reset(uc); |
1302 | if (err < 0) |
1303 | goto release_mem; |
1304 | |
1305 | err = ccg_cmd_port_control(uc, enable: true); |
1306 | if (err < 0) |
1307 | goto release_mem; |
1308 | |
1309 | release_mem: |
1310 | kfree(objp: wr_buf); |
1311 | |
1312 | release_fw: |
1313 | release_firmware(fw); |
1314 | return err; |
1315 | } |
1316 | |
1317 | /******************************************************************************* |
1318 | * CCG4 has two copies of the firmware in addition to the bootloader. |
1319 | * If the device is running FW1, FW2 can be updated with the new version. |
1320 | * Dual firmware mode allows the CCG device to stay in a PD contract and support |
1321 | * USB PD and Type-C functionality while a firmware update is in progress. |
1322 | ******************************************************************************/ |
1323 | static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode) |
1324 | { |
1325 | int err = 0; |
1326 | |
1327 | while (flash_mode != FLASH_NOT_NEEDED) { |
1328 | err = do_flash(uc, mode: flash_mode); |
1329 | if (err < 0) |
1330 | return err; |
1331 | err = ccg_fw_update_needed(uc, mode: &flash_mode); |
1332 | if (err < 0) |
1333 | return err; |
1334 | } |
1335 | dev_info(uc->dev, "CCG FW update successful\n" ); |
1336 | |
1337 | return err; |
1338 | } |
1339 | |
1340 | static int ccg_restart(struct ucsi_ccg *uc) |
1341 | { |
1342 | struct device *dev = uc->dev; |
1343 | int status; |
1344 | |
1345 | status = ucsi_ccg_init(uc); |
1346 | if (status < 0) { |
1347 | dev_err(dev, "ucsi_ccg_start fail, err=%d\n" , status); |
1348 | return status; |
1349 | } |
1350 | |
1351 | status = ccg_request_irq(uc); |
1352 | if (status < 0) { |
1353 | dev_err(dev, "request_threaded_irq failed - %d\n" , status); |
1354 | return status; |
1355 | } |
1356 | |
1357 | status = ucsi_register(ucsi: uc->ucsi); |
1358 | if (status) { |
1359 | dev_err(uc->dev, "failed to register the interface\n" ); |
1360 | return status; |
1361 | } |
1362 | |
1363 | pm_runtime_enable(dev: uc->dev); |
1364 | return 0; |
1365 | } |
1366 | |
1367 | static void ccg_update_firmware(struct work_struct *work) |
1368 | { |
1369 | struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work); |
1370 | enum enum_flash_mode flash_mode; |
1371 | int status; |
1372 | |
1373 | status = ccg_fw_update_needed(uc, mode: &flash_mode); |
1374 | if (status < 0) |
1375 | return; |
1376 | |
1377 | if (flash_mode != FLASH_NOT_NEEDED) { |
1378 | ucsi_unregister(ucsi: uc->ucsi); |
1379 | pm_runtime_disable(dev: uc->dev); |
1380 | free_irq(uc->irq, uc); |
1381 | |
1382 | ccg_fw_update(uc, flash_mode); |
1383 | ccg_restart(uc); |
1384 | } |
1385 | } |
1386 | |
1387 | static ssize_t do_flash_store(struct device *dev, |
1388 | struct device_attribute *attr, |
1389 | const char *buf, size_t n) |
1390 | { |
1391 | struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev)); |
1392 | bool flash; |
1393 | |
1394 | if (kstrtobool(s: buf, res: &flash)) |
1395 | return -EINVAL; |
1396 | |
1397 | if (!flash) |
1398 | return n; |
1399 | |
1400 | if (uc->fw_build == 0x0) { |
1401 | dev_err(dev, "fail to flash FW due to missing FW build info\n" ); |
1402 | return -EINVAL; |
1403 | } |
1404 | |
1405 | schedule_work(work: &uc->work); |
1406 | return n; |
1407 | } |
1408 | |
/* Write-only sysfs attribute to manually trigger a firmware update. */
static DEVICE_ATTR_WO(do_flash);

static struct attribute *ucsi_ccg_attrs[] = {
	&dev_attr_do_flash.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ucsi_ccg);
1416 | |
1417 | static int ucsi_ccg_probe(struct i2c_client *client) |
1418 | { |
1419 | struct device *dev = &client->dev; |
1420 | struct ucsi_ccg *uc; |
1421 | const char *fw_name; |
1422 | int status; |
1423 | |
1424 | uc = devm_kzalloc(dev, size: sizeof(*uc), GFP_KERNEL); |
1425 | if (!uc) |
1426 | return -ENOMEM; |
1427 | |
1428 | uc->dev = dev; |
1429 | uc->client = client; |
1430 | uc->irq = client->irq; |
1431 | mutex_init(&uc->lock); |
1432 | init_completion(x: &uc->complete); |
1433 | INIT_WORK(&uc->work, ccg_update_firmware); |
1434 | INIT_WORK(&uc->pm_work, ccg_pm_workaround_work); |
1435 | |
1436 | /* Only fail FW flashing when FW build information is not provided */ |
1437 | status = device_property_read_string(dev, propname: "firmware-name" , val: &fw_name); |
1438 | if (!status) { |
1439 | if (!strcmp(fw_name, "nvidia,jetson-agx-xavier" )) |
1440 | uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA; |
1441 | else if (!strcmp(fw_name, "nvidia,gpu" )) |
1442 | uc->fw_build = CCG_FW_BUILD_NVIDIA; |
1443 | } |
1444 | |
1445 | if (!uc->fw_build) |
1446 | dev_err(uc->dev, "failed to get FW build information\n" ); |
1447 | |
1448 | /* reset ccg device and initialize ucsi */ |
1449 | status = ucsi_ccg_init(uc); |
1450 | if (status < 0) { |
1451 | dev_err(uc->dev, "ucsi_ccg_init failed - %d\n" , status); |
1452 | return status; |
1453 | } |
1454 | |
1455 | status = get_fw_info(uc); |
1456 | if (status < 0) { |
1457 | dev_err(uc->dev, "get_fw_info failed - %d\n" , status); |
1458 | return status; |
1459 | } |
1460 | |
1461 | uc->port_num = 1; |
1462 | |
1463 | if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK) |
1464 | uc->port_num++; |
1465 | |
1466 | uc->ucsi = ucsi_create(dev, ops: &ucsi_ccg_ops); |
1467 | if (IS_ERR(ptr: uc->ucsi)) |
1468 | return PTR_ERR(ptr: uc->ucsi); |
1469 | |
1470 | ucsi_set_drvdata(ucsi: uc->ucsi, data: uc); |
1471 | |
1472 | status = ccg_request_irq(uc); |
1473 | if (status < 0) { |
1474 | dev_err(uc->dev, "request_threaded_irq failed - %d\n" , status); |
1475 | goto out_ucsi_destroy; |
1476 | } |
1477 | |
1478 | status = ucsi_register(ucsi: uc->ucsi); |
1479 | if (status) |
1480 | goto out_free_irq; |
1481 | |
1482 | i2c_set_clientdata(client, data: uc); |
1483 | |
1484 | pm_runtime_set_active(dev: uc->dev); |
1485 | pm_runtime_enable(dev: uc->dev); |
1486 | pm_runtime_use_autosuspend(dev: uc->dev); |
1487 | pm_runtime_set_autosuspend_delay(dev: uc->dev, delay: 5000); |
1488 | pm_runtime_idle(dev: uc->dev); |
1489 | |
1490 | return 0; |
1491 | |
1492 | out_free_irq: |
1493 | free_irq(uc->irq, uc); |
1494 | out_ucsi_destroy: |
1495 | ucsi_destroy(ucsi: uc->ucsi); |
1496 | |
1497 | return status; |
1498 | } |
1499 | |
1500 | static void ucsi_ccg_remove(struct i2c_client *client) |
1501 | { |
1502 | struct ucsi_ccg *uc = i2c_get_clientdata(client); |
1503 | |
1504 | cancel_work_sync(work: &uc->pm_work); |
1505 | cancel_work_sync(work: &uc->work); |
1506 | pm_runtime_disable(dev: uc->dev); |
1507 | ucsi_unregister(ucsi: uc->ucsi); |
1508 | ucsi_destroy(ucsi: uc->ucsi); |
1509 | free_irq(uc->irq, uc); |
1510 | } |
1511 | |
/* Device-tree match: Cypress CYPD4226 Type-C controller. */
static const struct of_device_id ucsi_ccg_of_match_table[] = {
		{ .compatible = "cypress,cypd4226" , },
		{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);

/* Legacy i2c device-id match. */
static const struct i2c_device_id ucsi_ccg_device_id[] = {
	{"ccgx-ucsi" , 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);

/* ACPI match (AMD platforms exposing the controller as AMDI0042). */
static const struct acpi_device_id amd_i2c_ucsi_match[] = {
	{"AMDI0042" },
	{}
};
MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
1529 | |
1530 | static int ucsi_ccg_resume(struct device *dev) |
1531 | { |
1532 | struct i2c_client *client = to_i2c_client(dev); |
1533 | struct ucsi_ccg *uc = i2c_get_clientdata(client); |
1534 | |
1535 | return ucsi_resume(ucsi: uc->ucsi); |
1536 | } |
1537 | |
/* Runtime suspend: nothing to quiesce on the driver side. */
static int ucsi_ccg_runtime_suspend(struct device *dev)
{
	return 0;
}
1542 | |
1543 | static int ucsi_ccg_runtime_resume(struct device *dev) |
1544 | { |
1545 | struct i2c_client *client = to_i2c_client(dev); |
1546 | struct ucsi_ccg *uc = i2c_get_clientdata(client); |
1547 | |
1548 | /* |
1549 | * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue |
1550 | * of missing interrupt when a device is connected for runtime resume. |
1551 | * Schedule a work to call ISR as a workaround. |
1552 | */ |
1553 | if (uc->fw_build == CCG_FW_BUILD_NVIDIA && |
1554 | uc->fw_version <= CCG_OLD_FW_VERSION) |
1555 | schedule_work(work: &uc->pm_work); |
1556 | |
1557 | return 0; |
1558 | } |
1559 | |
/* PM callbacks: system resume plus runtime PM; no ->suspend needed. */
static const struct dev_pm_ops ucsi_ccg_pm = {
	.resume = ucsi_ccg_resume,
	.runtime_suspend = ucsi_ccg_runtime_suspend,
	.runtime_resume = ucsi_ccg_runtime_resume,
};
1565 | |
/* i2c driver glue: matches via OF, ACPI, or legacy i2c id tables. */
static struct i2c_driver ucsi_ccg_driver = {
	.driver = {
		.name = "ucsi_ccg" ,
		.pm = &ucsi_ccg_pm,
		.dev_groups = ucsi_ccg_groups,
		.acpi_match_table = amd_i2c_ucsi_match,
		.of_match_table = ucsi_ccg_of_match_table,
	},
	.probe = ucsi_ccg_probe,
	.remove = ucsi_ccg_remove,
	.id_table = ucsi_ccg_device_id,
};
1578 | |
/* Standard module init/exit boilerplate for a single i2c driver. */
module_i2c_driver(ucsi_ccg_driver);

MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>" );
MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller" );
MODULE_LICENSE("GPL v2" );
1584 | |