// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/usb/xhci-dbgp.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define	xdbc_trace	trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */

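/*
 * Size and map the xHC's 32/64-bit memory BAR with the classic
 * write-all-ones probe. This runs before the PCI core is up, so config
 * space is accessed directly and the BAR is mapped via early_ioremap().
 */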
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}

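/*
 * Hand out one zeroed, page-aligned page from memblock. Early boot memory
 * sits in the kernel direct mapping, so __pa() of the returned pointer
 * serves as the DMA address programmed into the controller.
 */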
static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
	void *virt;

	virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!virt)
		return NULL;

	if (dma_addr)
		*dma_addr = (dma_addr_t)__pa(virt);

	return virt;
}

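/*
 * Walk PCI config space by brute force and return the bus/device/function
 * of the xdbc_num'th xHCI-class controller found; the PCI core has not
 * enumerated the buses yet at this point.
 */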
static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
	u32 bus, dev, func, class;

	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
					continue;

				if (xdbc_num-- != 0)
					continue;

				*b = bus;
				*d = dev;
				*f = func;

				return 0;
			}
		}
	}

	return -1;
}

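/*
 * Poll a register until (value & mask) == done, busy-waiting 'delay'
 * microseconds between reads, for at most 'wait' microseconds total.
 */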
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
	u32 result;

	/* Can not use readl_poll_timeout_atomic() for early boot things */
	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay);
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;
}

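/*
 * Claim the controller from the BIOS through the USB legacy support
 * extended capability, then gate off BIOS SMI generation so firmware no
 * longer reacts to controller events.
 */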
static void __init xdbc_bios_handoff(void)
{
	int offset, timeout;
	u32 val;

	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
	val = readl(xdbc.xhci_base + offset);

	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

		if (timeout) {
			pr_notice("failed to hand over xHCI control from BIOS\n");
			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
		}
	}

	/* Disable BIOS SMIs and clear all SMI events: */
	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
	val &= XHCI_LEGACY_DISABLE_SMI;
	val |= XHCI_LEGACY_SMI_EVENTS;
	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}

static int __init
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
	seg->trbs = xdbc_get_page(&seg->dma);
	if (!seg->trbs)
		return -ENOMEM;

	ring->segment = seg;

	return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;

	if (!seg)
		return;

	memblock_phys_free(seg->dma, PAGE_SIZE);
	ring->segment = NULL;
}

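/*
 * Reinitialize a ring to its empty state. Transfer rings get a link TRB
 * with the toggle-cycle bit pointing back to the segment base; the event
 * ring is consumed linearly by software and needs no link TRB.
 */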
static void xdbc_reset_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;
	struct xdbc_trb *link_trb;

	memset(seg->trbs, 0, PAGE_SIZE);

	ring->enqueue = seg->trbs;
	ring->dequeue = seg->trbs;
	ring->cycle_state = 1;

	if (ring != &xdbc.evt_ring) {
		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
	}
}

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		s[i] = cpu_to_le16(c[i]);
}

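/*
 * Carve the single table page into the event ring segment table, the DbC
 * contexts and the string descriptors, then program the ERST, context and
 * device-info registers accordingly.
 */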
static void xdbc_mem_init(void)
{
	struct xdbc_ep_context *ep_in, *ep_out;
	struct usb_string_descriptor *s_desc;
	struct xdbc_erst_entry *entry;
	struct xdbc_strings *strings;
	struct xdbc_context *ctx;
	unsigned int max_burst;
	u32 string_length;
	int index = 0;
	u32 dev_info;

	xdbc_reset_ring(&xdbc.evt_ring);
	xdbc_reset_ring(&xdbc.in_ring);
	xdbc_reset_ring(&xdbc.out_ring);
	memset(xdbc.table_base, 0, PAGE_SIZE);
	memset(xdbc.out_buf, 0, PAGE_SIZE);

	/* Initialize event ring segment table: */
	xdbc.erst_size = 16;
	xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_ERST_ENTRY_NUM;
	entry = (struct xdbc_erst_entry *)xdbc.erst_base;

	entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
	entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
	entry->__reserved_0 = 0;

	/* Initialize ERST registers: */
	writel(1, &xdbc.xdbc_reg->ersts);
	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

	/* Debug capability contexts: */
	xdbc.dbcc_size = 64 * 3;
	xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_DBCC_ENTRY_NUM;

	/* Populate the strings: */
	xdbc.string_size = sizeof(struct xdbc_strings);
	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
	strings = (struct xdbc_strings *)xdbc.string_base;

	index += XDBC_STRING_ENTRY_NUM;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

	/* Populate info Context: */
	ctx = (struct xdbc_context *)xdbc.dbcc_base;

	ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
	ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
	ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
	ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
	ctx->info.length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
	ep_out = (struct xdbc_ep_context *)&ctx->out;

	ep_out->ep_info1 = 0;
	ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

	/* Populate bulk in endpoint context: */
	ep_in = (struct xdbc_ep_context *)&ctx->in;

	ep_in->ep_info1 = 0;
	ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

	/* Set DbC context and info registers: */
	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
	writel(dev_info, &xdbc.xdbc_reg->devinfo1);

	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
	writel(dev_info, &xdbc.xdbc_reg->devinfo2);

	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}

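/* Reset every unconnected root hub port in the given range (1-based id). */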
static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
	void __iomem *ops_reg;
	void __iomem *portsc;
	u32 val, cap_length;
	int i;

	cap_length = readl(xdbc.xhci_base) & 0xff;
	ops_reg = xdbc.xhci_base + cap_length;

	id--;
	for (i = id; i < (id + count); i++) {
		portsc = ops_reg + 0x400 + i * 0x10;
		val = readl(portsc);
		if (!(val & PORT_CONNECT))
			writel(val | PORT_RESET, portsc);
	}
}

static void xdbc_reset_debug_port(void)
{
	u32 val, port_offset, port_count;
	int offset = 0;

	do {
		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(xdbc.xhci_base + offset);
		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
			continue;

		val = readl(xdbc.xhci_base + offset + 8);
		port_offset = XHCI_EXT_PORT_OFF(val);
		port_count = XHCI_EXT_PORT_COUNT(val);

		xdbc_do_reset_debug_port(port_offset, port_count);
	} while (1);
}

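/*
 * Write one TRB at the enqueue pointer and advance it. When the enqueue
 * pointer reaches the link TRB, hand the link TRB to the controller by
 * matching its cycle bit, then wrap to the segment base and toggle the
 * ring's cycle state.
 */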
static void
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xdbc_trb *trb, *link_trb;

	trb = ring->enqueue;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	++(ring->enqueue);
	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
		link_trb = ring->enqueue;
		if (ring->cycle_state)
			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
		else
			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

		ring->enqueue = ring->segment->trbs;
		ring->cycle_state ^= 1;
	}
}

static void xdbc_ring_doorbell(int target)
{
	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}

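/*
 * Enable the DbC and its port, then wait for the debug host to connect
 * and configure the debug device. Returns 0 once the DbC is running.
 */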
static int xdbc_start(void)
{
	u32 ctrl, status;
	int ret;

	ctrl = readl(&xdbc.xdbc_reg->control);
	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
	if (ret) {
		xdbc_trace("failed to initialize hardware\n");
		return ret;
	}

	/* Reset port to avoid bus hang: */
	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
		xdbc_reset_debug_port();

	/* Wait for port connection: */
	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for connection timed out\n");
		return ret;
	}

	/* Wait for debug device to be configured: */
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for device configuration timed out\n");
		return ret;
	}

	/* Check port number: */
	status = readl(&xdbc.xdbc_reg->status);
	if (!DCST_DEBUG_PORT(status)) {
		xdbc_trace("invalid root hub port number\n");
		return -ENODEV;
	}

	xdbc.port_number = DCST_DEBUG_PORT(status);

	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

	return 0;
}

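/*
 * Queue a single normal TRB on the IN or OUT bulk ring and ring the
 * corresponding doorbell. Data is bounced through the dedicated DMA
 * buffers, and the TRB's cycle bit is flipped last, after a write
 * barrier, so the controller never sees a half-written TRB.
 */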
static int xdbc_bulk_transfer(void *data, int size, bool read)
{
	struct xdbc_ring *ring;
	struct xdbc_trb *trb;
	u32 length, control;
	u32 cycle;
	u64 addr;

	if (size > XDBC_MAX_PACKET) {
		xdbc_trace("bad parameter, size %d\n", size);
		return -EINVAL;
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
		return -EIO;
	}

	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(size);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	if (read) {
		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
		addr = xdbc.in_dma;
		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
	} else {
		memcpy_and_pad(xdbc.out_buf, XDBC_MAX_PACKET, data, size, 0);
		addr = xdbc.out_dma;
		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
	}

	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();
	if (cycle)
		trb->field[3] |= cpu_to_le32(cycle);
	else
		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

	return size;
}

static int xdbc_handle_external_reset(void)
{
	int ret = 0;

	xdbc.flags = 0;
	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
	if (ret)
		goto reset_out;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0)
		goto reset_out;

	xdbc_trace("dbc recovered\n");

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;

reset_out:
	xdbc_trace("failed to recover from external reset\n");
	return ret;
}

static int __init xdbc_early_setup(void)
{
	int ret;

	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
	if (ret)
		return ret;

	/* Allocate the table page: */
	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
	if (!xdbc.table_base)
		return -ENOMEM;

	/* Get and store the transfer buffer: */
	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
	if (!xdbc.out_buf)
		return -ENOMEM;

	/* Allocate the event ring: */
	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
	if (ret < 0)
		return ret;

	/* Allocate IN/OUT endpoint transfer rings: */
	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
	if (ret < 0)
		return ret;

	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
	if (ret < 0)
		return ret;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0) {
		writel(0, &xdbc.xdbc_reg->control);
		return ret;
	}

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;
}

int __init early_xdbc_parse_parameter(char *s, int keep_early)
{
	unsigned long dbgp_num = 0;
	u32 bus, dev, func, offset;
	char *e;
	int ret;

	if (!early_pci_allowed())
		return -EPERM;

	early_console_keep = keep_early;

	if (xdbc.xdbc_reg)
		return 0;

	if (*s) {
		dbgp_num = simple_strtoul(s, &e, 10);
		if (s == e)
			dbgp_num = 0;
	}

	pr_notice("dbgp_num: %lu\n", dbgp_num);

	/* Locate the host controller: */
	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
	if (ret) {
		pr_notice("failed to locate xhci host\n");
		return -ENODEV;
	}

	xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
	xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
	xdbc.bus = bus;
	xdbc.dev = dev;
	xdbc.func = func;

	/* Map the IO memory: */
	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
	if (!xdbc.xhci_base)
		return -EINVAL;

	/* Locate DbC registers: */
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!offset) {
		pr_notice("xhci host doesn't support debug capability\n");
		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
		xdbc.xhci_base = NULL;
		xdbc.xhci_length = 0;

		return -ENODEV;
	}
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

	return 0;
}

int __init early_xdbc_setup_hardware(void)
{
	int ret;

	if (!xdbc.xdbc_reg)
		return -ENODEV;

	xdbc_bios_handoff();

	raw_spin_lock_init(&xdbc.lock);

	ret = xdbc_early_setup();
	if (ret) {
		pr_notice("failed to setup the connection to host\n");

		xdbc_free_ring(&xdbc.evt_ring);
		xdbc_free_ring(&xdbc.out_ring);
		xdbc_free_ring(&xdbc.in_ring);

		if (xdbc.table_dma)
			memblock_phys_free(xdbc.table_dma, PAGE_SIZE);

		if (xdbc.out_dma)
			memblock_phys_free(xdbc.out_dma, PAGE_SIZE);

		xdbc.table_base = NULL;
		xdbc.out_buf = NULL;
	}

	return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
	u32 port_reg;

	port_reg = readl(&xdbc.xdbc_reg->portsc);
	if (port_reg & PORTSC_CONN_CHANGE) {
		xdbc_trace("connect status change event\n");

		/* Check whether cable unplugged: */
		if (!(port_reg & PORTSC_CONN_STATUS)) {
			xdbc.flags = 0;
			xdbc_trace("cable unplugged\n");
		}
	}

	if (port_reg & PORTSC_RESET_CHANGE)
		xdbc_trace("port reset change event\n");

	if (port_reg & PORTSC_LINK_CHANGE)
		xdbc_trace("port link status change event\n");

	if (port_reg & PORTSC_CONFIG_CHANGE)
		xdbc_trace("config error change\n");

	/* Write back the value to clear RW1C bits: */
	writel(port_reg, &xdbc.xdbc_reg->portsc);
}

static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
	u32 comp_code;
	int ep_id;

	comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

	switch (comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
	default:
		if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
		if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
			xdbc.flags |= XDBC_FLAGS_IN_STALL;

		xdbc_trace("endpoint %d stalled\n", ep_id);
		break;
	}

	if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
	} else {
		xdbc_trace("invalid endpoint id %d\n", ep_id);
	}
}

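/*
 * Poll the DbC for state changes: re-enable bus mastering if it was
 * cleared, recover from external resets and run/stall transitions, then
 * drain the event ring and update the dequeue pointer.
 */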
static void xdbc_handle_events(void)
{
	struct xdbc_trb *evt_trb;
	bool update_erdp = false;
	u32 reg;
	u8 cmd;

	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return;

	/* Handle external reset events: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (!(reg & CTRL_DBC_ENABLE)) {
		if (xdbc_handle_external_reset()) {
			xdbc_trace("failed to recover connection\n");
			return;
		}
	}

	/* Handle configure-exit event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_DBC_RUN_CHANGE) {
		writel(reg, &xdbc.xdbc_reg->control);
		if (reg & CTRL_DBC_RUN)
			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
		else
			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
	}

	/* Handle endpoint stall event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_HALT_IN_TR) {
		xdbc.flags |= XDBC_FLAGS_IN_STALL;
	} else {
		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	}

	if (reg & CTRL_HALT_OUT_TR)
		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
	else
		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

	/* Handle the events in the event ring: */
	evt_trb = xdbc.evt_ring.dequeue;
	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
		case TRB_TYPE(TRB_PORT_STATUS):
			xdbc_handle_port_status(evt_trb);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			xdbc_handle_tx_event(evt_trb);
			break;
		default:
			break;
		}

		++(xdbc.evt_ring.dequeue);
		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
			xdbc.evt_ring.cycle_state ^= 1;
		}

		evt_trb = xdbc.evt_ring.dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp)
		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}

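/*
 * Synchronously write one chunk to the debug host, spinning (up to ~2s)
 * until the previous OUT transfer completes. Only trylock in NMI context
 * so an NMI cannot deadlock against an interrupted writer.
 */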
static int xdbc_bulk_write(const char *bytes, int size)
{
	int ret, timeout = 0;
	unsigned long flags;

retry:
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
			return -EAGAIN;
	} else {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
	}

	xdbc_handle_events();

	/* Check completion of the previous request: */
	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		udelay(100);
		timeout += 100;
		goto retry;
	}

	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		xdbc_trace("previous transfer not completed yet\n");

		return -ETIMEDOUT;
	}

	ret = xdbc_bulk_transfer((void *)bytes, size, false);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	return ret;
}

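/* Chunk the message into XDBC_MAX_PACKET writes, expanding '\n' to "\r\n". */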
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	/* static variables are zeroed, so buf is always NUL terminated */
	static char buf[XDBC_MAX_PACKET + 1];
	int chunk, ret;
	int use_cr = 0;

	if (!xdbc.xdbc_reg)
		return;

	while (n > 0) {
		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

			if (!use_cr && *str == '\n') {
				use_cr = 1;
				buf[chunk] = '\r';
				str--;
				n++;
				continue;
			}

			if (use_cr)
				use_cr = 0;
			buf[chunk] = *str;
		}

		if (chunk > 0) {
			ret = xdbc_bulk_write(buf, chunk);
			if (ret < 0)
				xdbc_trace("missed message {%s}\n", buf);
		}
	}
}

static struct console early_xdbc_console = {
	.name		= "earlyxdbc",
	.write		= early_xdbc_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

void __init early_xdbc_register_console(void)
{
	if (early_console)
		return;

	early_console = &early_xdbc_console;
	if (early_console_keep)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

static void xdbc_unregister_console(void)
{
	if (console_is_registered(&early_xdbc_console))
		unregister_console(&early_xdbc_console);
}

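/*
 * Kernel thread that keeps servicing DbC events after boot. Once the
 * connection is torn down it unregisters the console and disables the
 * DbC.
 */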
static int xdbc_scrub_function(void *ptr)
{
	unsigned long flags;

	while (true) {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
		xdbc_handle_events();

		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
			break;
		}

		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		schedule_timeout_interruptible(1);
	}

	xdbc_unregister_console();
	writel(0, &xdbc.xdbc_reg->control);
	xdbc_trace("dbc scrub function exits\n");

	return 0;
}

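/*
 * subsys initcall: either shut the DbC down and free its memory if the
 * early console is no longer wanted, or switch the register mapping from
 * early_ioremap() to a permanent ioremap() and start the scrub thread.
 */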
static int __init xdbc_init(void)
{
	unsigned long flags;
	void __iomem *base;
	int ret = 0;
	u32 offset;

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return 0;

	/*
	 * It's time to shut down the DbC, so that the debug
	 * port can be reused by the host controller:
	 */
	if (early_xdbc_console.index == -1 ||
	    (early_xdbc_console.flags & CON_BOOT)) {
		xdbc_trace("hardware not used anymore\n");
		goto free_and_quit;
	}

	base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
	if (!base) {
		xdbc_trace("failed to remap the io address\n");
		ret = -ENOMEM;
		goto free_and_quit;
	}

	raw_spin_lock_irqsave(&xdbc.lock, flags);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
	xdbc.xhci_base = base;
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

	return 0;

free_and_quit:
	xdbc_free_ring(&xdbc.evt_ring);
	xdbc_free_ring(&xdbc.out_ring);
	xdbc_free_ring(&xdbc.in_ring);
	memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
	memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
	writel(0, &xdbc.xdbc_reg->control);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

	return ret;
}
subsys_initcall(xdbc_init);