// SPDX-License-Identifier: GPL-2.0-or-later
/*
** System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
** (c) Copyright 2000-2004 Hewlett-Packard Company
**
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
/*
 * The semantics of 64-bit register access on 32-bit systems can't be
 * guaranteed by the C standard; we hope the _lo_hi() macros defining readq
 * and writeq here will behave as expected.
 */
#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/page.h>		/* for PAGE0 */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

static struct proc_dir_entry *proc_runway_root __ro_after_init;
static struct proc_dir_entry *proc_mckinley_root __ro_after_init;

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (i.e. follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
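
/*
** Illustrative only (not used by the driver): because CSR writes are
** posted, a write that must reach the chip before execution continues
** is followed by a read of the same (or a dependent) register.  This is
** the pattern the unmap path below uses to flush IO TLB purges:
**
**	WRITE_REG(iovp, ioc->ioc_hpa + IOC_PCOM);  /* post the purge      */
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);         /* force it to the HW  */
*/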

#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif	/* DEBUG_SBA_INIT */


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	__le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg,
		rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)(((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
			nents,
			(unsigned long) sg_dma_address(startsg),
			sg_dma_len(startsg),
			sg_virt(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVAs).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
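
/*
** Worked example (illustrative only): with BITS_PER_LONG == 64,
** RESMAP_MASK(4) == ~0UL << 60, i.e. the four most-significant bits
** of a word.  Shifting it right by a bit offset selects a run of four
** bits anywhere in the word, which is how ranges are marked busy and
** tested below.
*/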

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
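
/*
** Illustrative only: each byte of res_map covers 8 pdir entries, so a
** resource-map pointer converts to a pdir index as byte offset * 8 plus
** the bit offset.  E.g. res_ptr == ioc->res_map + 16 with bitshiftcnt 5
** yields pide == 16*8 + 5 == 133.
*/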

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device to query the bitmap for
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for (; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while (res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
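
/*
** Worked example of the "log2(size) at a time" search (illustrative
** only): a request for 3 pages gives o = 1 << get_order(3 << PAGE_SHIFT)
** == 4, so RESMAP_MASK(3) slides through each word in steps of 4 bits.
** Every candidate range is therefore 4-page aligned, which is what
** sba_mark_invalid() relies on when it purges the IO TLB with a single
** power-of-2 sized PCOM write.
*/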


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device for which pages should be allocated
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark them in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes in the mapping being freed
 *
 * Clear bits in the ioc's resource map
 */
static void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}
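
/*
** Illustrative only: freeing 2 pages at pide 13 computes
** m = RESMAP_MASK(2) >> 13, i.e. bits 13 and 14 (counting from the MSB)
** of the word holding that range, and clears exactly those bits.
*/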


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, but supports fewer physical address bits:
 *
 *  0                       23                               51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */

static void
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherent index */

	pa = lpa(vba);
	pa &= IOVP_MASK;

	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	asm_io_fdc(pdir_ptr);
}
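
/*
** Worked example with hypothetical values (illustrative only, field
** layout per the diagram above): mapping a kernel address whose LPA is
** 0x12345000, with a coherence index whose relevant byte is 0x5a,
** composes the entry as
**
**	pa  = 0x12345000 & IOVP_MASK;	/* PPN, page-aligned         */
**	pa |= 0x5a;			/* VI in the lowest byte     */
**	pa |= SBA_PDIR_VALID_BIT;	/* MSB "valid"               */
**
** which is then byte-swapped into the little-endian pdir.
*/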


/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	__le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			asm_io_fdc(pdir_ptr);
			if (ioc_needs_fdc) {
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	asm_io_fdc(pdir_ptr);

	WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
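
/*
** Illustrative only: the low bits of the PCOM address encode the purge
** size as log2(bytes).  Purging a 16KB mapping with 4KB pages sets
** get_order(16384) + PAGE_SHIFT == 2 + 12 == 14 in the low bits, and
** the alignment guaranteed by sba_search_bitmap() makes the base
** address valid for that size.
*/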

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static int sba_dma_supported(struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE))));
}


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	__le64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
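
/*
** Worked example with hypothetical values (illustrative only): mapping
** addr 0x12345678, size 0x100 with 4KB IO pages saves offset 0x678,
** rounds the mapping up to one IOVP_SIZE page, fills one pdir entry,
** and returns SBA_IOVA(ioc, pide << IOVP_SHIFT, 0x678, 0) - the IOVA is
** page-granular on the pdir side but byte-accurate to the buffer.
*/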


static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction,
	     unsigned long attrs)
{
	return sba_map_single(dev, page_address(page) + offset, size,
			      direction);
}


/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
	       enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}
	offset = iova & ~IOVP_MASK;
	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use an IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	asm_io_sync();

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}


/**
 * sba_alloc - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 * @gfp: allocation flags
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
		       gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
	 dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_page(hwdev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);
	if (!ioc)
		return -EINVAL;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
							sglist->length, direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}


/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sg_virt(sglist), sglist->length);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sg_dma_len(sglist)) {

		sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
			       direction, 0);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static const struct dma_map_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc =		sba_alloc,
	.free =			sba_free,
	.map_page =		sba_map_page,
	.unmap_page =		sba_unmap_page,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.get_sgtable =		dma_common_get_sgtable,
	.alloc_pages =		dma_common_alloc_pages,
	.free_pages =		dma_common_free_pages,
};
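
/*
** Illustrative only: these ops are not called directly.  Once the bus
** code points a device's dma_map_ops at sba_ops, the generic DMA API
** dispatches here, e.g.:
**
**	dma_addr_t d = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
**		-> sba_map_page(...)
**	dma_unmap_single(&pdev->dev, d, len, DMA_TO_DEVICE);
**		-> sba_unmap_page(...)
*/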


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to program the SBA/LBA directed range registers...this
**      burden may fall on the LBA code since it directly supports the
**      PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************************
*
* Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL	/* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL	/* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base) {
		panic("%s() could not allocate I/O Page Table\n",
		      __func__);
	}

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if (((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L))
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 *
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order += 1;
				pdir_size <<= 1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages(pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024, 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
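
/*
** Worked example (illustrative only): with a 2MB pdir, pdir_order == 9,
** so the code above allocates 4MB, frees the leading and trailing 1MB,
** keeps the middle 2MB sitting on an "odd" 1MB boundary, and gives back
** the final unusable 128KB - hence piranha_bad_128k and the reduced
** res_map size in sba_common_init().
*/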

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;
	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}

/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc		= ioc,
		.ioc_num	= ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		 __func__, ioc->ioc_hpa, iova_space_size >> 20,
		 iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
		 __func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ ": unsupported system page size %d",
		      1 << PAGE_SHIFT);
		break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /*SBA_AGP_SUPPORT*/
}

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class.  Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bit "IOVA" space, the top two bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
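
	/*
	** Worked example (illustrative only, 4KB pages): a machine with
	** 512MB of RAM and one IOC starts with iova_space_size == 131072
	** pages, which survives the 1MB-1GB clamp; iov_order ==
	** get_order(512MB) == 17, so the IOVA space is 512MB and the pdir
	** needs (512MB / 4KB) * 8 bytes == 1MB.
	*/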
1434 | |
1435 | DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n" , |
1436 | __func__, |
1437 | ioc->ioc_hpa, |
1438 | (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT), |
1439 | iova_space_size>>20, |
1440 | iov_order + PAGE_SHIFT); |
1441 | |
1442 | ioc->pdir_base = sba_alloc_pdir(pdir_size); |
1443 | |
1444 | DBG_INIT("%s() pdir %p size %x\n" , |
1445 | __func__, ioc->pdir_base, pdir_size); |
1446 | |
1447 | #ifdef SBA_HINT_SUPPORT |
1448 | /* FIXME : DMA HINTs not used */ |
1449 | ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; |
1450 | ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); |
1451 | |
1452 | DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n" , |
1453 | ioc->hint_shift_pdir, ioc->hint_mask_pdir); |
1454 | #endif |
1455 | |
1456 | WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); |
1457 | |
1458 | /* build IMASK for IOC and Elroy */ |
1459 | iova_space_mask = 0xffffffff; |
1460 | iova_space_mask <<= (iov_order + PAGE_SHIFT); |
1461 | |
1462 | /* |
1463 | ** On C3000 w/512MB mem, HP-UX 10.20 reports: |
1464 | ** ibase=0, imask=0xFE000000, size=0x2000000. |
1465 | */ |
1466 | ioc->ibase = 0; |
1467 | ioc->imask = iova_space_mask; /* save it */ |
1468 | #ifdef ZX1_SUPPORT |
1469 | ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); |
1470 | #endif |
1471 | |
1472 | DBG_INIT("%s() IOV base %#lx mask %#0lx\n" , |
1473 | __func__, ioc->ibase, ioc->imask); |
1474 | |
1475 | /* |
1476 | ** FIXME: Hint registers are programmed with default hint |
1477 | ** values during boot, so hints should be sane even if we |
1478 | ** can't reprogram them the way drivers want. |
1479 | */ |
1480 | |
1481 | setup_ibase_imask(sba, ioc, ioc_num); |
1482 | |
1483 | /* |
1484 | ** Program the IOC's ibase and enable IOVA translation |
1485 | */ |
1486 | WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); |
1487 | WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); |
1488 | |
1489 | /* Set I/O PDIR Page size to system page size */ |
1490 | switch (PAGE_SHIFT) { |
1491 | case 12: tcnfg = 0; break; /* 4K */ |
1492 | case 13: tcnfg = 1; break; /* 8K */ |
1493 | case 14: tcnfg = 2; break; /* 16K */ |
1494 | case 16: tcnfg = 3; break; /* 64K */ |
1495 | default: |
1496 | panic(__FILE__ "Unsupported system page size %d" , |
1497 | 1 << PAGE_SHIFT); |
1498 | break; |
1499 | } |
1500 | /* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */ |
1501 | WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG); |
1502 | |
1503 | /* |
1504 | ** Clear I/O TLB of any possible entries. |
1505 | ** (Yes. This is a bit paranoid...but so what) |
1506 | */ |
1507 | WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); |
1508 | |
1509 | ioc->ibase = 0; /* used by SBA_IOVA and related macros */ |
1510 | |
1511 | DBG_INIT("%s() DONE\n" , __func__); |
1512 | } |
1513 | |
1514 | |
1515 | |
1516 | /************************************************************************** |
1517 | ** |
1518 | ** SBA initialization code (HW and SW) |
1519 | ** |
1520 | ** o identify SBA chip itself |
1521 | ** o initialize SBA chip modes (HardFail) |
1522 | ** o initialize SBA chip modes (HardFail) |
1523 | ** o FIXME: initialize DMA hints for reasonable defaults |
1524 | ** |
1525 | **************************************************************************/ |
1526 | |
1527 | static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset) |
1528 | { |
1529 | return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); |
1530 | } |
1531 | |
1532 | static void sba_hw_init(struct sba_device *sba_dev) |
1533 | { |
1534 | int i; |
1535 | int num_ioc; |
1536 | u64 ioc_ctl; |
1537 | |
1538 | if (!is_pdc_pat()) { |
1539 | /* Shutdown the USB controller on Astro-based workstations. |
1540 | ** Once we reprogram the IOMMU, the next DMA performed by |
1541 | ** USB will HPMC the box. USB is only enabled if a |
1542 | ** keyboard is present and found. |
1543 | ** |
1544 | ** With serial console, j6k v5.0 firmware says: |
1545 | ** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7 |
1546 | ** |
1547 | ** FIXME: Using GFX+USB console at power up but direct |
1548 | ** linux to serial console is still broken. |
1549 | ** USB could generate DMA so we must reset USB. |
1550 | ** The proper sequence would be: |
1551 | ** o block console output |
1552 | ** o reset USB device |
1553 | ** o reprogram serial port |
1554 | ** o unblock console output |
1555 | */ |
1556 | if (PAGE0->mem_kbd.cl_class == CL_KEYBD) { |
1557 | pdc_io_reset_devices(); |
1558 | } |
1559 | |
1560 | } |
1561 | |
1562 | |
1563 | #if 0 |
1564 | printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n" , PAGE0->mem_boot.hpa, |
1565 | PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class); |
1566 | |
1567 | /* |
1568 | ** Need to deal with DMA from LAN. |
1569 | ** Maybe use page zero boot device as a handle to talk |
1570 | ** to PDC about which device to shutdown. |
1571 | ** |
1572 | ** Netbooting, j6k v5.0 firmware says: |
1573 | ** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002 |
1574 | ** ARGH! invalid class. |
1575 | */ |
1576 | if ((PAGE0->mem_boot.cl_class != CL_RANDOM) |
1577 | && (PAGE0->mem_boot.cl_class != CL_SEQU)) { |
1578 | pdc_io_reset(); |
1579 | } |
1580 | #endif |
1581 | |
1582 | if (!IS_PLUTO(sba_dev->dev)) { |
1583 | ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL); |
1584 | DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->" , |
1585 | __func__, sba_dev->sba_hpa, ioc_ctl); |
1586 | ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE); |
1587 | ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC; |
1588 | /* j6700 v1.6 firmware sets 0x294f */ |
1589 | /* A500 firmware sets 0x4d */ |
1590 | |
1591 | WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL); |
1592 | |
1593 | #ifdef DEBUG_SBA_INIT |
1594 | ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL); |
1595 | DBG_INIT(" 0x%Lx\n" , ioc_ctl); |
1596 | #endif |
1597 | } /* if !PLUTO */ |
1598 | |
1599 | if (IS_ASTRO(sba_dev->dev)) { |
1600 | int err; |
1601 | sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET); |
1602 | num_ioc = 1; |
1603 | |
1604 | sba_dev->chip_resv.name = "Astro Intr Ack" ; |
1605 | sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL; |
1606 | sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ; |
1607 | err = request_resource(root: &iomem_resource, new: &(sba_dev->chip_resv)); |
1608 | BUG_ON(err < 0); |
1609 | |
1610 | } else if (IS_PLUTO(sba_dev->dev)) { |
1611 | int err; |
1612 | |
1613 | sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET); |
1614 | num_ioc = 1; |
1615 | |
		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1626 | WARN_ON(err < 0); |
1627 | } else { |
1628 | /* IKE, REO */ |
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1631 | num_ioc = 2; |
1632 | |
1633 | /* TODO - LOOKUP Ike/Stretch chipset mem map */ |
1634 | } |
1635 | /* XXX: What about Reo Grande? */ |
1636 | |
1637 | sba_dev->num_ioc = num_ioc; |
1638 | for (i = 0; i < num_ioc; i++) { |
1639 | void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa; |
1640 | unsigned int j; |
1641 | |
1642 | for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) { |
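			/* j is the byte offset of rope (j/8)'s register:
			** each rope register is one u64 wide, so rope N
			** sits at offset 8*N from the IOC base. */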
1643 | |
1644 | /* |
1645 | * Clear ROPE(N)_CONFIG AO bit. |
1646 | * Disables "NT Ordering" (~= !"Relaxed Ordering") |
1647 | * Overrides bit 1 in DMA Hint Sets. |
1648 | * Improves netperf UDP_STREAM by ~10% for bcm5701. |
1649 | */ |
1650 | if (IS_PLUTO(sba_dev->dev)) { |
1651 | void __iomem *rope_cfg; |
1652 | unsigned long cfg_val; |
1653 | |
1654 | rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j; |
1655 | cfg_val = READ_REG(rope_cfg); |
1656 | cfg_val &= ~IOC_ROPE_AO; |
1657 | WRITE_REG(cfg_val, rope_cfg); |
1658 | } |
1659 | |
1660 | /* |
1661 | ** Make sure the box crashes on rope errors. |
1662 | */ |
1663 | WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j); |
1664 | } |
1665 | |
1666 | /* flush out the last writes */ |
1667 | READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL); |
1668 | |
1669 | DBG_INIT(" ioc[%d] ROPE_CFG %#lx ROPE_DBG %lx\n" , |
1670 | i, |
1671 | (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40), |
1672 | (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50) |
1673 | ); |
1674 | DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n" , |
1675 | (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108), |
1676 | (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) |
1677 | ); |
1678 | |
		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
1684 | } |
1685 | } |
1686 | |
1687 | static void |
1688 | sba_common_init(struct sba_device *sba_dev) |
1689 | { |
1690 | int i; |
1691 | |
1692 | /* add this one to the head of the list (order doesn't matter) |
1693 | ** This will be useful for debugging - especially if we get coredumps |
1694 | */ |
1695 | sba_dev->next = sba_list; |
1696 | sba_list = sba_dev; |
1697 | |
1698 | for(i=0; i< sba_dev->num_ioc; i++) { |
1699 | int res_size; |
1700 | #ifdef DEBUG_DMB_TRAP |
1701 | extern void iterate_pages(unsigned long , unsigned long , |
1702 | void (*)(pte_t * , unsigned long), |
1703 | unsigned long ); |
1704 | void set_data_memory_break(pte_t * , unsigned long); |
1705 | #endif |
1706 | /* resource map size dictated by pdir_size */ |
1707 | res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */ |
1708 | |
1709 | /* Second part of PIRANHA BUG */ |
1710 | if (piranha_bad_128k) { |
1711 | res_size -= (128*1024)/sizeof(u64); |
1712 | } |
1713 | |
1714 | res_size >>= 3; /* convert bit count to byte count */ |
1715 | DBG_INIT("%s() res_size 0x%x\n" , |
1716 | __func__, res_size); |
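		/*
		 * Worked example (hypothetical sizes): a 512 KB pdir holds
		 * 512K/8 == 65536 u64 entries; at one bitmap bit per entry,
		 * that shifts down to 65536/8 == 8192 bytes of resource map.
		 */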
1717 | |
1718 | sba_dev->ioc[i].res_size = res_size; |
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1720 | |
1721 | #ifdef DEBUG_DMB_TRAP |
1722 | iterate_pages( sba_dev->ioc[i].res_map, res_size, |
1723 | set_data_memory_break, 0); |
1724 | #endif |
1725 | |
		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__);
		}
1731 | |
1732 | memset(sba_dev->ioc[i].res_map, 0, res_size); |
1733 | /* next available IOVP - circular search */ |
1734 | sba_dev->ioc[i].res_hint = (unsigned long *) |
1735 | &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]); |
1736 | |
1737 | #ifdef ASSERT_PDIR_SANITY |
1738 | /* Mark first bit busy - ie no IOVA 0 */ |
1739 | sba_dev->ioc[i].res_map[0] = 0x80; |
1740 | sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL; |
1741 | #endif |
1742 | |
1743 | /* Third (and last) part of PIRANHA BUG */ |
1744 | if (piranha_bad_128k) { |
			/* region from +1408K to +1536K is unusable. */
1746 | |
1747 | int idx_start = (1408*1024/sizeof(u64)) >> 3; |
1748 | int idx_end = (1536*1024/sizeof(u64)) >> 3; |
1749 | long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]); |
1750 | long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]); |
1751 | |
1752 | /* mark that part of the io pdir busy */ |
1753 | while (p_start < p_end) |
1754 | *p_start++ = -1; |
1755 | |
1756 | } |
1757 | |
1758 | #ifdef DEBUG_DMB_TRAP |
1759 | iterate_pages( sba_dev->ioc[i].res_map, res_size, |
1760 | set_data_memory_break, 0); |
1761 | iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size, |
1762 | set_data_memory_break, 0); |
1763 | #endif |
1764 | |
1765 | DBG_INIT("%s() %d res_map %x %p\n" , |
1766 | __func__, i, res_size, sba_dev->ioc[i].res_map); |
1767 | } |
1768 | |
1769 | spin_lock_init(&sba_dev->sba_lock); |
1770 | ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC; |
1771 | |
1772 | #ifdef DEBUG_SBA_INIT |
1773 | /* |
1774 | * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set |
1775 | * (bit #61, big endian), we have to flush and sync every time |
1776 | * IO-PDIR is changed in Ike/Astro. |
1777 | */ |
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
1783 | #endif |
1784 | } |
1785 | |
1786 | #ifdef CONFIG_PROC_FS |
1787 | static int sba_proc_info(struct seq_file *m, void *p) |
1788 | { |
1789 | struct sba_device *sba_dev = sba_list; |
1790 | struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ |
1791 | int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ |
1792 | #ifdef SBA_COLLECT_STATS |
1793 | unsigned long avg = 0, min, max; |
1794 | #endif |
1795 | int i; |
1796 | |
1797 | seq_printf(m, fmt: "%s rev %d.%d\n" , |
1798 | sba_dev->name, |
1799 | (sba_dev->hw_rev & 0x7) + 1, |
1800 | (sba_dev->hw_rev & 0x18) >> 3); |
1801 | seq_printf(m, fmt: "IO PDIR size : %d bytes (%d entries)\n" , |
1802 | (int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ |
1803 | total_pages); |
1804 | |
1805 | seq_printf(m, fmt: "Resource bitmap : %d bytes (%d pages)\n" , |
1806 | ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ |
1807 | |
1808 | seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n" , |
1809 | READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE), |
1810 | READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK), |
1811 | READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)); |
1812 | |
1813 | for (i=0; i<4; i++) |
1814 | seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n" , |
1815 | i, |
1816 | READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18), |
1817 | READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18), |
1818 | READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)); |
1819 | |
1820 | #ifdef SBA_COLLECT_STATS |
1821 | seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n" , |
1822 | total_pages - ioc->used_pages, ioc->used_pages, |
1823 | (int)(ioc->used_pages * 100 / total_pages)); |
1824 | |
1825 | min = max = ioc->avg_search[0]; |
1826 | for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { |
1827 | avg += ioc->avg_search[i]; |
1828 | if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; |
1829 | if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; |
1830 | } |
1831 | avg /= SBA_SEARCH_SAMPLE; |
1832 | seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n" , |
1833 | min, avg, max); |
1834 | |
1835 | seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n" , |
1836 | ioc->msingle_calls, ioc->msingle_pages, |
1837 | (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); |
1838 | |
1839 | /* KLUGE - unmap_sg calls unmap_single for each mapped page */ |
1840 | min = ioc->usingle_calls; |
1841 | max = ioc->usingle_pages - ioc->usg_pages; |
1842 | seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n" , |
1843 | min, max, (int)((max * 1000)/min)); |
1844 | |
1845 | seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n" , |
1846 | ioc->msg_calls, ioc->msg_pages, |
1847 | (int)((ioc->msg_pages * 1000)/ioc->msg_calls)); |
1848 | |
1849 | seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n" , |
1850 | ioc->usg_calls, ioc->usg_pages, |
1851 | (int)((ioc->usg_pages * 1000)/ioc->usg_calls)); |
1852 | #endif |
1853 | |
1854 | return 0; |
1855 | } |
1856 | |
1857 | static int |
1858 | sba_proc_bitmap_info(struct seq_file *m, void *p) |
1859 | { |
1860 | struct sba_device *sba_dev = sba_list; |
1861 | struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ |
1862 | |
	seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
	seq_putc(m, '\n');
1866 | |
1867 | return 0; |
1868 | } |
1869 | #endif /* CONFIG_PROC_FS */ |
1870 | |
1871 | static const struct parisc_device_id sba_tbl[] __initconst = { |
1872 | { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb }, |
1873 | { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc }, |
1874 | { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc }, |
1875 | { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc }, |
1876 | { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc }, |
1877 | { 0, } |
1878 | }; |
1879 | |
1880 | static int sba_driver_callback(struct parisc_device *); |
1881 | |
1882 | static struct parisc_driver sba_driver __refdata = { |
1883 | .name = MODULE_NAME, |
1884 | .id_table = sba_tbl, |
1885 | .probe = sba_driver_callback, |
1886 | }; |
1887 | |
1888 | /* |
1889 | ** Determine if sba should claim this chip (return 0) or not (return 1). |
1890 | ** If so, initialize the chip and tell other partners in crime they |
1891 | ** have work to do. |
1892 | */ |
1893 | static int __init sba_driver_callback(struct parisc_device *dev) |
1894 | { |
1895 | struct sba_device *sba_dev; |
1896 | u32 func_class; |
1897 | int i; |
1898 | char *version; |
1899 | void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE); |
1900 | struct proc_dir_entry *root __maybe_unused; |
1901 | |
1902 | sba_dump_ranges(sba_addr); |
1903 | |
1904 | /* Read HW Rev First */ |
1905 | func_class = READ_REG(sba_addr + SBA_FCLASS); |
1906 | |
1907 | if (IS_ASTRO(dev)) { |
1908 | unsigned long fclass; |
		static char astro_rev[]="Astro ?.?";
1910 | |
1911 | /* Astro is broken...Read HW Rev First */ |
1912 | fclass = READ_REG(sba_addr); |
1913 | |
1914 | astro_rev[6] = '1' + (char) (fclass & 0x7); |
1915 | astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3); |
1916 | version = astro_rev; |
1917 | |
1918 | } else if (IS_IKE(dev)) { |
		static char ike_rev[] = "Ike rev ?";
1920 | ike_rev[8] = '0' + (char) (func_class & 0xff); |
1921 | version = ike_rev; |
1922 | } else if (IS_PLUTO(dev)) { |
		static char pluto_rev[]="Pluto ?.?";
1924 | pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4); |
1925 | pluto_rev[8] = '0' + (char) (func_class & 0x0f); |
1926 | version = pluto_rev; |
1927 | } else { |
		static char reo_rev[] = "REO rev ?";
1929 | reo_rev[8] = '0' + (char) (func_class & 0xff); |
1930 | version = reo_rev; |
1931 | } |
1932 | |
1933 | if (!global_ioc_cnt) { |
1934 | global_ioc_cnt = count_parisc_driver(&sba_driver); |
1935 | |
		/* Astro and Pluto have one IOC per SBA; Ike/REO have two,
		** so only those double the count. */
		if (!IS_ASTRO(dev) && !IS_PLUTO(dev))
			global_ioc_cnt *= 2;
1939 | } |
1940 | |
1941 | printk(KERN_INFO "%s found %s at 0x%llx\n" , |
1942 | MODULE_NAME, version, (unsigned long long)dev->hpa.start); |
1943 | |
1944 | sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL); |
1945 | if (!sba_dev) { |
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
1947 | return -ENOMEM; |
1948 | } |
1949 | |
1950 | parisc_set_drvdata(dev, sba_dev); |
1951 | |
1952 | for(i=0; i<MAX_IOC; i++) |
1953 | spin_lock_init(&(sba_dev->ioc[i].res_lock)); |
1954 | |
1955 | sba_dev->dev = dev; |
1956 | sba_dev->hw_rev = func_class; |
1957 | sba_dev->name = dev->name; |
1958 | sba_dev->sba_hpa = sba_addr; |
1959 | |
1960 | sba_get_pat_resources(sba_dev); |
1961 | sba_hw_init(sba_dev); |
1962 | sba_common_init(sba_dev); |
1963 | |
1964 | hppa_dma_ops = &sba_ops; |
1965 | |
1966 | switch (dev->id.hversion) { |
1967 | case PLUTO_MCKINLEY_PORT: |
1968 | if (!proc_mckinley_root) |
			proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
1970 | root = proc_mckinley_root; |
1971 | break; |
1972 | case ASTRO_RUNWAY_PORT: |
1973 | case IKE_MERCED_PORT: |
1974 | default: |
1975 | if (!proc_runway_root) |
			proc_runway_root = proc_mkdir("bus/runway", NULL);
1977 | root = proc_runway_root; |
1978 | break; |
1979 | } |
1980 | |
1981 | proc_create_single("sba_iommu" , 0, root, sba_proc_info); |
1982 | proc_create_single("sba_iommu-bitmap" , 0, root, sba_proc_bitmap_info); |
1983 | return 0; |
1984 | } |
1985 | |
1986 | /* |
1987 | ** One time initialization to let the world know the SBA was found. |
1988 | ** This is the only routine which is NOT static. |
1989 | ** Must be called exactly once before pci_init(). |
1990 | */ |
1991 | static int __init sba_init(void) |
1992 | { |
1993 | return register_parisc_driver(&sba_driver); |
1994 | } |
1995 | arch_initcall(sba_init); |
1996 | |
1997 | |
1998 | /** |
1999 | * sba_get_iommu - Assign the iommu pointer for the pci bus controller. |
2000 | * @pci_hba: The parisc device. |
2001 | * |
2002 | * Returns the appropriate IOMMU data for the given parisc PCI controller. |
2003 | * This is cached and used later for PCI DMA Mapping. |
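 *
 * Example (assuming ROPES_PER_IOC == 8): a PCI controller at
 * hw_path 10 is served by IOC 10 >> 3 == 1 of its parent SBA.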
2004 | */ |
2005 | void * sba_get_iommu(struct parisc_device *pci_hba) |
2006 | { |
2007 | struct parisc_device *sba_dev = parisc_parent(pci_hba); |
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2009 | char t = sba_dev->id.hw_type; |
2010 | int iocnum = (pci_hba->hw_path >> 3); /* IOC # */ |
2011 | |
2012 | WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT)); |
2013 | |
2014 | return &(sba->ioc[iocnum]); |
2015 | } |
2016 | |
2017 | |
2018 | /** |
2019 | * sba_directed_lmmio - return first directed LMMIO range routed to rope |
2020 | * @pci_hba: The parisc device. |
2021 | * @r: resource PCI host controller wants start/end fields assigned. |
2022 | * |
2023 | * For the given parisc PCI controller, determine if any direct ranges |
2024 | * are routed down the corresponding rope. |
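 *
 * Example: a DIRECT range whose ROUTE register's low rope bits read
 * 2 is reported only for the controller on rope 2; other ropes leave
 * @r zeroed and fall back to the distributed range.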
2025 | */ |
2026 | void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r) |
2027 | { |
2028 | struct parisc_device *sba_dev = parisc_parent(pci_hba); |
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2030 | char t = sba_dev->id.hw_type; |
2031 | int i; |
2032 | int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ |
2033 | |
2034 | BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT)); |
2035 | |
2036 | r->start = r->end = 0; |
2037 | |
2038 | /* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */ |
2039 | for (i=0; i<4; i++) { |
2040 | int base, size; |
2041 | void __iomem *reg = sba->sba_hpa + i*0x18; |
2042 | |
2043 | base = READ_REG32(reg + LMMIO_DIRECT0_BASE); |
2044 | if ((base & 1) == 0) |
2045 | continue; /* not enabled */ |
2046 | |
2047 | size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE); |
2048 | |
2049 | if ((size & (ROPES_PER_IOC-1)) != rope) |
2050 | continue; /* directed down different rope */ |
2051 | |
2052 | r->start = (base & ~1UL) | PCI_F_EXTEND; |
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
2054 | r->end = r->start + size; |
2055 | r->flags = IORESOURCE_MEM; |
2056 | } |
2057 | } |
2058 | |
2059 | |
2060 | /** |
2061 | * sba_distributed_lmmio - return portion of distributed LMMIO range |
2062 | * @pci_hba: The parisc device. |
2063 | * @r: resource PCI host controller wants start/end fields assigned. |
2064 | * |
2065 | * For the given parisc PCI controller, return portion of distributed LMMIO |
2066 | * range. The distributed LMMIO is always present and it's just a question |
2067 | * of the base address and size of the range. |
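 *
 * Worked example (hypothetical register value): if LMMIO_DIST_MASK
 * reads 0xff000000, ~mask covers 16MB; split across ROPES_PER_IOC == 8
 * ropes, each rope owns a 2MB window starting at base + rope * 2MB.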
2068 | */ |
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2070 | { |
2071 | struct parisc_device *sba_dev = parisc_parent(pci_hba); |
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2073 | char t = sba_dev->id.hw_type; |
2074 | int base, size; |
2075 | int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ |
2076 | |
2077 | BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT)); |
2078 | |
2079 | r->start = r->end = 0; |
2080 | |
2081 | base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE); |
2082 | if ((base & 1) == 0) { |
2083 | BUG(); /* Gah! Distr Range wasn't enabled! */ |
2084 | return; |
2085 | } |
2086 | |
2087 | r->start = (base & ~1UL) | PCI_F_EXTEND; |
2088 | |
2089 | size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC; |
2090 | r->start += rope * (size + 1); /* adjust base for this rope */ |
2091 | r->end = r->start + size; |
2092 | r->flags = IORESOURCE_MEM; |
2093 | } |
2094 | |