1 | /* |
2 | * Intel GTT (Graphics Translation Table) routines |
3 | * |
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
14 | * |
15 | * /fairy-tale-mode off |
16 | */ |
17 | |
18 | #include <linux/module.h> |
19 | #include <linux/pci.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/pagemap.h> |
22 | #include <linux/agp_backend.h> |
23 | #include <linux/iommu.h> |
24 | #include <linux/delay.h> |
25 | #include <asm/smp.h> |
26 | #include "agp.h" |
27 | #include "intel-agp.h" |
28 | #include <drm/intel-gtt.h> |
29 | #include <asm/set_memory.h> |
30 | |
31 | /* |
32 | * If we have Intel graphics, we're not going to have anything other than |
33 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
34 | * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
35 | * Only newer chipsets need to bother with this, of course. |
36 | */ |
37 | #ifdef CONFIG_INTEL_IOMMU |
38 | #define USE_PCI_DMA_API 1 |
39 | #else |
40 | #define USE_PCI_DMA_API 0 |
41 | #endif |
42 | |
43 | struct intel_gtt_driver { |
44 | unsigned int gen : 8; |
45 | unsigned int is_g33 : 1; |
46 | unsigned int is_pineview : 1; |
47 | unsigned int is_ironlake : 1; |
48 | unsigned int has_pgtbl_enable : 1; |
49 | unsigned int dma_mask_size : 8; |
50 | /* Chipset specific GTT setup */ |
51 | int (*setup)(void); |
	/* This should undo anything done in ->setup() except the unmapping
	 * of the mmio register file; that's done in the generic code. */
54 | void (*cleanup)(void); |
55 | void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); |
56 | /* Flags is a more or less chipset specific opaque value. |
57 | * For chipsets that need to support old ums (non-gem) code, this |
58 | * needs to be identical to the various supported agp memory types! */ |
59 | bool (*check_flags)(unsigned int flags); |
60 | void (*chipset_flush)(void); |
61 | }; |
62 | |
63 | static struct _intel_private { |
64 | const struct intel_gtt_driver *driver; |
65 | struct pci_dev *pcidev; /* device one */ |
66 | struct pci_dev *bridge_dev; |
67 | u8 __iomem *registers; |
68 | phys_addr_t gtt_phys_addr; |
69 | u32 PGETBL_save; |
70 | u32 __iomem *gtt; /* I915G */ |
71 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
72 | int num_dcache_entries; |
73 | void __iomem *i9xx_flush_page; |
74 | char *i81x_gtt_table; |
75 | struct resource ifp_resource; |
76 | int resource_valid; |
77 | struct page *scratch_page; |
78 | phys_addr_t scratch_page_dma; |
79 | int refcount; |
80 | /* Whether i915 needs to use the dmar apis or not. */ |
81 | unsigned int needs_dmar : 1; |
82 | phys_addr_t gma_bus_addr; |
83 | /* Size of memory reserved for graphics by the BIOS */ |
84 | resource_size_t stolen_size; |
85 | /* Total number of gtt entries. */ |
86 | unsigned int gtt_total_entries; |
87 | /* Part of the gtt that is mappable by the cpu, for those chips where |
88 | * this is not the full gtt. */ |
89 | unsigned int gtt_mappable_entries; |
90 | } intel_private; |
91 | |
92 | #define INTEL_GTT_GEN intel_private.driver->gen |
93 | #define IS_G33 intel_private.driver->is_g33 |
94 | #define IS_PINEVIEW intel_private.driver->is_pineview |
95 | #define IS_IRONLAKE intel_private.driver->is_ironlake |
96 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable |
97 | |
98 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
99 | static int intel_gtt_map_memory(struct page **pages, |
100 | unsigned int num_entries, |
101 | struct sg_table *st) |
102 | { |
103 | struct scatterlist *sg; |
104 | int i; |
105 | |
106 | DBG("try mapping %lu pages\n" , (unsigned long)num_entries); |
107 | |
108 | if (sg_alloc_table(st, num_entries, GFP_KERNEL)) |
109 | goto err; |
110 | |
111 | for_each_sg(st->sgl, sg, num_entries, i) |
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
113 | |
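	/* dma_map_sg() returns the number of DMA segments mapped, or 0 on failure. */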
114 | if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents, |
115 | DMA_BIDIRECTIONAL)) |
116 | goto err; |
117 | |
118 | return 0; |
119 | |
120 | err: |
121 | sg_free_table(st); |
122 | return -ENOMEM; |
123 | } |
124 | |
125 | static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) |
126 | { |
127 | struct sg_table st; |
128 | DBG("try unmapping %lu pages\n" , (unsigned long)mem->page_count); |
129 | |
130 | dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg, |
131 | DMA_BIDIRECTIONAL); |
132 | |
133 | st.sgl = sg_list; |
134 | st.orig_nents = st.nents = num_sg; |
135 | |
136 | sg_free_table(&st); |
137 | } |
138 | |
139 | static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) |
140 | { |
141 | return; |
142 | } |
143 | |
144 | /* Exists to support ARGB cursors */ |
145 | static struct page *i8xx_alloc_pages(void) |
146 | { |
147 | struct page *page; |
148 | |
	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
150 | if (page == NULL) |
151 | return NULL; |
152 | |
	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	atomic_inc(&agp_bridge->current_memory_agp);
159 | return page; |
160 | } |
161 | |
162 | static void i8xx_destroy_pages(struct page *page) |
163 | { |
164 | if (page == NULL) |
165 | return; |
166 | |
	set_pages_wb(page, 4);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
170 | } |
171 | #endif |
172 | |
173 | #define I810_GTT_ORDER 4 |
174 | static int i810_setup(void) |
175 | { |
176 | phys_addr_t reg_addr; |
177 | char *gtt_table; |
178 | |
179 | /* i81x does not preallocate the gtt. It's always 64kb in size. */ |
180 | gtt_table = alloc_gatt_pages(I810_GTT_ORDER); |
181 | if (gtt_table == NULL) |
182 | return -ENOMEM; |
183 | intel_private.i81x_gtt_table = gtt_table; |
184 | |
185 | reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); |
186 | |
	intel_private.registers = ioremap(reg_addr, KB(64));
188 | if (!intel_private.registers) |
189 | return -ENOMEM; |
190 | |
	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);
193 | |
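	/* The GTT entries themselves are exposed through the MMIO BAR,
	 * starting at offset I810_PTE_BASE. */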
194 | intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; |
195 | |
	if ((readl(intel_private.registers+I810_DRAM_CTL)
197 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { |
198 | dev_info(&intel_private.pcidev->dev, |
199 | "detected 4MB dedicated video ram\n" ); |
200 | intel_private.num_dcache_entries = 1024; |
201 | } |
202 | |
203 | return 0; |
204 | } |
205 | |
206 | static void i810_cleanup(void) |
207 | { |
	writel(0, intel_private.registers+I810_PGETBL_CTL);
209 | free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); |
210 | } |
211 | |
212 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
213 | static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, |
214 | int type) |
215 | { |
216 | int i; |
217 | |
218 | if ((pg_start + mem->page_count) |
219 | > intel_private.num_dcache_entries) |
220 | return -EINVAL; |
221 | |
222 | if (!mem->is_flushed) |
223 | global_cache_flush(); |
224 | |
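	/* Dcache entries map the chip's dedicated video memory 1:1, so the
	 * address programmed for entry i is simply its page offset. */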
225 | for (i = pg_start; i < (pg_start + mem->page_count); i++) { |
226 | dma_addr_t addr = i << PAGE_SHIFT; |
227 | intel_private.driver->write_entry(addr, |
228 | i, type); |
229 | } |
230 | wmb(); |
231 | |
232 | return 0; |
233 | } |
234 | |
235 | /* |
236 | * The i810/i830 requires a physical address to program its mouse |
237 | * pointer into hardware. |
238 | * However the Xserver still writes to it through the agp aperture. |
239 | */ |
240 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) |
241 | { |
242 | struct agp_memory *new; |
243 | struct page *page; |
244 | |
245 | switch (pg_count) { |
246 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); |
247 | break; |
248 | case 4: |
249 | /* kludge to get 4 physical pages for ARGB cursor */ |
250 | page = i8xx_alloc_pages(); |
251 | break; |
252 | default: |
253 | return NULL; |
254 | } |
255 | |
256 | if (page == NULL) |
257 | return NULL; |
258 | |
	new = agp_create_memory(pg_count);
260 | if (new == NULL) |
261 | return NULL; |
262 | |
263 | new->pages[0] = page; |
264 | if (pg_count == 4) { |
265 | /* kludge to get 4 physical pages for ARGB cursor */ |
266 | new->pages[1] = new->pages[0] + 1; |
267 | new->pages[2] = new->pages[1] + 1; |
268 | new->pages[3] = new->pages[2] + 1; |
269 | } |
270 | new->page_count = pg_count; |
271 | new->num_scratch_pages = pg_count; |
272 | new->type = AGP_PHYS_MEMORY; |
273 | new->physical = page_to_phys(new->pages[0]); |
274 | return new; |
275 | } |
276 | |
277 | static void intel_i810_free_by_type(struct agp_memory *curr) |
278 | { |
	agp_free_key(curr->key);
280 | if (curr->type == AGP_PHYS_MEMORY) { |
281 | if (curr->page_count == 4) |
			i8xx_destroy_pages(curr->pages[0]);
283 | else { |
284 | agp_bridge->driver->agp_destroy_page(curr->pages[0], |
285 | AGP_PAGE_DESTROY_UNMAP); |
286 | agp_bridge->driver->agp_destroy_page(curr->pages[0], |
287 | AGP_PAGE_DESTROY_FREE); |
288 | } |
		agp_free_page_array(curr);
290 | } |
	kfree(curr);
292 | } |
293 | #endif |
294 | |
295 | static int intel_gtt_setup_scratch_page(void) |
296 | { |
297 | struct page *page; |
298 | dma_addr_t dma_addr; |
299 | |
300 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
301 | if (page == NULL) |
302 | return -ENOMEM; |
	set_pages_uc(page, 1);
304 | |
305 | if (intel_private.needs_dmar) { |
306 | dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0, |
307 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
		if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
309 | __free_page(page); |
310 | return -EINVAL; |
311 | } |
312 | |
313 | intel_private.scratch_page_dma = dma_addr; |
314 | } else |
315 | intel_private.scratch_page_dma = page_to_phys(page); |
316 | |
317 | intel_private.scratch_page = page; |
318 | |
319 | return 0; |
320 | } |
321 | |
322 | static void i810_write_entry(dma_addr_t addr, unsigned int entry, |
323 | unsigned int flags) |
324 | { |
325 | u32 pte_flags = I810_PTE_VALID; |
326 | |
327 | switch (flags) { |
328 | case AGP_DCACHE_MEMORY: |
329 | pte_flags |= I810_PTE_LOCAL; |
330 | break; |
331 | case AGP_USER_CACHED_MEMORY: |
332 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
333 | break; |
334 | } |
335 | |
336 | writel_relaxed(addr | pte_flags, intel_private.gtt + entry); |
337 | } |
338 | |
339 | static resource_size_t intel_gtt_stolen_size(void) |
340 | { |
341 | u16 gmch_ctrl; |
342 | u8 rdct; |
343 | int local = 0; |
344 | static const int ddt[4] = { 0, 16, 32, 64 }; |
345 | resource_size_t stolen_size = 0; |
346 | |
347 | if (INTEL_GTT_GEN == 1) |
348 | return 0; /* no stolen mem on i81x */ |
349 | |
	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);
352 | |
353 | if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || |
354 | intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { |
355 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { |
356 | case I830_GMCH_GMS_STOLEN_512: |
357 | stolen_size = KB(512); |
358 | break; |
359 | case I830_GMCH_GMS_STOLEN_1024: |
360 | stolen_size = MB(1); |
361 | break; |
362 | case I830_GMCH_GMS_STOLEN_8192: |
363 | stolen_size = MB(8); |
364 | break; |
365 | case I830_GMCH_GMS_LOCAL: |
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
367 | stolen_size = (I830_RDRAM_ND(rdct) + 1) * |
368 | MB(ddt[I830_RDRAM_DDT(rdct)]); |
369 | local = 1; |
370 | break; |
371 | default: |
372 | stolen_size = 0; |
373 | break; |
374 | } |
375 | } else { |
376 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { |
377 | case I855_GMCH_GMS_STOLEN_1M: |
378 | stolen_size = MB(1); |
379 | break; |
380 | case I855_GMCH_GMS_STOLEN_4M: |
381 | stolen_size = MB(4); |
382 | break; |
383 | case I855_GMCH_GMS_STOLEN_8M: |
384 | stolen_size = MB(8); |
385 | break; |
386 | case I855_GMCH_GMS_STOLEN_16M: |
387 | stolen_size = MB(16); |
388 | break; |
389 | case I855_GMCH_GMS_STOLEN_32M: |
390 | stolen_size = MB(32); |
391 | break; |
392 | case I915_GMCH_GMS_STOLEN_48M: |
393 | stolen_size = MB(48); |
394 | break; |
395 | case I915_GMCH_GMS_STOLEN_64M: |
396 | stolen_size = MB(64); |
397 | break; |
398 | case G33_GMCH_GMS_STOLEN_128M: |
399 | stolen_size = MB(128); |
400 | break; |
401 | case G33_GMCH_GMS_STOLEN_256M: |
402 | stolen_size = MB(256); |
403 | break; |
404 | case INTEL_GMCH_GMS_STOLEN_96M: |
405 | stolen_size = MB(96); |
406 | break; |
407 | case INTEL_GMCH_GMS_STOLEN_160M: |
408 | stolen_size = MB(160); |
409 | break; |
410 | case INTEL_GMCH_GMS_STOLEN_224M: |
411 | stolen_size = MB(224); |
412 | break; |
413 | case INTEL_GMCH_GMS_STOLEN_352M: |
414 | stolen_size = MB(352); |
415 | break; |
416 | default: |
417 | stolen_size = 0; |
418 | break; |
419 | } |
420 | } |
421 | |
422 | if (stolen_size > 0) { |
423 | dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n" , |
424 | (u64)stolen_size / KB(1), local ? "local" : "stolen" ); |
425 | } else { |
426 | dev_info(&intel_private.bridge_dev->dev, |
427 | "no pre-allocated video memory detected\n" ); |
428 | stolen_size = 0; |
429 | } |
430 | |
431 | return stolen_size; |
432 | } |
433 | |
434 | static void i965_adjust_pgetbl_size(unsigned int size_flag) |
435 | { |
436 | u32 pgetbl_ctl, pgetbl_ctl2; |
437 | |
438 | /* ensure that ppgtt is disabled */ |
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
442 | |
443 | /* write the new ggtt size */ |
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
448 | } |
449 | |
450 | static unsigned int i965_gtt_total_entries(void) |
451 | { |
452 | int size; |
453 | u32 pgetbl_ctl; |
454 | u16 gmch_ctl; |
455 | |
	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);
458 | |
459 | if (INTEL_GTT_GEN == 5) { |
460 | switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { |
461 | case G4x_GMCH_SIZE_1M: |
462 | case G4x_GMCH_SIZE_VT_1M: |
463 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); |
464 | break; |
465 | case G4x_GMCH_SIZE_VT_1_5M: |
466 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); |
467 | break; |
468 | case G4x_GMCH_SIZE_2M: |
469 | case G4x_GMCH_SIZE_VT_2M: |
470 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); |
471 | break; |
472 | } |
473 | } |
474 | |
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
476 | |
477 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { |
478 | case I965_PGETBL_SIZE_128KB: |
479 | size = KB(128); |
480 | break; |
481 | case I965_PGETBL_SIZE_256KB: |
482 | size = KB(256); |
483 | break; |
484 | case I965_PGETBL_SIZE_512KB: |
485 | size = KB(512); |
486 | break; |
487 | /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ |
488 | case I965_PGETBL_SIZE_1MB: |
489 | size = KB(1024); |
490 | break; |
491 | case I965_PGETBL_SIZE_2MB: |
492 | size = KB(2048); |
493 | break; |
494 | case I965_PGETBL_SIZE_1_5MB: |
495 | size = KB(1024 + 512); |
496 | break; |
497 | default: |
498 | dev_info(&intel_private.pcidev->dev, |
499 | "unknown page table size, assuming 512KB\n" ); |
500 | size = KB(512); |
501 | } |
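	/* Each GTT entry is a 4-byte PTE, so the entry count is the page
	 * table size divided by 4. */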
502 | |
503 | return size/4; |
504 | } |
505 | |
506 | static unsigned int intel_gtt_total_entries(void) |
507 | { |
508 | if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) |
509 | return i965_gtt_total_entries(); |
510 | else { |
511 | /* On previous hardware, the GTT size was just what was |
512 | * required to map the aperture. |
513 | */ |
514 | return intel_private.gtt_mappable_entries; |
515 | } |
516 | } |
517 | |
518 | static unsigned int intel_gtt_mappable_entries(void) |
519 | { |
520 | unsigned int aperture_size; |
521 | |
522 | if (INTEL_GTT_GEN == 1) { |
523 | u32 smram_miscc; |
524 | |
		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);
527 | |
528 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) |
529 | == I810_GFX_MEM_WIN_32M) |
530 | aperture_size = MB(32); |
531 | else |
532 | aperture_size = MB(64); |
533 | } else if (INTEL_GTT_GEN == 2) { |
534 | u16 gmch_ctrl; |
535 | |
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
538 | |
539 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) |
540 | aperture_size = MB(64); |
541 | else |
542 | aperture_size = MB(128); |
543 | } else { |
544 | /* 9xx supports large sizes, just look at the length */ |
545 | aperture_size = pci_resource_len(intel_private.pcidev, 2); |
546 | } |
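	/* One GTT entry maps one 4K page, so the aperture size in pages is
	 * also the number of mappable entries. */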
547 | |
548 | return aperture_size >> PAGE_SHIFT; |
549 | } |
550 | |
551 | static void intel_gtt_teardown_scratch_page(void) |
552 | { |
	set_pages_wb(intel_private.scratch_page, 1);
554 | if (intel_private.needs_dmar) |
555 | dma_unmap_page(&intel_private.pcidev->dev, |
556 | intel_private.scratch_page_dma, PAGE_SIZE, |
557 | DMA_BIDIRECTIONAL); |
558 | __free_page(intel_private.scratch_page); |
559 | } |
560 | |
561 | static void intel_gtt_cleanup(void) |
562 | { |
563 | intel_private.driver->cleanup(); |
564 | |
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
567 | |
568 | intel_gtt_teardown_scratch_page(); |
569 | } |
570 | |
/* Certain Gen5 chipsets require idling the GPU before
572 | * unmapping anything from the GTT when VT-d is enabled. |
573 | */ |
574 | static inline int needs_ilk_vtd_wa(void) |
575 | { |
576 | const unsigned short gpu_devid = intel_private.pcidev->device; |
577 | |
578 | /* |
579 | * Query iommu subsystem to see if we need the workaround. Presumably |
580 | * that was loaded first. |
581 | */ |
582 | return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG || |
583 | gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && |
		device_iommu_mapped(&intel_private.pcidev->dev));
585 | } |
586 | |
587 | static bool intel_gtt_can_wc(void) |
588 | { |
589 | if (INTEL_GTT_GEN <= 2) |
590 | return false; |
591 | |
592 | if (INTEL_GTT_GEN >= 6) |
593 | return false; |
594 | |
595 | /* Reports of major corruption with ILK vt'd enabled */ |
596 | if (needs_ilk_vtd_wa()) |
597 | return false; |
598 | |
599 | return true; |
600 | } |
601 | |
602 | static int intel_gtt_init(void) |
603 | { |
604 | u32 gtt_map_size; |
605 | int ret, bar; |
606 | |
607 | ret = intel_private.driver->setup(); |
608 | if (ret != 0) |
609 | return ret; |
610 | |
611 | intel_private.gtt_mappable_entries = intel_gtt_mappable_entries(); |
612 | intel_private.gtt_total_entries = intel_gtt_total_entries(); |
613 | |
614 | /* save the PGETBL reg for resume */ |
615 | intel_private.PGETBL_save = |
		readl(intel_private.registers+I810_PGETBL_CTL)
617 | & ~I810_PGETBL_ENABLED; |
618 | /* we only ever restore the register when enabling the PGTBL... */ |
619 | if (HAS_PGTBL_EN) |
620 | intel_private.PGETBL_save |= I810_PGETBL_ENABLED; |
621 | |
622 | dev_info(&intel_private.bridge_dev->dev, |
623 | "detected gtt size: %dK total, %dK mappable\n" , |
624 | intel_private.gtt_total_entries * 4, |
625 | intel_private.gtt_mappable_entries * 4); |
626 | |
627 | gtt_map_size = intel_private.gtt_total_entries * 4; |
628 | |
629 | intel_private.gtt = NULL; |
630 | if (intel_gtt_can_wc()) |
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
633 | if (intel_private.gtt == NULL) |
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
636 | if (intel_private.gtt == NULL) { |
637 | intel_private.driver->cleanup(); |
		iounmap(intel_private.registers);
639 | return -ENOMEM; |
640 | } |
641 | |
642 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
643 | global_cache_flush(); /* FIXME: ? */ |
644 | #endif |
645 | |
646 | intel_private.stolen_size = intel_gtt_stolen_size(); |
647 | |
648 | intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
649 | |
650 | ret = intel_gtt_setup_scratch_page(); |
651 | if (ret != 0) { |
652 | intel_gtt_cleanup(); |
653 | return ret; |
654 | } |
655 | |
656 | if (INTEL_GTT_GEN <= 2) |
657 | bar = I810_GMADR_BAR; |
658 | else |
659 | bar = I915_GMADR_BAR; |
660 | |
	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
662 | return 0; |
663 | } |
664 | |
665 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
666 | static const struct aper_size_info_fixed intel_fake_agp_sizes[] = { |
667 | {32, 8192, 3}, |
668 | {64, 16384, 4}, |
669 | {128, 32768, 5}, |
670 | {256, 65536, 6}, |
671 | {512, 131072, 7}, |
672 | }; |
673 | |
674 | static int intel_fake_agp_fetch_size(void) |
675 | { |
676 | int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); |
677 | unsigned int aper_size; |
678 | int i; |
679 | |
680 | aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1); |
681 | |
682 | for (i = 0; i < num_sizes; i++) { |
683 | if (aper_size == intel_fake_agp_sizes[i].size) { |
684 | agp_bridge->current_size = |
685 | (void *) (intel_fake_agp_sizes + i); |
686 | return aper_size; |
687 | } |
688 | } |
689 | |
690 | return 0; |
691 | } |
692 | #endif |
693 | |
694 | static void i830_cleanup(void) |
695 | { |
696 | } |
697 | |
698 | /* The chipset_flush interface needs to get data that has already been |
699 | * flushed out of the CPU all the way out to main memory, because the GPU |
700 | * doesn't snoop those buffers. |
701 | * |
702 | * The 8xx series doesn't have the same lovely interface for flushing the |
703 | * chipset write buffers that the later chips do. According to the 865 |
704 | * specs, it's 64 octwords, or 1KB. So, to get those previous things in |
705 | * that buffer out, we just fill 1KB and clflush it out, on the assumption |
706 | * that it'll push whatever was in there out. It appears to work. |
707 | */ |
708 | static void i830_chipset_flush(void) |
709 | { |
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
711 | |
712 | /* Forcibly evict everything from the CPU write buffers. |
713 | * clflush appears to be insufficient. |
714 | */ |
715 | wbinvd_on_all_cpus(); |
716 | |
	/* We've only seen documentation for this magic bit on the 855GM;
	 * hopefully it exists on the other gen2 chipsets as well...
	 *
	 * Also works as advertised on my 845G.
	 */
	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
	       intel_private.registers+I830_HIC);
724 | |
	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
726 | if (time_after(jiffies, timeout)) |
727 | break; |
728 | |
729 | udelay(50); |
730 | } |
731 | } |
732 | |
733 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
734 | unsigned int flags) |
735 | { |
736 | u32 pte_flags = I810_PTE_VALID; |
737 | |
738 | if (flags == AGP_USER_CACHED_MEMORY) |
739 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
740 | |
741 | writel_relaxed(addr | pte_flags, intel_private.gtt + entry); |
742 | } |
743 | |
744 | bool intel_gmch_enable_gtt(void) |
745 | { |
746 | u8 __iomem *reg; |
747 | |
748 | if (INTEL_GTT_GEN == 2) { |
749 | u16 gmch_ctrl; |
750 | |
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
759 | if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { |
760 | dev_err(&intel_private.pcidev->dev, |
761 | "failed to enable the GTT: GMCH_CTRL=%x\n" , |
762 | gmch_ctrl); |
763 | return false; |
764 | } |
765 | } |
766 | |
767 | /* On the resume path we may be adjusting the PGTBL value, so |
768 | * be paranoid and flush all chipset write buffers... |
769 | */ |
770 | if (INTEL_GTT_GEN >= 3) |
		writel(0, intel_private.registers+GFX_FLSH_CNTL);
772 | |
773 | reg = intel_private.registers+I810_PGETBL_CTL; |
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
776 | dev_err(&intel_private.pcidev->dev, |
777 | "failed to enable the GTT: PGETBL=%x [expected %x]\n" , |
778 | readl(reg), intel_private.PGETBL_save); |
779 | return false; |
780 | } |
781 | |
782 | if (INTEL_GTT_GEN >= 3) |
		writel(0, intel_private.registers+GFX_FLSH_CNTL);
784 | |
785 | return true; |
786 | } |
787 | EXPORT_SYMBOL(intel_gmch_enable_gtt); |
788 | |
789 | static int i830_setup(void) |
790 | { |
791 | phys_addr_t reg_addr; |
792 | |
793 | reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR); |
794 | |
	intel_private.registers = ioremap(reg_addr, KB(64));
796 | if (!intel_private.registers) |
797 | return -ENOMEM; |
798 | |
799 | intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE; |
800 | |
801 | return 0; |
802 | } |
803 | |
804 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
805 | static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) |
806 | { |
807 | agp_bridge->gatt_table_real = NULL; |
808 | agp_bridge->gatt_table = NULL; |
809 | agp_bridge->gatt_bus_addr = 0; |
810 | |
811 | return 0; |
812 | } |
813 | |
814 | static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) |
815 | { |
816 | return 0; |
817 | } |
818 | |
819 | static int intel_fake_agp_configure(void) |
820 | { |
821 | if (!intel_gmch_enable_gtt()) |
822 | return -EIO; |
823 | |
824 | intel_private.clear_fake_agp = true; |
825 | agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; |
826 | |
827 | return 0; |
828 | } |
829 | #endif |
830 | |
831 | static bool i830_check_flags(unsigned int flags) |
832 | { |
833 | switch (flags) { |
834 | case 0: |
835 | case AGP_PHYS_MEMORY: |
836 | case AGP_USER_CACHED_MEMORY: |
837 | case AGP_USER_MEMORY: |
838 | return true; |
839 | } |
840 | |
841 | return false; |
842 | } |
843 | |
844 | void intel_gmch_gtt_insert_page(dma_addr_t addr, |
845 | unsigned int pg, |
846 | unsigned int flags) |
847 | { |
848 | intel_private.driver->write_entry(addr, pg, flags); |
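	/* Posting read: force the PTE write out to the GTT before flushing. */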
	readl(intel_private.gtt + pg);
850 | if (intel_private.driver->chipset_flush) |
851 | intel_private.driver->chipset_flush(); |
852 | } |
853 | EXPORT_SYMBOL(intel_gmch_gtt_insert_page); |
854 | |
855 | void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, |
856 | unsigned int pg_start, |
857 | unsigned int flags) |
858 | { |
859 | struct scatterlist *sg; |
860 | unsigned int len, m; |
861 | int i, j; |
862 | |
863 | j = pg_start; |
864 | |
	/* The sg list may have merged contiguous pages, but the GTT needs a
	 * separate entry for each 4K page. */
867 | for_each_sg(st->sgl, sg, st->nents, i) { |
868 | len = sg_dma_len(sg) >> PAGE_SHIFT; |
869 | for (m = 0; m < len; m++) { |
870 | dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
871 | intel_private.driver->write_entry(addr, j, flags); |
872 | j++; |
873 | } |
874 | } |
	readl(intel_private.gtt + j - 1);
876 | if (intel_private.driver->chipset_flush) |
877 | intel_private.driver->chipset_flush(); |
878 | } |
879 | EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries); |
880 | |
881 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
882 | static void intel_gmch_gtt_insert_pages(unsigned int first_entry, |
883 | unsigned int num_entries, |
884 | struct page **pages, |
885 | unsigned int flags) |
886 | { |
887 | int i, j; |
888 | |
889 | for (i = 0, j = first_entry; i < num_entries; i++, j++) { |
890 | dma_addr_t addr = page_to_phys(pages[i]); |
891 | intel_private.driver->write_entry(addr, |
892 | j, flags); |
893 | } |
894 | wmb(); |
895 | } |
896 | |
897 | static int intel_fake_agp_insert_entries(struct agp_memory *mem, |
898 | off_t pg_start, int type) |
899 | { |
900 | int ret = -EINVAL; |
901 | |
902 | if (intel_private.clear_fake_agp) { |
903 | int start = intel_private.stolen_size / PAGE_SIZE; |
904 | int end = intel_private.gtt_mappable_entries; |
		intel_gmch_gtt_clear_range(start, end - start);
906 | intel_private.clear_fake_agp = false; |
907 | } |
908 | |
909 | if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) |
910 | return i810_insert_dcache_entries(mem, pg_start, type); |
911 | |
912 | if (mem->page_count == 0) |
913 | goto out; |
914 | |
915 | if (pg_start + mem->page_count > intel_private.gtt_total_entries) |
916 | goto out_err; |
917 | |
918 | if (type != mem->type) |
919 | goto out_err; |
920 | |
921 | if (!intel_private.driver->check_flags(type)) |
922 | goto out_err; |
923 | |
924 | if (!mem->is_flushed) |
925 | global_cache_flush(); |
926 | |
927 | if (intel_private.needs_dmar) { |
928 | struct sg_table st; |
929 | |
		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
931 | if (ret != 0) |
932 | return ret; |
933 | |
934 | intel_gmch_gtt_insert_sg_entries(&st, pg_start, type); |
935 | mem->sg_list = st.sgl; |
936 | mem->num_sg = st.nents; |
937 | } else |
		intel_gmch_gtt_insert_pages(pg_start, mem->page_count,
					    mem->pages, type);
940 | |
941 | out: |
942 | ret = 0; |
943 | out_err: |
944 | mem->is_flushed = true; |
945 | return ret; |
946 | } |
947 | #endif |
948 | |
949 | void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) |
950 | { |
951 | unsigned int i; |
952 | |
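	/* Point every entry at the scratch page so the GPU never sees a
	 * stale mapping of already-freed memory. */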
953 | for (i = first_entry; i < (first_entry + num_entries); i++) { |
954 | intel_private.driver->write_entry(intel_private.scratch_page_dma, |
955 | i, 0); |
956 | } |
957 | wmb(); |
958 | } |
959 | EXPORT_SYMBOL(intel_gmch_gtt_clear_range); |
960 | |
961 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
962 | static int intel_fake_agp_remove_entries(struct agp_memory *mem, |
963 | off_t pg_start, int type) |
964 | { |
965 | if (mem->page_count == 0) |
966 | return 0; |
967 | |
968 | intel_gmch_gtt_clear_range(pg_start, mem->page_count); |
969 | |
970 | if (intel_private.needs_dmar) { |
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
972 | mem->sg_list = NULL; |
973 | mem->num_sg = 0; |
974 | } |
975 | |
976 | return 0; |
977 | } |
978 | |
979 | static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, |
980 | int type) |
981 | { |
982 | struct agp_memory *new; |
983 | |
984 | if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) { |
985 | if (pg_count != intel_private.num_dcache_entries) |
986 | return NULL; |
987 | |
		new = agp_create_memory(1);
989 | if (new == NULL) |
990 | return NULL; |
991 | |
992 | new->type = AGP_DCACHE_MEMORY; |
993 | new->page_count = pg_count; |
994 | new->num_scratch_pages = 0; |
		agp_free_page_array(new);
996 | return new; |
997 | } |
998 | if (type == AGP_PHYS_MEMORY) |
999 | return alloc_agpphysmem_i8xx(pg_count, type); |
1000 | /* always return NULL for other allocation types for now */ |
1001 | return NULL; |
1002 | } |
1003 | #endif |
1004 | |
1005 | static int intel_alloc_chipset_flush_resource(void) |
1006 | { |
1007 | int ret; |
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus,
				     &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);
1011 | |
1012 | return ret; |
1013 | } |
1014 | |
1015 | static void intel_i915_setup_chipset_flush(void) |
1016 | { |
1017 | int ret; |
1018 | u32 temp; |
1019 | |
	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1021 | if (!(temp & 0x1)) { |
1022 | intel_alloc_chipset_flush_resource(); |
1023 | intel_private.resource_valid = 1; |
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR,
				       (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1025 | } else { |
1026 | temp &= ~1; |
1027 | |
1028 | intel_private.resource_valid = 1; |
1029 | intel_private.ifp_resource.start = temp; |
1030 | intel_private.ifp_resource.end = temp + PAGE_SIZE; |
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a PnP resource, some don't */
1033 | if (ret) |
1034 | intel_private.resource_valid = 0; |
1035 | } |
1036 | } |
1037 | |
1038 | static void intel_i965_g33_setup_chipset_flush(void) |
1039 | { |
1040 | u32 temp_hi, temp_lo; |
1041 | int ret; |
1042 | |
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1045 | |
1046 | if (!(temp_lo & 0x1)) { |
1047 | |
1048 | intel_alloc_chipset_flush_resource(); |
1049 | |
1050 | intel_private.resource_valid = 1; |
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
				       upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR,
				       (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1054 | } else { |
1055 | u64 l64; |
1056 | |
1057 | temp_lo &= ~0x1; |
1058 | l64 = ((u64)temp_hi << 32) | temp_lo; |
1059 | |
1060 | intel_private.resource_valid = 1; |
1061 | intel_private.ifp_resource.start = l64; |
1062 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; |
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a PnP resource, some don't */
1065 | if (ret) |
1066 | intel_private.resource_valid = 0; |
1067 | } |
1068 | } |
1069 | |
1070 | static void intel_i9xx_setup_flush(void) |
1071 | { |
1072 | /* return if already configured */ |
1073 | if (intel_private.ifp_resource.start) |
1074 | return; |
1075 | |
1076 | if (INTEL_GTT_GEN == 6) |
1077 | return; |
1078 | |
1079 | /* setup a resource for this object */ |
	intel_private.ifp_resource.name = "Intel Flush Page";
1081 | intel_private.ifp_resource.flags = IORESOURCE_MEM; |
1082 | |
1083 | /* Setup chipset flush for 915 */ |
1084 | if (IS_G33 || INTEL_GTT_GEN >= 4) { |
1085 | intel_i965_g33_setup_chipset_flush(); |
1086 | } else { |
1087 | intel_i915_setup_chipset_flush(); |
1088 | } |
1089 | |
1090 | if (intel_private.ifp_resource.start) |
		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
1092 | if (!intel_private.i9xx_flush_page) |
1093 | dev_err(&intel_private.pcidev->dev, |
1094 | "can't ioremap flush page - no chipset flushing\n" ); |
1095 | } |
1096 | |
1097 | static void i9xx_cleanup(void) |
1098 | { |
1099 | if (intel_private.i9xx_flush_page) |
		iounmap(intel_private.i9xx_flush_page);
1101 | if (intel_private.resource_valid) |
		release_resource(&intel_private.ifp_resource);
1103 | intel_private.ifp_resource.start = 0; |
1104 | intel_private.resource_valid = 0; |
1105 | } |
1106 | |
1107 | static void i9xx_chipset_flush(void) |
1108 | { |
1109 | wmb(); |
1110 | if (intel_private.i9xx_flush_page) |
		writel(1, intel_private.i9xx_flush_page);
1112 | } |
1113 | |
1114 | static void i965_write_entry(dma_addr_t addr, |
1115 | unsigned int entry, |
1116 | unsigned int flags) |
1117 | { |
1118 | u32 pte_flags; |
1119 | |
1120 | pte_flags = I810_PTE_VALID; |
1121 | if (flags == AGP_USER_CACHED_MEMORY) |
1122 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
1123 | |
	/* Shift high bits down: these PTEs carry physical address
	 * bits 35:32 in PTE bits 7:4. */
1125 | addr |= (addr >> 28) & 0xf0; |
1126 | writel_relaxed(addr | pte_flags, intel_private.gtt + entry); |
1127 | } |
1128 | |
1129 | static int i9xx_setup(void) |
1130 | { |
1131 | phys_addr_t reg_addr; |
1132 | int size = KB(512); |
1133 | |
1134 | reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR); |
1135 | |
	intel_private.registers = ioremap(reg_addr, size);
1137 | if (!intel_private.registers) |
1138 | return -ENOMEM; |
1139 | |
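	/* Where the GTT lives relative to the MMIO BAR varies by generation:
	 * gen3 exposes it through its own PCI BAR, gen4 places it 512K into
	 * the register BAR and gen5 (Ironlake) 2M in. */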
1140 | switch (INTEL_GTT_GEN) { |
1141 | case 3: |
1142 | intel_private.gtt_phys_addr = |
1143 | pci_resource_start(intel_private.pcidev, I915_PTE_BAR); |
1144 | break; |
1145 | case 5: |
1146 | intel_private.gtt_phys_addr = reg_addr + MB(2); |
1147 | break; |
1148 | default: |
1149 | intel_private.gtt_phys_addr = reg_addr + KB(512); |
1150 | break; |
1151 | } |
1152 | |
1153 | intel_i9xx_setup_flush(); |
1154 | |
1155 | return 0; |
1156 | } |
1157 | |
1158 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
1159 | static const struct agp_bridge_driver intel_fake_agp_driver = { |
1160 | .owner = THIS_MODULE, |
1161 | .size_type = FIXED_APER_SIZE, |
1162 | .aperture_sizes = intel_fake_agp_sizes, |
1163 | .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), |
1164 | .configure = intel_fake_agp_configure, |
1165 | .fetch_size = intel_fake_agp_fetch_size, |
1166 | .cleanup = intel_gtt_cleanup, |
1167 | .agp_enable = intel_fake_agp_enable, |
1168 | .cache_flush = global_cache_flush, |
1169 | .create_gatt_table = intel_fake_agp_create_gatt_table, |
1170 | .free_gatt_table = intel_fake_agp_free_gatt_table, |
1171 | .insert_memory = intel_fake_agp_insert_entries, |
1172 | .remove_memory = intel_fake_agp_remove_entries, |
1173 | .alloc_by_type = intel_fake_agp_alloc_by_type, |
1174 | .free_by_type = intel_i810_free_by_type, |
1175 | .agp_alloc_page = agp_generic_alloc_page, |
1176 | .agp_alloc_pages = agp_generic_alloc_pages, |
1177 | .agp_destroy_page = agp_generic_destroy_page, |
1178 | .agp_destroy_pages = agp_generic_destroy_pages, |
1179 | }; |
1180 | #endif |
1181 | |
1182 | static const struct intel_gtt_driver i81x_gtt_driver = { |
1183 | .gen = 1, |
1184 | .has_pgtbl_enable = 1, |
1185 | .dma_mask_size = 32, |
1186 | .setup = i810_setup, |
1187 | .cleanup = i810_cleanup, |
1188 | .check_flags = i830_check_flags, |
1189 | .write_entry = i810_write_entry, |
1190 | }; |
1191 | static const struct intel_gtt_driver i8xx_gtt_driver = { |
1192 | .gen = 2, |
1193 | .has_pgtbl_enable = 1, |
1194 | .setup = i830_setup, |
1195 | .cleanup = i830_cleanup, |
1196 | .write_entry = i830_write_entry, |
1197 | .dma_mask_size = 32, |
1198 | .check_flags = i830_check_flags, |
1199 | .chipset_flush = i830_chipset_flush, |
1200 | }; |
1201 | static const struct intel_gtt_driver i915_gtt_driver = { |
1202 | .gen = 3, |
1203 | .has_pgtbl_enable = 1, |
1204 | .setup = i9xx_setup, |
1205 | .cleanup = i9xx_cleanup, |
1206 | /* i945 is the last gpu to need phys mem (for overlay and cursors). */ |
1207 | .write_entry = i830_write_entry, |
1208 | .dma_mask_size = 32, |
1209 | .check_flags = i830_check_flags, |
1210 | .chipset_flush = i9xx_chipset_flush, |
1211 | }; |
1212 | static const struct intel_gtt_driver g33_gtt_driver = { |
1213 | .gen = 3, |
1214 | .is_g33 = 1, |
1215 | .setup = i9xx_setup, |
1216 | .cleanup = i9xx_cleanup, |
1217 | .write_entry = i965_write_entry, |
1218 | .dma_mask_size = 36, |
1219 | .check_flags = i830_check_flags, |
1220 | .chipset_flush = i9xx_chipset_flush, |
1221 | }; |
1222 | static const struct intel_gtt_driver pineview_gtt_driver = { |
1223 | .gen = 3, |
1224 | .is_pineview = 1, .is_g33 = 1, |
1225 | .setup = i9xx_setup, |
1226 | .cleanup = i9xx_cleanup, |
1227 | .write_entry = i965_write_entry, |
1228 | .dma_mask_size = 36, |
1229 | .check_flags = i830_check_flags, |
1230 | .chipset_flush = i9xx_chipset_flush, |
1231 | }; |
1232 | static const struct intel_gtt_driver i965_gtt_driver = { |
1233 | .gen = 4, |
1234 | .has_pgtbl_enable = 1, |
1235 | .setup = i9xx_setup, |
1236 | .cleanup = i9xx_cleanup, |
1237 | .write_entry = i965_write_entry, |
1238 | .dma_mask_size = 36, |
1239 | .check_flags = i830_check_flags, |
1240 | .chipset_flush = i9xx_chipset_flush, |
1241 | }; |
1242 | static const struct intel_gtt_driver g4x_gtt_driver = { |
1243 | .gen = 5, |
1244 | .setup = i9xx_setup, |
1245 | .cleanup = i9xx_cleanup, |
1246 | .write_entry = i965_write_entry, |
1247 | .dma_mask_size = 36, |
1248 | .check_flags = i830_check_flags, |
1249 | .chipset_flush = i9xx_chipset_flush, |
1250 | }; |
1251 | static const struct intel_gtt_driver ironlake_gtt_driver = { |
1252 | .gen = 5, |
1253 | .is_ironlake = 1, |
1254 | .setup = i9xx_setup, |
1255 | .cleanup = i9xx_cleanup, |
1256 | .write_entry = i965_write_entry, |
1257 | .dma_mask_size = 36, |
1258 | .check_flags = i830_check_flags, |
1259 | .chipset_flush = i9xx_chipset_flush, |
1260 | }; |
1261 | |
/* Table describing the supported Intel GMCH chipsets: the PCI device id of
 * the integrated graphics device, its marketing name, and the GTT driver
 * that should be bound to it.
 */
1266 | static const struct intel_gtt_driver_description { |
1267 | unsigned int gmch_chip_id; |
1268 | char *name; |
1269 | const struct intel_gtt_driver *gtt_driver; |
1270 | } intel_gtt_chipsets[] = { |
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
1341 | { 0, NULL, NULL } |
1342 | }; |
1343 | |
1344 | static int find_gmch(u16 device) |
1345 | { |
1346 | struct pci_dev *gmch_device; |
1347 | |
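	/* The integrated graphics device is expected at PCI function 0; if
	 * the first match is another function, keep searching. */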
1348 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); |
1349 | if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { |
1350 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, |
						    device, gmch_device);
1352 | } |
1353 | |
1354 | if (!gmch_device) |
1355 | return 0; |
1356 | |
1357 | intel_private.pcidev = gmch_device; |
1358 | return 1; |
1359 | } |
1360 | |
1361 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, |
1362 | struct agp_bridge_data *bridge) |
1363 | { |
1364 | int i, mask; |
1365 | |
1366 | for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { |
1367 | if (gpu_pdev) { |
1368 | if (gpu_pdev->device == |
1369 | intel_gtt_chipsets[i].gmch_chip_id) { |
				intel_private.pcidev = pci_dev_get(gpu_pdev);
1371 | intel_private.driver = |
1372 | intel_gtt_chipsets[i].gtt_driver; |
1373 | |
1374 | break; |
1375 | } |
		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1377 | intel_private.driver = |
1378 | intel_gtt_chipsets[i].gtt_driver; |
1379 | break; |
1380 | } |
1381 | } |
1382 | |
1383 | if (!intel_private.driver) |
1384 | return 0; |
1385 | |
1386 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
1387 | if (bridge) { |
1388 | if (INTEL_GTT_GEN > 1) |
1389 | return 0; |
1390 | |
1391 | bridge->driver = &intel_fake_agp_driver; |
1392 | bridge->dev_private_data = &intel_private; |
1393 | bridge->dev = bridge_pdev; |
1394 | } |
1395 | #endif |
1396 | |
1397 | |
1398 | /* |
1399 | * Can be called from the fake agp driver but also directly from |
1400 | * drm/i915.ko. Hence we need to check whether everything is set up |
1401 | * already. |
1402 | */ |
1403 | if (intel_private.refcount++) |
1404 | return 1; |
1405 | |
	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1407 | |
	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1409 | |
1410 | if (bridge) { |
1411 | mask = intel_private.driver->dma_mask_size; |
		if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
			dev_err(&intel_private.pcidev->dev,
				"set gfx device dma mask %d-bit failed!\n",
				mask);
		else
			dma_set_coherent_mask(&intel_private.pcidev->dev,
					      DMA_BIT_MASK(mask));
1419 | } |
1420 | |
1421 | if (intel_gtt_init() != 0) { |
1422 | intel_gmch_remove(); |
1423 | |
1424 | return 0; |
1425 | } |
1426 | |
1427 | return 1; |
1428 | } |
1429 | EXPORT_SYMBOL(intel_gmch_probe); |
1430 | |
1431 | void intel_gmch_gtt_get(u64 *gtt_total, |
1432 | phys_addr_t *mappable_base, |
1433 | resource_size_t *mappable_end) |
1434 | { |
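	/* One 4K page per GTT entry: shifting by PAGE_SHIFT converts entry
	 * counts into sizes in bytes. */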
1435 | *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; |
1436 | *mappable_base = intel_private.gma_bus_addr; |
1437 | *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; |
1438 | } |
1439 | EXPORT_SYMBOL(intel_gmch_gtt_get); |
1440 | |
1441 | void intel_gmch_gtt_flush(void) |
1442 | { |
1443 | if (intel_private.driver->chipset_flush) |
1444 | intel_private.driver->chipset_flush(); |
1445 | } |
1446 | EXPORT_SYMBOL(intel_gmch_gtt_flush); |
1447 | |
1448 | void intel_gmch_remove(void) |
1449 | { |
1450 | if (--intel_private.refcount) |
1451 | return; |
1452 | |
1453 | if (intel_private.scratch_page) |
1454 | intel_gtt_teardown_scratch_page(); |
1455 | if (intel_private.pcidev) |
		pci_dev_put(intel_private.pcidev);
1457 | if (intel_private.bridge_dev) |
		pci_dev_put(intel_private.bridge_dev);
1459 | intel_private.driver = NULL; |
1460 | } |
1461 | EXPORT_SYMBOL(intel_gmch_remove); |
1462 | |
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
1465 | |