// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock,
 * and no other functions that may be using the locks for other purposes may
 * be called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function
 * that may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */

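/*
 * Virtual-address decomposition helpers for the two-level SGX page table:
 * the bits above PSB_PDE_SHIFT select the page-directory entry, and the
 * ten bits above PSB_PTE_SHIFT (hence the 0x3FF mask) select the entry
 * within a page table.
 */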
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

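/*
 * Flush the cache line containing addr on the local processor. The
 * psb_mmu_clflush() wrapper brackets the flush with full memory barriers
 * and degrades to a no-op on hardware without clflush support.
 */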
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}

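/*
 * Invalidate the on-chip directory cache by pulsing the INVALDC bit in the
 * bus-interface control register. Callers must hold driver->sem in write
 * mode; the flush is skipped unless needs_tlbflush is set or force is
 * nonzero.
 */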
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);

		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

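/*
 * Flush the SGX MMU: invalidate the directory cache when a TLB flush is
 * pending, otherwise just flush. Takes driver->sem in write mode and
 * notifies the MSVDX decoder, via msvdx_mmu_invaldc, that its MMU
 * mappings may be stale.
 */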
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

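/*
 * Bind a page directory to a hardware context by writing the address of
 * its directory page to the corresponding BIF directory-list base
 * register, then force a directory-cache invalidate.
 */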
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

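/* Advance addr to the next page-directory boundary, clamped to end. */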
static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

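/*
 * Build a page-table entry: the page-frame number goes in the upper bits
 * and the PSB_MMU_* type flags are translated into the cached, read-only
 * and write-only PTE bits, with PSB_PTE_VALID always set.
 */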
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

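/*
 * Allocate a page directory. Unless trap_pagefaults is set, the directory
 * and a dummy page table are pre-filled with entries that resolve every
 * unmapped address to a single dummy page, so stray GPU accesses never
 * fault. The 1024-slot tables array shadows the directory page with
 * kernel pointers to the corresponding psb_mmu_pt structures.
 */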
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap_local_page(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap_local(v);

	v = kmap_local_page(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap_local(v);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

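/*
 * Allocate a page table and fill it with invalid PTEs. If the directory
 * is already live in a hardware context, the freshly written entries are
 * clflushed, one cache line at a time, while the driver spinlock is held.
 */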
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

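/*
 * Return the page table covering addr, allocating and hooking up a new
 * one if none exists yet. On success the table's page is mapped with
 * kmap_atomic() and the driver spinlock is held; both are dropped by
 * psb_mmu_pt_unmap_unlock(). The allocation is re-checked under the lock,
 * so a racing allocator's table is reused and the spare one is freed.
 */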
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
						    unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

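/*
 * Like psb_mmu_pt_alloc_map_lock(), but returns NULL (with the lock
 * dropped) instead of allocating when no table covers addr.
 */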
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

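/*
 * Unmap a page table and release the driver spinlock. When the table no
 * longer holds any valid entries (count == 0), its directory entry is
 * pointed back at the invalid PDE and the table is freed.
 */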
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

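/*
 * Set up the MMU driver: allocate the default page directory, clear any
 * pending bus-interface fault, and probe for clflush support. clflush_add
 * is the span of device virtual address space whose PTEs occupy one CPU
 * cache line, so it is the stride used when flushing PTE ranges;
 * clflush_mask aligns addresses down to that stride.
 */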
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);

	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but
		 * not for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
			PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

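/*
 * Write back the CPU cache lines holding the PTEs for a (possibly tiled)
 * range of num_pages pages starting at address, so the GPU sees the
 * updated entries. A nonzero hw_tile_stride splits the range into rows of
 * desired_tile_stride pages, each row row_add bytes apart.
 */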
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}

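/*
 * Unmap a linear run of num_pages pages starting at address: each PTE is
 * rewritten to the invalid entry and the owning page table's use count is
 * dropped, after which the CPU cache and the SGX TLB are flushed if the
 * directory is bound to a hardware context.
 */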
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return;
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;

			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

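/*
 * Map num_pages physically contiguous page frames, starting at start_pfn,
 * at a linear run of device virtual addresses beginning at address.
 * Returns -ENOMEM if a page table cannot be allocated; entries written
 * before the failure are left in place.
 */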
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

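/*
 * Map an array of (not necessarily contiguous) struct pages at address,
 * optionally laid out in rows for tiled surfaces. This is the counterpart
 * of psb_mmu_remove_pages(); a minimal usage sketch for an untiled
 * buffer, with pd, pages, gpu_addr and npages assumed to exist at the
 * call site:
 *
 *	ret = psb_mmu_insert_pages(pd, pages, gpu_addr, npages, 0, 0,
 *				   PSB_MMU_CACHED_MEMORY);
 *	if (ret)
 *		return ret;
 *	...
 *	psb_mmu_remove_pages(pd, gpu_addr, npages, 0, 0);
 */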
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

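/*
 * Translate a device virtual address into a CPU page-frame number. An
 * address backed only by the invalid PTE/PDE resolves to the dummy page's
 * pfn when the dummy entries are valid, and to -EINVAL otherwise.
 */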
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}