1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
 * linux/arch/arm/mm/tlb-v4.S
4 | * |
5 | * Copyright (C) 1997-2002 Russell King |
6 | * |
7 | * ARM architecture version 4 TLB handling functions. |
 * These assume split I/D TLBs, and no write buffer.
9 | * |
10 | * Processors: ARM720T |
11 | */ |
12 | #include <linux/linkage.h> |
13 | #include <linux/init.h> |
14 | #include <asm/assembler.h> |
15 | #include <asm/asm-offsets.h> |
16 | #include <asm/tlbflush.h> |
17 | #include "proc-macros.S" |
18 | |
/*
 *	v4_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified user address space.
 *
 *	- start - range start address (rounded down to a page boundary below)
 *	- end   - range end address (exclusive)
 *	- vma   - vm_area_struct describing the address space; the owning mm
 *		  is loaded from it via the vma_vm_mm macro.  (The previous
 *		  comment called this argument "mm", but r2 is the vma.)
 */
	.align	5
ENTRY(v4_flush_user_tlb_range)
	vma_vm_mm ip, r2			@ ip = vma->vm_mm
	act_mm	r3				@ r3 = current->active_mm
	eors	r3, ip, r3			@ flushing the active mm?
	retne	lr				@ no - an inactive mm has no
						@ live TLB entries to zap
.v4_flush_kern_tlb_range:			@ kernel flush enters here,
						@ bypassing the active_mm check
	bic	r0, r0, #0x0ff			@ round start down to a 4K page
	bic	r0, r0, #0xf00			@ boundary (clear low 12 bits)
1:	mcr	p15, 0, r0, c8, c7, 1		@ invalidate TLB entry for this MVA
	add	r0, r0, #PAGE_SZ		@ next page
	cmp	r0, r1
	blo	1b				@ loop while start < end
	ret	lr
43 | |
/*
 *	v4_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of TLB entries in the specified kernel
 *	address range.
 *
 *	- start - virtual address (may not be aligned)
 *	- end   - virtual address (may not be aligned)
 *
 *	Implemented as an alias for the .v4_flush_kern_tlb_range label
 *	inside v4_flush_user_tlb_range above: kernel mappings exist in
 *	every address space, so the user variant's active_mm ownership
 *	check is skipped and the per-page invalidate loop is entered
 *	directly.  No code is duplicated for the kernel case.
 */
	.globl v4_flush_kern_tlb_range
	.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
55 | |
	__INITDATA

	/*
	 * define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S):
	 * emits the v4 cpu_tlb_fns table pointing at the flush routines
	 * above, tagged with v4_tlb_flags.  Placed in __INITDATA, so it is
	 * presumably only consumed during boot-time CPU setup — see the
	 * define_tlb_functions macro in proc-macros.S for the exact layout.
	 */
	define_tlb_functions v4, v4_tlb_flags
60 | |