/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/*
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
        /* Nothing to do */
}

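/*
 * A single "synci" invalidates the entire local icache on Octeon,
 * which is why the range-flush helper below ignores its start/end
 * arguments.
 */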
static inline void octeon_local_flush_icache(void)
{
        asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                                            unsigned long end)
{
        octeon_local_flush_icache();
}

/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
        extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
        int cpu;
        cpumask_t mask;
#endif

        mb();
        octeon_local_flush_icache();
#ifdef CONFIG_SMP
        preempt_disable();
        cpu = smp_processor_id();

        /*
         * If we have a vma structure, we only need to worry about
         * cores it has been used on.
         */
        if (vma)
                mask = *mm_cpumask(vma->vm_mm);
        else
                mask = *cpu_online_mask;
        cpumask_clear_cpu(cpu, &mask);
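        /*
         * On Octeon SOCs a dedicated SMP_ICACHE_FLUSH IPI makes the other
         * cores run their local icache flush; otherwise fall back to
         * smp_call_function_many().
         */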
#ifdef CONFIG_CAVIUM_OCTEON_SOC
        for_each_cpu(cpu, &mask)
                octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
#else
        smp_call_function_many(&mask,
                               (smp_call_func_t)octeon_local_flush_icache,
                               NULL, 1);
#endif

        preempt_enable();
#endif
}


/*
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
        /*
         * According to the R4K version of this file, CPUs without
         * dcache aliases don't need to do anything here
         */
}


/*
 * Flush a range of kernel addresses out of the icache
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * octeon_flush_cache_range - Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  beginning address for flush
 * @end:    ending address for flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * octeon_flush_cache_page - Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page, unsigned long pfn)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}

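/*
 * Octeon's dcache needs no flushing (see the comment at the top of this
 * file), so nothing should ever call this; trap any unexpected caller.
 */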
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        BUG();
}

/*
 * Probe Octeon's caches
 */
static void probe_octeon(void)
{
        unsigned long icache_size;
        unsigned long dcache_size;
        unsigned int config1;
        struct cpuinfo_mips *c = &current_cpu_data;
        int cputype = current_cpu_type();

        config1 = read_c0_config1();
        switch (cputype) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
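                /* Icache geometry comes from the standard Config1 IL/IS/IA fields */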
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size =
                        c->icache.sets * c->icache.ways * c->icache.linesz;
                c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
                c->dcache.linesz = 128;
                if (cputype == CPU_CAVIUM_OCTEON_PLUS)
                        c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
                else
                        c->dcache.sets = 1; /* CN3XXX has one Dcache set */
                c->dcache.ways = 64;
                dcache_size =
                        c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON2:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 8;
                c->icache.ways = 37;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON3:
                c->icache.linesz = 128;
                c->icache.sets = 16;
                c->icache.ways = 39;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                panic("Unsupported Cavium Networks CPU type");
                break;
        }

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

        if (smp_processor_id() == 0) {
                pr_info("Primary instruction cache %ldkB, %s, %d way, "
                        "%d sets, linesize %d bytes.\n",
                        icache_size >> 10,
                        cpu_has_vtag_icache ?
                        "virtually tagged" : "physically tagged",
                        c->icache.ways, c->icache.sets, c->icache.linesz);

                pr_info("Primary data cache %ldkB, %d-way, %d sets, "
                        "linesize %d bytes.\n",
                        dcache_size >> 10, c->dcache.ways,
                        c->dcache.sets, c->dcache.linesz);
        }
}

static void octeon_cache_error_setup(void)
{
        extern char except_vec2_octeon;
        set_handler(0x100, &except_vec2_octeon, 0x80);
}

/*
 * Setup the Octeon cache flush routines
 */
void octeon_cache_init(void)
{
        probe_octeon();

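        /* No dcache aliasing, so shared mappings only need page alignment */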
        shm_align_mask = PAGE_SIZE - 1;

        flush_cache_all = octeon_flush_icache_all;
        __flush_cache_all = octeon_flush_icache_all;
        flush_cache_mm = octeon_flush_cache_mm;
        flush_cache_page = octeon_flush_cache_page;
        flush_cache_range = octeon_flush_cache_range;
        flush_icache_all = octeon_flush_icache_all;
        flush_data_cache_page = octeon_flush_data_cache_page;
        flush_icache_range = octeon_flush_icache_range;
        local_flush_icache_range = local_octeon_flush_icache_range;
        __flush_icache_user_range = octeon_flush_icache_range;
        __local_flush_icache_user_range = local_octeon_flush_icache_range;

        __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

        build_clear_page();
        build_copy_page();

        board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

static void co_cache_error_call_notifiers(unsigned long val)
{
        int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

        if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
                u64 dcache_err;
                unsigned long coreid = cvmx_get_core_num();
                u64 icache_err = read_octeon_c0_icacheerr();

                if (val) {
                        dcache_err = cache_err_dcache[coreid];
                        cache_err_dcache[coreid] = 0;
                } else {
                        dcache_err = read_octeon_c0_dcacheerr();
                }

                pr_err("Core%lu: Cache error exception:\n", coreid);
                pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
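                /* Only report a unit whose CacheErr value has its valid bit (bit 0) set */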
                if (icache_err & 1) {
                        pr_err("CacheErr (Icache) == %llx\n",
                               (unsigned long long)icache_err);
                        write_octeon_c0_icacheerr(0);
                }
                if (dcache_err & 1) {
                        pr_err("CacheErr (Dcache) == %llx\n",
                               (unsigned long long)dcache_err);
                }
        }
}

/*
 * Called when the exception is recoverable
 */

asmlinkage void cache_parity_error_octeon_recoverable(void)
{
        co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
        co_cache_error_call_notifiers(1);
        panic("Can't handle cache error: nested exception");
}
