/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-arm940.S: utility functions for ARM940T
 *
 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
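
/*
 * The cache maintenance loops below walk the D-cache by index: each
 * pass builds a CP15 c7 index value with the cache line number in
 * bits [31:26] and the segment number in bits [5:4], covering all
 * CACHE_DSEGMENTS segments of CACHE_DENTRIES lines each.
 */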
	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm940_reset)
	.popsection

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries
 * in the specified address range, so the whole cache is flushed.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range, so the whole cache is invalidated.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range, so the whole cache is cleaned.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range		@ DMA_TO_DEVICE: clean only
	bcs	arm940_dma_inv_range		@ DMA_FROM_DEVICE: invalidate
	b	arm940_dma_flush_range		@ DMA_BIDIRECTIONAL: clean + invalidate
ENDPROC(arm940_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	ret	lr
ENDPROC(arm940_dma_unmap_area)

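	/*
	 * ARM940T has a single level of cache, so flushing up to the
	 * Level of Unification Inner Shareable is the same as flushing
	 * the whole cache; alias the LoUIS helper to flush_kern_cache_all.
	 */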
	.globl	arm940_flush_kern_cache_louis
	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1

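	/*
	 * Protection regions 1 and 2 cover RAM and flash; the pr_val
	 * macro packs the region base address, the encoded region size
	 * and the enable bit into the value written to the region
	 * registers below.
	 */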
	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE >> 12	@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

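	/*
	 * Build the CP15 control register value and return it in r0;
	 * the caller writes it back to enable the MPU and the caches.
	 */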
	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	ret	lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info