1 | //! System Control Block |
2 | |
3 | use core::ptr; |
4 | |
5 | use volatile_register::RW; |
6 | |
#[cfg(not(armv6m))]
use super::cpuid::CsselrCacheType;
#[cfg(not(armv6m))]
use super::CBP;
#[cfg(not(armv6m))]
use super::CPUID;
use super::SCB;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
16 | |
17 | /// Register block |
#[repr(C)]
19 | pub struct RegisterBlock { |
20 | /// Interrupt Control and State |
21 | pub icsr: RW<u32>, |
22 | |
23 | /// Vector Table Offset (not present on Cortex-M0 variants) |
24 | pub vtor: RW<u32>, |
25 | |
26 | /// Application Interrupt and Reset Control |
27 | pub aircr: RW<u32>, |
28 | |
29 | /// System Control |
30 | pub scr: RW<u32>, |
31 | |
32 | /// Configuration and Control |
33 | pub ccr: RW<u32>, |
34 | |
35 | /// System Handler Priority (word accessible only on Cortex-M0 variants) |
36 | /// |
37 | /// On ARMv7-M, `shpr[0]` points to SHPR1 |
38 | /// |
39 | /// On ARMv6-M, `shpr[0]` points to SHPR2 |
#[cfg(not(armv6m))]
pub shpr: [RW<u8>; 12],
#[cfg(armv6m)]
_reserved1: u32,
/// System Handler Priority (word accessible only on Cortex-M0 variants)
///
/// On ARMv7-M, `shpr[0]` points to SHPR1
///
/// On ARMv6-M, `shpr[0]` points to SHPR2
#[cfg(armv6m)]
pub shpr: [RW<u32>; 2],
51 | |
52 | /// System Handler Control and State |
53 | pub shcsr: RW<u32>, |
54 | |
55 | /// Configurable Fault Status (not present on Cortex-M0 variants) |
#[cfg(not(armv6m))]
pub cfsr: RW<u32>,
#[cfg(armv6m)]
_reserved2: u32,

/// HardFault Status (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
pub hfsr: RW<u32>,
#[cfg(armv6m)]
_reserved3: u32,

/// Debug Fault Status (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
pub dfsr: RW<u32>,
#[cfg(armv6m)]
_reserved4: u32,

/// MemManage Fault Address (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
pub mmfar: RW<u32>,
#[cfg(armv6m)]
_reserved5: u32,

/// BusFault Address (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
pub bfar: RW<u32>,
#[cfg(armv6m)]
_reserved6: u32,

/// Auxiliary Fault Status (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
pub afsr: RW<u32>,
#[cfg(armv6m)]
_reserved7: u32,

_reserved8: [u32; 18],

/// Coprocessor Access Control (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
pub cpacr: RW<u32>,
#[cfg(armv6m)]
_reserved9: u32,
98 | } |
99 | |
100 | /// FPU access mode |
#[cfg(has_fpu)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
103 | pub enum FpuAccessMode { |
104 | /// FPU is not accessible |
105 | Disabled, |
106 | /// FPU is accessible in Privileged and User mode |
107 | Enabled, |
108 | /// FPU is accessible in Privileged mode only |
109 | Privileged, |
110 | } |
111 | |
#[cfg(has_fpu)]
113 | mod fpu_consts { |
114 | pub const SCB_CPACR_FPU_MASK: u32 = 0b11_11 << 20; |
115 | pub const SCB_CPACR_FPU_ENABLE: u32 = 0b01_01 << 20; |
116 | pub const SCB_CPACR_FPU_USER: u32 = 0b10_10 << 20; |
117 | } |
118 | |
#[cfg(has_fpu)]
use self::fpu_consts::*;

#[cfg(has_fpu)]
123 | impl SCB { |
124 | /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)` |
#[inline]
126 | pub fn disable_fpu(&mut self) { |
127 | self.set_fpu_access_mode(FpuAccessMode::Disabled) |
128 | } |
129 | |
130 | /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)` |
#[inline]
132 | pub fn enable_fpu(&mut self) { |
133 | self.set_fpu_access_mode(FpuAccessMode::Enabled) |
134 | } |
135 | |
136 | /// Gets FPU access mode |
#[inline]
138 | pub fn fpu_access_mode() -> FpuAccessMode { |
139 | // NOTE(unsafe) atomic read operation with no side effects |
140 | let cpacr = unsafe { (*Self::PTR).cpacr.read() }; |
141 | |
142 | if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER { |
143 | FpuAccessMode::Enabled |
144 | } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE { |
145 | FpuAccessMode::Privileged |
146 | } else { |
147 | FpuAccessMode::Disabled |
148 | } |
149 | } |
150 | |
151 | /// Sets FPU access mode |
152 | /// |
153 | /// *IMPORTANT* Any function that runs fully or partly with the FPU disabled must *not* take any |
154 | /// floating-point arguments or have any floating-point local variables. Because the compiler |
155 | /// might inline such a function into a caller that does have floating-point arguments or |
/// variables, any such function must also be marked `#[inline(never)]`.
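///
/// # Example
///
/// A minimal usage sketch, assuming the `cortex_m::Peripherals` singleton is still available:
///
/// ```ignore
/// let mut p = cortex_m::Peripherals::take().unwrap();
/// // Allow FPU access from privileged code only.
/// p.SCB.set_fpu_access_mode(FpuAccessMode::Privileged);
/// ```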
#[inline]
158 | pub fn set_fpu_access_mode(&mut self, mode: FpuAccessMode) { |
159 | let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK; |
160 | match mode { |
161 | FpuAccessMode::Disabled => (), |
162 | FpuAccessMode::Privileged => cpacr |= SCB_CPACR_FPU_ENABLE, |
163 | FpuAccessMode::Enabled => cpacr |= SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER, |
164 | } |
165 | unsafe { self.cpacr.write(cpacr) } |
166 | } |
167 | } |
168 | |
169 | impl SCB { |
170 | /// Returns the active exception number |
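///
/// # Example
///
/// A sketch of dispatching on the active vector; the handler bodies are placeholders:
///
/// ```ignore
/// use cortex_m::peripheral::scb::VectActive;
///
/// match cortex_m::peripheral::SCB::vect_active() {
///     VectActive::ThreadMode => { /* no exception is being serviced */ }
///     VectActive::Exception(e) => { /* core exception `e` is active */ }
///     VectActive::Interrupt { irqn } => { /* device interrupt `irqn` is active */ }
/// }
/// ```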
#[inline]
172 | pub fn vect_active() -> VectActive { |
// NOTE(unsafe): atomic read with no side effects; volatile, as ICSR is a hardware register
let icsr = unsafe { ptr::read_volatile(&(*SCB::PTR).icsr as *const _ as *const u32) };
174 | |
175 | match icsr as u8 { |
176 | 0 => VectActive::ThreadMode, |
177 | 2 => VectActive::Exception(Exception::NonMaskableInt), |
178 | 3 => VectActive::Exception(Exception::HardFault), |
#[cfg(not(armv6m))]
4 => VectActive::Exception(Exception::MemoryManagement),
#[cfg(not(armv6m))]
5 => VectActive::Exception(Exception::BusFault),
#[cfg(not(armv6m))]
6 => VectActive::Exception(Exception::UsageFault),
#[cfg(any(armv8m, native))]
7 => VectActive::Exception(Exception::SecureFault),
11 => VectActive::Exception(Exception::SVCall),
#[cfg(not(armv6m))]
12 => VectActive::Exception(Exception::DebugMonitor),
190 | 14 => VectActive::Exception(Exception::PendSV), |
191 | 15 => VectActive::Exception(Exception::SysTick), |
192 | irqn => VectActive::Interrupt { irqn: irqn - 16 }, |
193 | } |
194 | } |
195 | } |
196 | |
197 | /// Processor core exceptions (internal interrupts) |
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "std", derive(PartialOrd, Hash))]
201 | pub enum Exception { |
202 | /// Non maskable interrupt |
203 | NonMaskableInt, |
204 | |
205 | /// Hard fault interrupt |
206 | HardFault, |
207 | |
208 | /// Memory management interrupt (not present on Cortex-M0 variants) |
#[cfg(not(armv6m))]
MemoryManagement,

/// Bus fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
BusFault,

/// Usage fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
UsageFault,

/// Secure fault interrupt (only on ARMv8-M)
#[cfg(any(armv8m, native))]
SecureFault,

/// SV call interrupt
SVCall,

/// Debug monitor interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
229 | DebugMonitor, |
230 | |
231 | /// Pend SV interrupt |
232 | PendSV, |
233 | |
234 | /// System Tick interrupt |
235 | SysTick, |
236 | } |
237 | |
238 | impl Exception { |
239 | /// Returns the IRQ number of this `Exception` |
240 | /// |
/// The return value is always within the closed range `[-14, -1]`
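///
/// For example, the values at the ends of that range (taken from the match below):
///
/// ```ignore
/// assert_eq!(Exception::NonMaskableInt.irqn(), -14);
/// assert_eq!(Exception::SysTick.irqn(), -1);
/// ```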
#[inline]
243 | pub fn irqn(self) -> i8 { |
244 | match self { |
245 | Exception::NonMaskableInt => -14, |
246 | Exception::HardFault => -13, |
#[cfg(not(armv6m))]
Exception::MemoryManagement => -12,
#[cfg(not(armv6m))]
Exception::BusFault => -11,
#[cfg(not(armv6m))]
Exception::UsageFault => -10,
#[cfg(any(armv8m, native))]
Exception::SecureFault => -9,
Exception::SVCall => -5,
#[cfg(not(armv6m))]
257 | Exception::DebugMonitor => -4, |
258 | Exception::PendSV => -2, |
259 | Exception::SysTick => -1, |
260 | } |
261 | } |
262 | } |
263 | |
264 | /// Active exception number |
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "std", derive(PartialOrd, Hash))]
268 | pub enum VectActive { |
269 | /// Thread mode |
270 | ThreadMode, |
271 | |
272 | /// Processor core exception (internal interrupts) |
273 | Exception(Exception), |
274 | |
275 | /// Device specific exception (external interrupts) |
276 | Interrupt { |
/// Interrupt number. This number is always within the half-open range `[0, 240)`
278 | irqn: u8, |
279 | }, |
280 | } |
281 | |
282 | impl VectActive { |
283 | /// Converts a `byte` into `VectActive` |
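///
/// Returns `None` if `vect_active` does not map to a valid vector. A few sample values,
/// matching the encoding used by [`SCB::vect_active`]:
///
/// ```ignore
/// assert_eq!(VectActive::from(0), Some(VectActive::ThreadMode));
/// assert_eq!(VectActive::from(15), Some(VectActive::Exception(Exception::SysTick)));
/// // External interrupts are offset by the 16 core exception slots:
/// assert_eq!(VectActive::from(16), Some(VectActive::Interrupt { irqn: 0 }));
/// assert_eq!(VectActive::from(1), None);
/// ```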
#[inline]
285 | pub fn from(vect_active: u8) -> Option<Self> { |
286 | Some(match vect_active { |
287 | 0 => VectActive::ThreadMode, |
288 | 2 => VectActive::Exception(Exception::NonMaskableInt), |
289 | 3 => VectActive::Exception(Exception::HardFault), |
#[cfg(not(armv6m))]
4 => VectActive::Exception(Exception::MemoryManagement),
#[cfg(not(armv6m))]
5 => VectActive::Exception(Exception::BusFault),
#[cfg(not(armv6m))]
6 => VectActive::Exception(Exception::UsageFault),
#[cfg(any(armv8m, native))]
7 => VectActive::Exception(Exception::SecureFault),
11 => VectActive::Exception(Exception::SVCall),
#[cfg(not(armv6m))]
12 => VectActive::Exception(Exception::DebugMonitor),
14 => VectActive::Exception(Exception::PendSV),
15 => VectActive::Exception(Exception::SysTick),
// Subtract the 16 core exception slots so that `irqn` matches the numbering
// used by `SCB::vect_active` and the NVIC
irqn if irqn >= 16 => VectActive::Interrupt { irqn: irqn - 16 },
_ => return None,
305 | }) |
306 | } |
307 | } |
308 | |
#[cfg(not(armv6m))]
310 | mod scb_consts { |
311 | pub const SCB_CCR_IC_MASK: u32 = 1 << 17; |
312 | pub const SCB_CCR_DC_MASK: u32 = 1 << 16; |
313 | } |
314 | |
#[cfg(not(armv6m))]
use self::scb_consts::*;

#[cfg(not(armv6m))]
319 | impl SCB { |
320 | /// Enables I-cache if currently disabled. |
321 | /// |
322 | /// This operation first invalidates the entire I-cache. |
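///
/// A minimal usage sketch, assuming the `cortex_m::Peripherals` singleton is still available:
///
/// ```ignore
/// let mut p = cortex_m::Peripherals::take().unwrap();
/// p.SCB.enable_icache();
/// ```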
#[inline]
324 | pub fn enable_icache(&mut self) { |
325 | // Don't do anything if I-cache is already enabled |
326 | if Self::icache_enabled() { |
327 | return; |
328 | } |
329 | |
330 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
331 | let mut cbp = unsafe { CBP::new() }; |
332 | |
333 | // Invalidate I-cache |
334 | cbp.iciallu(); |
335 | |
336 | // Enable I-cache |
337 | extern "C" { |
338 | // see asm-v7m.s |
339 | fn __enable_icache(); |
340 | } |
341 | |
342 | // NOTE(unsafe): The asm routine manages exclusive access to the SCB |
343 | // registers and applies the proper barriers; it is technically safe on |
344 | // its own, and is only `unsafe` here because it's `extern "C"`. |
345 | unsafe { |
346 | __enable_icache(); |
347 | } |
348 | } |
349 | |
350 | /// Disables I-cache if currently enabled. |
351 | /// |
352 | /// This operation invalidates the entire I-cache after disabling. |
#[inline]
354 | pub fn disable_icache(&mut self) { |
355 | // Don't do anything if I-cache is already disabled |
356 | if !Self::icache_enabled() { |
357 | return; |
358 | } |
359 | |
360 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
361 | let mut cbp = unsafe { CBP::new() }; |
362 | |
363 | // Disable I-cache |
364 | // NOTE(unsafe): We have synchronised access by &mut self |
365 | unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) }; |
366 | |
367 | // Invalidate I-cache |
368 | cbp.iciallu(); |
369 | |
370 | crate::asm::dsb(); |
371 | crate::asm::isb(); |
372 | } |
373 | |
374 | /// Returns whether the I-cache is currently enabled. |
#[inline(always)]
376 | pub fn icache_enabled() -> bool { |
377 | crate::asm::dsb(); |
378 | crate::asm::isb(); |
379 | |
380 | // NOTE(unsafe): atomic read with no side effects |
381 | unsafe { (*Self::PTR).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK } |
382 | } |
383 | |
384 | /// Invalidates the entire I-cache. |
#[inline]
386 | pub fn invalidate_icache(&mut self) { |
387 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
388 | let mut cbp = unsafe { CBP::new() }; |
389 | |
390 | // Invalidate I-cache |
391 | cbp.iciallu(); |
392 | |
393 | crate::asm::dsb(); |
394 | crate::asm::isb(); |
395 | } |
396 | |
397 | /// Enables D-cache if currently disabled. |
398 | /// |
399 | /// This operation first invalidates the entire D-cache, ensuring it does |
400 | /// not contain stale values before being enabled. |
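///
/// A minimal usage sketch, assuming the `cortex_m::Peripherals` singleton is still available:
///
/// ```ignore
/// let mut p = cortex_m::Peripherals::take().unwrap();
/// // CPUID is borrowed to look up the cache geometry for the initial invalidation.
/// p.SCB.enable_dcache(&mut p.CPUID);
/// ```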
#[inline]
402 | pub fn enable_dcache(&mut self, cpuid: &mut CPUID) { |
403 | // Don't do anything if D-cache is already enabled |
404 | if Self::dcache_enabled() { |
405 | return; |
406 | } |
407 | |
408 | // Invalidate anything currently in the D-cache |
409 | unsafe { self.invalidate_dcache(cpuid) }; |
410 | |
411 | // Now turn on the D-cache |
412 | extern "C" { |
413 | // see asm-v7m.s |
414 | fn __enable_dcache(); |
415 | } |
416 | |
417 | // NOTE(unsafe): The asm routine manages exclusive access to the SCB |
418 | // registers and applies the proper barriers; it is technically safe on |
419 | // its own, and is only `unsafe` here because it's `extern "C"`. |
420 | unsafe { |
421 | __enable_dcache(); |
422 | } |
423 | } |
424 | |
425 | /// Disables D-cache if currently enabled. |
426 | /// |
427 | /// This operation subsequently cleans and invalidates the entire D-cache, |
428 | /// ensuring all contents are safely written back to main memory after disabling. |
#[inline]
430 | pub fn disable_dcache(&mut self, cpuid: &mut CPUID) { |
431 | // Don't do anything if D-cache is already disabled |
432 | if !Self::dcache_enabled() { |
433 | return; |
434 | } |
435 | |
436 | // Turn off the D-cache |
437 | // NOTE(unsafe): We have synchronised access by &mut self |
438 | unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) }; |
439 | |
440 | // Clean and invalidate whatever was left in it |
441 | self.clean_invalidate_dcache(cpuid); |
442 | } |
443 | |
444 | /// Returns whether the D-cache is currently enabled. |
#[inline]
446 | pub fn dcache_enabled() -> bool { |
447 | crate::asm::dsb(); |
448 | crate::asm::isb(); |
449 | |
450 | // NOTE(unsafe) atomic read with no side effects |
451 | unsafe { (*Self::PTR).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK } |
452 | } |
453 | |
454 | /// Invalidates the entire D-cache. |
455 | /// |
/// Note that calling this while the D-cache is enabled will probably wipe out the
/// stack, depending on optimisations, making it impossible to return to the call point.
///
/// It's used immediately before enabling the D-cache, but is not exported publicly.
#[inline]
461 | unsafe fn invalidate_dcache(&mut self, cpuid: &mut CPUID) { |
462 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
463 | let mut cbp = CBP::new(); |
464 | |
465 | // Read number of sets and ways |
466 | let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); |
467 | |
468 | // Invalidate entire D-cache |
469 | for set in 0..sets { |
470 | for way in 0..ways { |
471 | cbp.dcisw(set, way); |
472 | } |
473 | } |
474 | |
475 | crate::asm::dsb(); |
476 | crate::asm::isb(); |
477 | } |
478 | |
479 | /// Cleans the entire D-cache. |
480 | /// |
481 | /// This function causes everything in the D-cache to be written back to main memory, |
482 | /// overwriting whatever is already there. |
#[inline]
484 | pub fn clean_dcache(&mut self, cpuid: &mut CPUID) { |
485 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
486 | let mut cbp = unsafe { CBP::new() }; |
487 | |
488 | // Read number of sets and ways |
489 | let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); |
490 | |
491 | for set in 0..sets { |
492 | for way in 0..ways { |
493 | cbp.dccsw(set, way); |
494 | } |
495 | } |
496 | |
497 | crate::asm::dsb(); |
498 | crate::asm::isb(); |
499 | } |
500 | |
501 | /// Cleans and invalidates the entire D-cache. |
502 | /// |
503 | /// This function causes everything in the D-cache to be written back to main memory, |
504 | /// and then marks the entire D-cache as invalid, causing future reads to first fetch |
505 | /// from main memory. |
#[inline]
507 | pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) { |
508 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
509 | let mut cbp = unsafe { CBP::new() }; |
510 | |
511 | // Read number of sets and ways |
512 | let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); |
513 | |
514 | for set in 0..sets { |
515 | for way in 0..ways { |
516 | cbp.dccisw(set, way); |
517 | } |
518 | } |
519 | |
520 | crate::asm::dsb(); |
521 | crate::asm::isb(); |
522 | } |
523 | |
524 | /// Invalidates D-cache by address. |
525 | /// |
526 | /// * `addr`: The address to invalidate, which must be cache-line aligned. |
527 | /// * `size`: Number of bytes to invalidate, which must be a multiple of the cache line size. |
528 | /// |
529 | /// Invalidates D-cache cache lines, starting from the first line containing `addr`, |
530 | /// finishing once at least `size` bytes have been invalidated. |
531 | /// |
532 | /// Invalidation causes the next read access to memory to be fetched from main memory instead |
533 | /// of the cache. |
534 | /// |
535 | /// # Cache Line Sizes |
536 | /// |
537 | /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed |
538 | /// to 32 bytes, which means `addr` must be 32-byte aligned and `size` must be a multiple |
539 | /// of 32. At the time of writing, no other Cortex-M cores have data caches. |
540 | /// |
541 | /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size, |
542 | /// other data before or after the desired memory would also be invalidated, which can very |
543 | /// easily cause memory corruption and undefined behaviour. |
544 | /// |
545 | /// # Safety |
546 | /// |
547 | /// After invalidating, the next read of invalidated data will be from main memory. This may |
548 | /// cause recent writes to be lost, potentially including writes that initialized objects. |
549 | /// Therefore, this method may cause uninitialized memory or invalid values to be read, |
550 | /// resulting in undefined behaviour. You must ensure that main memory contains valid and |
551 | /// initialized values before invalidating. |
552 | /// |
553 | /// `addr` **must** be aligned to the size of the cache lines, and `size` **must** be a |
554 | /// multiple of the cache line size, otherwise this function will invalidate other memory, |
555 | /// easily leading to memory corruption and undefined behaviour. This precondition is checked |
556 | /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid |
557 | /// a runtime-dependent `panic!()` call. |
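///
/// # Example
///
/// A hedged sketch: invalidating a DMA receive buffer before the CPU reads it. The
/// `Aligned` wrapper, the buffer size, the completed-DMA precondition, and the `scb`
/// handle (a `&mut SCB`) are all assumptions for illustration.
///
/// ```ignore
/// #[repr(align(32))]
/// struct Aligned([u8; 64]);
///
/// static mut RX_BUF: Aligned = Aligned([0; 64]);
///
/// // SAFETY: the buffer is 32-byte aligned, its size is a multiple of 32, and the
/// // DMA transfer into it has completed, so main memory holds valid data.
/// unsafe {
///     let addr = core::ptr::addr_of!(RX_BUF.0) as usize;
///     scb.invalidate_dcache_by_address(addr, 64);
/// }
/// ```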
#[inline]
559 | pub unsafe fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) { |
560 | // No-op zero sized operations |
561 | if size == 0 { |
562 | return; |
563 | } |
564 | |
565 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
566 | let mut cbp = CBP::new(); |
567 | |
568 | // dminline is log2(num words), so 2**dminline * 4 gives size in bytes |
569 | let dminline = CPUID::cache_dminline(); |
570 | let line_size = (1 << dminline) * 4; |
571 | |
572 | debug_assert!((addr & (line_size - 1)) == 0); |
573 | debug_assert!((size & (line_size - 1)) == 0); |
574 | |
575 | crate::asm::dsb(); |
576 | |
577 | // Find number of cache lines to invalidate |
578 | let num_lines = ((size - 1) / line_size) + 1; |
579 | |
580 | // Compute address of first cache line |
581 | let mask = 0xFFFF_FFFF - (line_size - 1); |
582 | let mut addr = addr & mask; |
583 | |
584 | for _ in 0..num_lines { |
585 | cbp.dcimvac(addr as u32); |
586 | addr += line_size; |
587 | } |
588 | |
589 | crate::asm::dsb(); |
590 | crate::asm::isb(); |
591 | } |
592 | |
593 | /// Invalidates an object from the D-cache. |
594 | /// |
595 | /// * `obj`: The object to invalidate. |
596 | /// |
597 | /// Invalidates D-cache starting from the first cache line containing `obj`, |
598 | /// continuing to invalidate cache lines until all of `obj` has been invalidated. |
599 | /// |
600 | /// Invalidation causes the next read access to memory to be fetched from main memory instead |
601 | /// of the cache. |
602 | /// |
603 | /// # Cache Line Sizes |
604 | /// |
605 | /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed |
606 | /// to 32 bytes, which means `obj` must be 32-byte aligned, and its size must be a multiple |
607 | /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches. |
608 | /// |
609 | /// If `obj` is not cache-line aligned, or its size is not a multiple of the cache line size, |
610 | /// other data before or after the desired memory would also be invalidated, which can very |
611 | /// easily cause memory corruption and undefined behaviour. |
612 | /// |
613 | /// # Safety |
614 | /// |
615 | /// After invalidating, `obj` will be read from main memory on next access. This may cause |
616 | /// recent writes to `obj` to be lost, potentially including the write that initialized it. |
617 | /// Therefore, this method may cause uninitialized memory or invalid values to be read, |
618 | /// resulting in undefined behaviour. You must ensure that main memory contains a valid and |
619 | /// initialized value for T before invalidating `obj`. |
620 | /// |
621 | /// `obj` **must** be aligned to the size of the cache lines, and its size **must** be a |
622 | /// multiple of the cache line size, otherwise this function will invalidate other memory, |
623 | /// easily leading to memory corruption and undefined behaviour. This precondition is checked |
624 | /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid |
625 | /// a runtime-dependent `panic!()` call. |
#[inline]
627 | pub unsafe fn invalidate_dcache_by_ref<T>(&mut self, obj: &mut T) { |
628 | self.invalidate_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>()); |
629 | } |
630 | |
631 | /// Invalidates a slice from the D-cache. |
632 | /// |
633 | /// * `slice`: The slice to invalidate. |
634 | /// |
635 | /// Invalidates D-cache starting from the first cache line containing members of `slice`, |
636 | /// continuing to invalidate cache lines until all of `slice` has been invalidated. |
637 | /// |
638 | /// Invalidation causes the next read access to memory to be fetched from main memory instead |
639 | /// of the cache. |
640 | /// |
641 | /// # Cache Line Sizes |
642 | /// |
643 | /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed |
644 | /// to 32 bytes, which means `slice` must be 32-byte aligned, and its size must be a multiple |
645 | /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches. |
646 | /// |
647 | /// If `slice` is not cache-line aligned, or its size is not a multiple of the cache line size, |
648 | /// other data before or after the desired memory would also be invalidated, which can very |
649 | /// easily cause memory corruption and undefined behaviour. |
650 | /// |
651 | /// # Safety |
652 | /// |
653 | /// After invalidating, `slice` will be read from main memory on next access. This may cause |
654 | /// recent writes to `slice` to be lost, potentially including the write that initialized it. |
655 | /// Therefore, this method may cause uninitialized memory or invalid values to be read, |
656 | /// resulting in undefined behaviour. You must ensure that main memory contains valid and |
657 | /// initialized values for T before invalidating `slice`. |
658 | /// |
659 | /// `slice` **must** be aligned to the size of the cache lines, and its size **must** be a |
660 | /// multiple of the cache line size, otherwise this function will invalidate other memory, |
661 | /// easily leading to memory corruption and undefined behaviour. This precondition is checked |
662 | /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid |
663 | /// a runtime-dependent `panic!()` call. |
#[inline]
665 | pub unsafe fn invalidate_dcache_by_slice<T>(&mut self, slice: &mut [T]) { |
666 | self.invalidate_dcache_by_address( |
667 | slice.as_ptr() as usize, |
668 | slice.len() * core::mem::size_of::<T>(), |
669 | ); |
670 | } |
671 | |
672 | /// Cleans D-cache by address. |
673 | /// |
674 | /// * `addr`: The address to start cleaning at. |
675 | /// * `size`: The number of bytes to clean. |
676 | /// |
677 | /// Cleans D-cache cache lines, starting from the first line containing `addr`, |
/// finishing once at least `size` bytes have been cleaned.
679 | /// |
680 | /// Cleaning the cache causes whatever data is present in the cache to be immediately written |
681 | /// to main memory, overwriting whatever was in main memory. |
682 | /// |
683 | /// # Cache Line Sizes |
684 | /// |
685 | /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed |
686 | /// to 32 bytes, which means `addr` should generally be 32-byte aligned and `size` should be a |
687 | /// multiple of 32. At the time of writing, no other Cortex-M cores have data caches. |
688 | /// |
689 | /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size, |
690 | /// other data before or after the desired memory will also be cleaned. From the point of view |
691 | /// of the core executing this function, memory remains consistent, so this is not unsound, |
692 | /// but is worth knowing about. |
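///
/// # Example
///
/// A hedged sketch: writing a transmit buffer back to main memory before a DMA engine
/// reads it. `scb` (a `&mut SCB`) and the 32-byte-aligned, 64-byte `TX_BUF` are assumptions:
///
/// ```ignore
/// // Cleaning is not `unsafe`: an unaligned range only cleans some extra data.
/// scb.clean_dcache_by_address(core::ptr::addr_of!(TX_BUF) as usize, 64);
/// // The DMA engine will now observe the buffer contents in main memory.
/// ```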
#[inline]
694 | pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) { |
695 | // No-op zero sized operations |
696 | if size == 0 { |
697 | return; |
698 | } |
699 | |
700 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
701 | let mut cbp = unsafe { CBP::new() }; |
702 | |
703 | crate::asm::dsb(); |
704 | |
705 | let dminline = CPUID::cache_dminline(); |
706 | let line_size = (1 << dminline) * 4; |
707 | let num_lines = ((size - 1) / line_size) + 1; |
708 | |
709 | let mask = 0xFFFF_FFFF - (line_size - 1); |
710 | let mut addr = addr & mask; |
711 | |
712 | for _ in 0..num_lines { |
713 | cbp.dccmvac(addr as u32); |
714 | addr += line_size; |
715 | } |
716 | |
717 | crate::asm::dsb(); |
718 | crate::asm::isb(); |
719 | } |
720 | |
721 | /// Cleans an object from the D-cache. |
722 | /// |
723 | /// * `obj`: The object to clean. |
724 | /// |
725 | /// Cleans D-cache starting from the first cache line containing `obj`, |
726 | /// continuing to clean cache lines until all of `obj` has been cleaned. |
727 | /// |
728 | /// It is recommended that `obj` is both aligned to the cache line size and a multiple of |
729 | /// the cache line size long, otherwise surrounding data will also be cleaned. |
730 | /// |
731 | /// Cleaning the cache causes whatever data is present in the cache to be immediately written |
732 | /// to main memory, overwriting whatever was in main memory. |
#[inline]
734 | pub fn clean_dcache_by_ref<T>(&mut self, obj: &T) { |
735 | self.clean_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>()); |
736 | } |
737 | |
738 | /// Cleans a slice from D-cache. |
739 | /// |
740 | /// * `slice`: The slice to clean. |
741 | /// |
742 | /// Cleans D-cache starting from the first cache line containing members of `slice`, |
743 | /// continuing to clean cache lines until all of `slice` has been cleaned. |
744 | /// |
745 | /// It is recommended that `slice` is both aligned to the cache line size and a multiple of |
746 | /// the cache line size long, otherwise surrounding data will also be cleaned. |
747 | /// |
748 | /// Cleaning the cache causes whatever data is present in the cache to be immediately written |
749 | /// to main memory, overwriting whatever was in main memory. |
#[inline]
751 | pub fn clean_dcache_by_slice<T>(&mut self, slice: &[T]) { |
752 | self.clean_dcache_by_address( |
753 | slice.as_ptr() as usize, |
754 | slice.len() * core::mem::size_of::<T>(), |
755 | ); |
756 | } |
757 | |
758 | /// Cleans and invalidates D-cache by address. |
759 | /// |
760 | /// * `addr`: The address to clean and invalidate. |
761 | /// * `size`: The number of bytes to clean and invalidate. |
762 | /// |
763 | /// Cleans and invalidates D-cache starting from the first cache line containing `addr`, |
764 | /// finishing once at least `size` bytes have been cleaned and invalidated. |
765 | /// |
766 | /// It is recommended that `addr` is aligned to the cache line size and `size` is a multiple of |
767 | /// the cache line size, otherwise surrounding data will also be cleaned. |
768 | /// |
769 | /// Cleaning and invalidating causes data in the D-cache to be written back to main memory, |
770 | /// and then marks that data in the D-cache as invalid, causing future reads to first fetch |
771 | /// from main memory. |
#[inline]
773 | pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) { |
774 | // No-op zero sized operations |
775 | if size == 0 { |
776 | return; |
777 | } |
778 | |
779 | // NOTE(unsafe): No races as all CBP registers are write-only and stateless |
780 | let mut cbp = unsafe { CBP::new() }; |
781 | |
782 | crate::asm::dsb(); |
783 | |
// Cache lines are fixed to 32 bytes on Cortex-M7 and not present in earlier Cortex-M cores
785 | const LINESIZE: usize = 32; |
786 | let num_lines = ((size - 1) / LINESIZE) + 1; |
787 | |
788 | let mut addr = addr & 0xFFFF_FFE0; |
789 | |
790 | for _ in 0..num_lines { |
791 | cbp.dccimvac(addr as u32); |
792 | addr += LINESIZE; |
793 | } |
794 | |
795 | crate::asm::dsb(); |
796 | crate::asm::isb(); |
797 | } |
798 | } |
799 | |
800 | const SCB_SCR_SLEEPDEEP: u32 = 0x1 << 2; |
801 | |
802 | impl SCB { |
803 | /// Set the SLEEPDEEP bit in the SCR register |
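///
/// A sketch of entering deep sleep, assuming the `cortex_m::Peripherals` singleton:
///
/// ```ignore
/// let mut p = cortex_m::Peripherals::take().unwrap();
/// p.SCB.set_sleepdeep();
/// cortex_m::asm::wfi(); // the next WFI enters the deeper sleep state
/// ```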
#[inline]
805 | pub fn set_sleepdeep(&mut self) { |
806 | unsafe { |
807 | self.scr.modify(|scr: u32| scr | SCB_SCR_SLEEPDEEP); |
808 | } |
809 | } |
810 | |
811 | /// Clear the SLEEPDEEP bit in the SCR register |
#[inline]
813 | pub fn clear_sleepdeep(&mut self) { |
814 | unsafe { |
815 | self.scr.modify(|scr: u32| scr & !SCB_SCR_SLEEPDEEP); |
816 | } |
817 | } |
818 | } |
819 | |
820 | const SCB_SCR_SLEEPONEXIT: u32 = 0x1 << 1; |
821 | |
822 | impl SCB { |
823 | /// Set the SLEEPONEXIT bit in the SCR register |
#[inline]
825 | pub fn set_sleeponexit(&mut self) { |
826 | unsafe { |
827 | self.scr.modify(|scr: u32| scr | SCB_SCR_SLEEPONEXIT); |
828 | } |
829 | } |
830 | |
831 | /// Clear the SLEEPONEXIT bit in the SCR register |
#[inline]
833 | pub fn clear_sleeponexit(&mut self) { |
834 | unsafe { |
835 | self.scr.modify(|scr: u32| scr & !SCB_SCR_SLEEPONEXIT); |
836 | } |
837 | } |
838 | } |
839 | |
840 | const SCB_AIRCR_VECTKEY: u32 = 0x05FA << 16; |
841 | const SCB_AIRCR_PRIGROUP_MASK: u32 = 0x7 << 8; |
842 | const SCB_AIRCR_SYSRESETREQ: u32 = 1 << 2; |
843 | |
844 | impl SCB { |
845 | /// Initiate a system reset request to reset the MCU |
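///
/// A sketch of a fatal-error path; the call never returns:
///
/// ```ignore
/// cortex_m::peripheral::SCB::sys_reset();
/// ```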
#[inline]
847 | pub fn sys_reset() -> ! { |
848 | crate::asm::dsb(); |
849 | unsafe { |
850 | (*Self::PTR).aircr.modify( |
851 | |r: u32| { |
852 | SCB_AIRCR_VECTKEY | // otherwise the write is ignored |
853 | r & SCB_AIRCR_PRIGROUP_MASK | // keep priority group unchanged |
854 | SCB_AIRCR_SYSRESETREQ |
855 | }, // set the bit |
856 | ) |
857 | }; |
858 | crate::asm::dsb(); |
859 | loop { |
860 | // wait for the reset |
861 | crate::asm::nop(); // avoid rust-lang/rust#28728 |
862 | } |
863 | } |
864 | } |
865 | |
866 | const SCB_ICSR_PENDSVSET: u32 = 1 << 28; |
867 | const SCB_ICSR_PENDSVCLR: u32 = 1 << 27; |
868 | |
869 | const SCB_ICSR_PENDSTSET: u32 = 1 << 26; |
870 | const SCB_ICSR_PENDSTCLR: u32 = 1 << 25; |
871 | |
872 | impl SCB { |
873 | /// Set the PENDSVSET bit in the ICSR register which will pend the PendSV interrupt |
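///
/// PendSV is commonly used by RTOSes to request a context switch; a sketch (the
/// PendSV handler itself is assumed to be defined elsewhere):
///
/// ```ignore
/// // Request a PendSV; the switch runs once no higher-priority exception is active.
/// cortex_m::peripheral::SCB::set_pendsv();
/// ```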
#[inline]
875 | pub fn set_pendsv() { |
876 | unsafe { |
877 | (*Self::PTR).icsr.write(SCB_ICSR_PENDSVSET); |
878 | } |
879 | } |
880 | |
881 | /// Check if PENDSVSET bit in the ICSR register is set meaning PendSV interrupt is pending |
#[inline]
883 | pub fn is_pendsv_pending() -> bool { |
884 | unsafe { (*Self::PTR).icsr.read() & SCB_ICSR_PENDSVSET == SCB_ICSR_PENDSVSET } |
885 | } |
886 | |
887 | /// Set the PENDSVCLR bit in the ICSR register which will clear a pending PendSV interrupt |
#[inline]
889 | pub fn clear_pendsv() { |
890 | unsafe { |
891 | (*Self::PTR).icsr.write(SCB_ICSR_PENDSVCLR); |
892 | } |
893 | } |
894 | |
895 | /// Set the PENDSTSET bit in the ICSR register which will pend a SysTick interrupt |
#[inline]
897 | pub fn set_pendst() { |
898 | unsafe { |
899 | (*Self::PTR).icsr.write(SCB_ICSR_PENDSTSET); |
900 | } |
901 | } |
902 | |
903 | /// Check if PENDSTSET bit in the ICSR register is set meaning SysTick interrupt is pending |
#[inline]
905 | pub fn is_pendst_pending() -> bool { |
906 | unsafe { (*Self::PTR).icsr.read() & SCB_ICSR_PENDSTSET == SCB_ICSR_PENDSTSET } |
907 | } |
908 | |
909 | /// Set the PENDSTCLR bit in the ICSR register which will clear a pending SysTick interrupt |
#[inline]
911 | pub fn clear_pendst() { |
912 | unsafe { |
913 | (*Self::PTR).icsr.write(SCB_ICSR_PENDSTCLR); |
914 | } |
915 | } |
916 | } |
917 | |
918 | /// System handlers, exceptions with configurable priority |
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u8)]
921 | pub enum SystemHandler { |
922 | // NonMaskableInt, // priority is fixed |
923 | // HardFault, // priority is fixed |
924 | /// Memory management interrupt (not present on Cortex-M0 variants) |
#[cfg(not(armv6m))]
MemoryManagement = 4,

/// Bus fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
BusFault = 5,

/// Usage fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
UsageFault = 6,

/// Secure fault interrupt (only on ARMv8-M)
#[cfg(any(armv8m, native))]
SecureFault = 7,

/// SV call interrupt
SVCall = 11,

/// Debug monitor interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
945 | DebugMonitor = 12, |
946 | |
947 | /// Pend SV interrupt |
948 | PendSV = 14, |
949 | |
950 | /// System Tick interrupt |
951 | SysTick = 15, |
952 | } |
953 | |
954 | impl SCB { |
955 | /// Returns the hardware priority of `system_handler` |
956 | /// |
957 | /// *NOTE*: Hardware priority does not exactly match logical priority levels. See |
958 | /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details. |
#[inline]
960 | pub fn get_priority(system_handler: SystemHandler) -> u8 { |
961 | let index = system_handler as u8; |
962 | |
#[cfg(not(armv6m))]
964 | { |
965 | // NOTE(unsafe) atomic read with no side effects |
966 | |
967 | // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design. |
// TODO: Review this once rust-lang/rust/issues/13926 is fixed.
969 | let priority_ref = unsafe { (*Self::PTR).shpr.get_unchecked(usize::from(index - 4)) }; |
970 | |
971 | priority_ref.read() |
972 | } |
973 | |
#[cfg(armv6m)]
975 | { |
976 | // NOTE(unsafe) atomic read with no side effects |
977 | |
978 | // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design. |
// TODO: Review this once rust-lang/rust/issues/13926 is fixed.
980 | let priority_ref = unsafe { |
981 | (*Self::PTR) |
982 | .shpr |
983 | .get_unchecked(usize::from((index - 8) / 4)) |
984 | }; |
985 | |
986 | let shpr = priority_ref.read(); |
987 | let prio = (shpr >> (8 * (index % 4))) & 0x0000_00ff; |
988 | prio as u8 |
989 | } |
990 | } |
991 | |
992 | /// Sets the hardware priority of `system_handler` to `prio` |
993 | /// |
994 | /// *NOTE*: Hardware priority does not exactly match logical priority levels. See |
995 | /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details. |
996 | /// |
997 | /// On ARMv6-M, updating a system handler priority requires a read-modify-write operation. On |
998 | /// ARMv7-M, the operation is performed in a single, atomic write operation. |
999 | /// |
/// # Safety
1001 | /// |
1002 | /// Changing priority levels can break priority-based critical sections (see |
1003 | /// [`register::basepri`](crate::register::basepri)) and compromise memory safety. |
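///
/// # Example
///
/// A sketch giving PendSV the lowest hardware priority. The value `0xF0` assumes a
/// device with four implemented priority bits, which is common but device specific:
///
/// ```ignore
/// let mut p = cortex_m::Peripherals::take().unwrap();
/// // SAFETY: no priority-based critical section relies on PendSV's priority here.
/// unsafe { p.SCB.set_priority(SystemHandler::PendSV, 0xF0) };
/// ```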
#[inline]
1005 | pub unsafe fn set_priority(&mut self, system_handler: SystemHandler, prio: u8) { |
1006 | let index = system_handler as u8; |
1007 | |
#[cfg(not(armv6m))]
1009 | { |
1010 | // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design. |
// TODO: Review this once rust-lang/rust/issues/13926 is fixed.
1012 | let priority_ref = (*Self::PTR).shpr.get_unchecked(usize::from(index - 4)); |
1013 | |
1014 | priority_ref.write(prio) |
1015 | } |
1016 | |
#[cfg(armv6m)]
1018 | { |
1019 | // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design. |
// TODO: Review this once rust-lang/rust/issues/13926 is fixed.
1021 | let priority_ref = (*Self::PTR) |
1022 | .shpr |
1023 | .get_unchecked(usize::from((index - 8) / 4)); |
1024 | |
1025 | priority_ref.modify(|value| { |
1026 | let shift = 8 * (index % 4); |
1027 | let mask = 0x0000_00ff << shift; |
1028 | let prio = u32::from(prio) << shift; |
1029 | |
1030 | (value & !mask) | prio |
1031 | }); |
1032 | } |
1033 | } |
1034 | |
1035 | /// Return the bit position of the exception enable bit in the SHCSR register |
#[inline]
#[cfg(not(any(armv6m, armv8m_base)))]
1038 | fn shcsr_enable_shift(exception: Exception) -> Option<u32> { |
1039 | match exception { |
1040 | Exception::MemoryManagement => Some(16), |
1041 | Exception::BusFault => Some(17), |
1042 | Exception::UsageFault => Some(18), |
#[cfg(armv8m_main)]
1044 | Exception::SecureFault => Some(19), |
1045 | _ => None, |
1046 | } |
1047 | } |
1048 | |
1049 | /// Enable the exception |
1050 | /// |
/// If the exception is enabled, its own handler is executed when the exception is triggered,
/// instead of the exception escalating to the HardFault handler.
1053 | /// This function is only allowed on the following exceptions: |
1054 | /// * `MemoryManagement` |
1055 | /// * `BusFault` |
1056 | /// * `UsageFault` |
1057 | /// * `SecureFault` (can only be enabled from Secure state) |
1058 | /// |
1059 | /// Calling this function with any other exception will do nothing. |
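///
/// A minimal sketch, assuming the `cortex_m::Peripherals` singleton:
///
/// ```ignore
/// let mut p = cortex_m::Peripherals::take().unwrap();
/// // Route bus faults to the BusFault handler instead of escalating to HardFault.
/// p.SCB.enable(Exception::BusFault);
/// ```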
#[inline]
#[cfg(not(any(armv6m, armv8m_base)))]
1062 | pub fn enable(&mut self, exception: Exception) { |
1063 | if let Some(shift) = SCB::shcsr_enable_shift(exception) { |
1064 | // The mutable reference to SCB makes sure that only this code is currently modifying |
1065 | // the register. |
1066 | unsafe { self.shcsr.modify(|value| value | (1 << shift)) } |
1067 | } |
1068 | } |
1069 | |
1070 | /// Disable the exception |
1071 | /// |
/// If the exception is disabled, the HardFault handler is executed when the exception is
/// triggered, instead of the exception's own handler.
1074 | /// This function is only allowed on the following exceptions: |
1075 | /// * `MemoryManagement` |
1076 | /// * `BusFault` |
1077 | /// * `UsageFault` |
1078 | /// * `SecureFault` (can not be changed from Non-secure state) |
1079 | /// |
1080 | /// Calling this function with any other exception will do nothing. |
#[inline]
#[cfg(not(any(armv6m, armv8m_base)))]
1083 | pub fn disable(&mut self, exception: Exception) { |
1084 | if let Some(shift) = SCB::shcsr_enable_shift(exception) { |
1085 | // The mutable reference to SCB makes sure that only this code is currently modifying |
1086 | // the register. |
1087 | unsafe { self.shcsr.modify(|value| value & !(1 << shift)) } |
1088 | } |
1089 | } |
1090 | |
1091 | /// Check if an exception is enabled |
1092 | /// |
/// This function is only allowed on the following exceptions:
1094 | /// * `MemoryManagement` |
1095 | /// * `BusFault` |
1096 | /// * `UsageFault` |
1097 | /// * `SecureFault` (can not be read from Non-secure state) |
1098 | /// |
/// Calling this function with any other exception will return `false`.
#[inline]
#[cfg(not(any(armv6m, armv8m_base)))]
1102 | pub fn is_enabled(&self, exception: Exception) -> bool { |
1103 | if let Some(shift) = SCB::shcsr_enable_shift(exception) { |
1104 | (self.shcsr.read() & (1 << shift)) > 0 |
1105 | } else { |
1106 | false |
1107 | } |
1108 | } |
1109 | } |
1110 | |