//! Data Watchpoint and Trace unit

#[cfg(not(armv6m))]
use volatile_register::WO;
use volatile_register::{RO, RW};

use crate::peripheral::DWT;
/// Register block
#[repr(C)]
pub struct RegisterBlock {
    /// Control
    pub ctrl: RW<u32>,
    /// Cycle Count
    #[cfg(not(armv6m))]
    pub cyccnt: RW<u32>,
    /// CPI Count
    #[cfg(not(armv6m))]
    pub cpicnt: RW<u32>,
    /// Exception Overhead Count
    #[cfg(not(armv6m))]
    pub exccnt: RW<u32>,
    /// Sleep Count
    #[cfg(not(armv6m))]
    pub sleepcnt: RW<u32>,
    /// LSU Count
    #[cfg(not(armv6m))]
    pub lsucnt: RW<u32>,
    /// Folded-instruction Count
    #[cfg(not(armv6m))]
    pub foldcnt: RW<u32>,
    /// Cortex-M0(+) does not have these parts
    #[cfg(armv6m)]
    reserved: [u32; 6],
    /// Program Counter Sample
    pub pcsr: RO<u32>,
    /// Comparators
    #[cfg(armv6m)]
    pub c: [Comparator; 2],
    /// Comparators
    #[cfg(not(armv6m))]
    pub c: [Comparator; 16],
    #[cfg(not(armv6m))]
    reserved: [u32; 932],
    /// Lock Access
    #[cfg(not(armv6m))]
    pub lar: WO<u32>,
    /// Lock Status
    #[cfg(not(armv6m))]
    pub lsr: RO<u32>,
}

/// Comparator
#[repr(C)]
pub struct Comparator {
    /// Comparator
    pub comp: RW<u32>,
    /// Comparator Mask
    pub mask: RW<u32>,
    /// Comparator Function
    pub function: RW<u32>,
    reserved: u32,
}

// DWT CTRL register fields
const NUMCOMP_OFFSET: u32 = 28;
const NOTRCPKT: u32 = 1 << 27;
const NOEXTTRIG: u32 = 1 << 26;
const NOCYCCNT: u32 = 1 << 25;
const NOPRFCNT: u32 = 1 << 24;
const CYCCNTENA: u32 = 1 << 0;

impl DWT {
    /// Number of comparators implemented
    ///
    /// A value of zero indicates no comparator support.
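    ///
    /// A minimal usage sketch (assuming the usual `cortex_m` crate entry
    /// points):
    ///
    /// ```no_run
    /// use cortex_m::peripheral::DWT;
    ///
    /// let comparators = DWT::num_comp();
    /// assert!(comparators > 0, "no DWT comparators on this part");
    /// ```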
    #[inline]
    pub fn num_comp() -> u8 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { ((*Self::PTR).ctrl.read() >> NUMCOMP_OFFSET) as u8 }
    }

    /// Returns `true` if the implementation supports sampling and exception tracing
    #[cfg(not(armv6m))]
    #[inline]
    pub fn has_exception_trace() -> bool {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).ctrl.read() & NOTRCPKT == 0 }
    }

    /// Returns `true` if the implementation includes external match signals
    #[cfg(not(armv6m))]
    #[inline]
    pub fn has_external_match() -> bool {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).ctrl.read() & NOEXTTRIG == 0 }
    }

    /// Returns `true` if the implementation supports a cycle counter
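    ///
    /// Worth checking before relying on `CYCCNT`; a minimal sketch:
    ///
    /// ```no_run
    /// use cortex_m::peripheral::DWT;
    ///
    /// if !DWT::has_cycle_counter() {
    ///     panic!("CYCCNT is not implemented on this device");
    /// }
    /// ```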
    #[cfg(not(armv6m))]
    #[inline]
    pub fn has_cycle_counter() -> bool {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).ctrl.read() & NOCYCCNT == 0 }
    }

    /// Returns `true` if the implementation supports the profiling counters
    #[cfg(not(armv6m))]
    #[inline]
    pub fn has_profiling_counter() -> bool {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).ctrl.read() & NOPRFCNT == 0 }
    }

    /// Enables the cycle counter
    ///
    /// The global trace enable ([`DCB::enable_trace`]) should be set before
    /// enabling the cycle counter; the processor may ignore writes to the
    /// cycle counter enable if the global trace is disabled
    /// (implementation defined behaviour).
    ///
    /// [`DCB::enable_trace`]: crate::peripheral::DCB::enable_trace
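    ///
    /// A sketch of the full enable sequence (the [`DWT::unlock`] call is only
    /// needed on devices that software-lock the DWT):
    ///
    /// ```no_run
    /// use cortex_m::peripheral::{DWT, Peripherals};
    ///
    /// let mut p = Peripherals::take().unwrap();
    /// p.DCB.enable_trace();         // set the global trace enable first
    /// DWT::unlock();                // remove the software lock, if present
    /// p.DWT.enable_cycle_counter();
    /// ```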
    #[cfg(not(armv6m))]
    #[inline]
    pub fn enable_cycle_counter(&mut self) {
        unsafe { self.ctrl.modify(|r| r | CYCCNTENA) }
    }

    /// Disables the cycle counter
    #[cfg(not(armv6m))]
    #[inline]
    pub fn disable_cycle_counter(&mut self) {
        unsafe { self.ctrl.modify(|r| r & !CYCCNTENA) }
    }

    /// Returns `true` if the cycle counter is enabled
    #[cfg(not(armv6m))]
    #[inline]
    pub fn cycle_counter_enabled() -> bool {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).ctrl.read() & CYCCNTENA != 0 }
    }

    /// Returns the current clock cycle count
    #[cfg(not(armv6m))]
    #[inline]
    #[deprecated(
        since = "0.7.4",
        note = "Use `cycle_count` which follows the C-GETTER convention"
    )]
    pub fn get_cycle_count() -> u32 {
        Self::cycle_count()
    }

    /// Returns the current clock cycle count
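    ///
    /// A sketch of timing a code region; `wrapping_sub` yields the correct
    /// delta even if `CYCCNT` overflows once, assuming the region is shorter
    /// than 2^32 cycles:
    ///
    /// ```no_run
    /// use cortex_m::peripheral::DWT;
    ///
    /// let before = DWT::cycle_count();
    /// // ... code under measurement ...
    /// let elapsed = DWT::cycle_count().wrapping_sub(before);
    /// ```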
    #[cfg(not(armv6m))]
    #[inline]
    pub fn cycle_count() -> u32 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).cyccnt.read() }
    }

    /// Set the cycle count
    #[cfg(not(armv6m))]
    #[inline]
    pub fn set_cycle_count(&mut self, count: u32) {
        unsafe { self.cyccnt.write(count) }
    }

    /// Removes the software lock on the DWT
    ///
    /// Some devices, like the STM32F7, software lock the DWT after a power cycle.
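    ///
    /// A minimal sketch; on such devices the unlock must happen before any
    /// other DWT configuration:
    ///
    /// ```no_run
    /// use cortex_m::peripheral::DWT;
    ///
    /// DWT::unlock();
    /// // DWT registers can now be written
    /// ```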
    #[cfg(not(armv6m))]
    #[inline]
    pub fn unlock() {
        // NOTE(unsafe) atomic write to a stateless, write-only register
        unsafe { (*Self::PTR).lar.write(0xC5AC_CE55) }
    }

    /// Get the CPI count
    ///
    /// Counts additional cycles required to execute multi-cycle instructions,
    /// except those recorded by [`lsu_count`], and counts any instruction fetch
    /// stalls.
    ///
    /// [`lsu_count`]: DWT::lsu_count
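    ///
    /// A sketch of sampling the profiling counters around a code region; note
    /// that these counters are only 8 bits wide, so they wrap quickly:
    ///
    /// ```no_run
    /// use cortex_m::peripheral::{DWT, Peripherals};
    ///
    /// let mut p = Peripherals::take().unwrap();
    /// p.DWT.set_cpi_count(0);
    /// p.DWT.set_lsu_count(0);
    /// // ... code under measurement ...
    /// let cpi = DWT::cpi_count();
    /// let lsu = DWT::lsu_count();
    /// ```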
    #[cfg(not(armv6m))]
    #[inline]
    pub fn cpi_count() -> u8 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).cpicnt.read() as u8 }
    }

    /// Set the CPI count
    #[cfg(not(armv6m))]
    #[inline]
    pub fn set_cpi_count(&mut self, count: u8) {
        unsafe { self.cpicnt.write(count as u32) }
    }

    /// Get the total cycles spent in exception processing
    #[cfg(not(armv6m))]
    #[inline]
    pub fn exception_count() -> u8 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).exccnt.read() as u8 }
    }

    /// Set the exception count
    #[cfg(not(armv6m))]
    #[inline]
    pub fn set_exception_count(&mut self, count: u8) {
        unsafe { self.exccnt.write(count as u32) }
    }

    /// Get the total number of cycles that the processor is sleeping
    ///
    /// ARM recommends that this counter counts all cycles when the processor is sleeping,
    /// regardless of whether a WFI or WFE instruction, or the sleep-on-exit functionality,
    /// caused the entry to sleep mode.
    /// However, all sleep features are implementation defined, so exactly when
    /// this counter increments is also implementation defined.
    #[cfg(not(armv6m))]
    #[inline]
    pub fn sleep_count() -> u8 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).sleepcnt.read() as u8 }
    }

    /// Set the sleep count
    #[cfg(not(armv6m))]
    #[inline]
    pub fn set_sleep_count(&mut self, count: u8) {
        unsafe { self.sleepcnt.write(count as u32) }
    }

    /// Get the additional cycles required to execute all load or store instructions
    #[cfg(not(armv6m))]
    #[inline]
    pub fn lsu_count() -> u8 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).lsucnt.read() as u8 }
    }

    /// Set the LSU count
    #[cfg(not(armv6m))]
    #[inline]
    pub fn set_lsu_count(&mut self, count: u8) {
        unsafe { self.lsucnt.write(count as u32) }
    }

    /// Get the folded instruction count
    ///
    /// Increments on each instruction that takes 0 cycles.
    #[cfg(not(armv6m))]
    #[inline]
    pub fn fold_count() -> u8 {
        // NOTE(unsafe) atomic read with no side effects
        unsafe { (*Self::PTR).foldcnt.read() as u8 }
    }

    /// Set the folded instruction count
    #[cfg(not(armv6m))]
    #[inline]
    pub fn set_fold_count(&mut self, count: u8) {
        unsafe { self.foldcnt.write(count as u32) }
    }
}