use core::{
    ptr,
    sync::atomic::{AtomicUsize, Ordering},
};

use crate::{consts::BUF_SIZE, MODE_BLOCK_IF_FULL, MODE_MASK};

/// RTT Up channel
#[repr(C)]
pub(crate) struct Channel {
    /// Pointer to the null-terminated channel name.
    pub name: *const u8,
    /// Pointer to the RTT buffer.
    pub buffer: *mut u8,
    /// Size of the RTT buffer in bytes.
    pub size: usize,
    /// Written by the target.
    pub write: AtomicUsize,
    /// Written by the host.
    pub read: AtomicUsize,
    /// Channel properties.
    ///
    /// Currently, only the lowest 2 bits are used to set the channel mode (see the `MODE_*`
    /// constants in the crate root).
    pub flags: AtomicUsize,
}

impl Channel {
    pub fn write_all(&self, mut bytes: &[u8]) {
        // the host-connection-status is only modified after RAM initialization while the device is
        // halted, so we only need to check it once before the write-loop
        let write = match self.host_is_connected() {
            _ if cfg!(feature = "disable-blocking-mode") => Self::nonblocking_write,
            true => Self::blocking_write,
            false => Self::nonblocking_write,
        };

        while !bytes.is_empty() {
            let consumed = write(self, bytes);
            if consumed != 0 {
                bytes = &bytes[consumed..];
            }
        }
    }

    fn blocking_write(&self, bytes: &[u8]) -> usize {
        if bytes.is_empty() {
            return 0;
        }

        // calculate how much space is left in the buffer
        let read = self.read.load(Ordering::Relaxed);
        let write = self.write.load(Ordering::Acquire);
        let available = available_buffer_size(read, write);

        // abort if buffer is full
        if available == 0 {
            return 0;
        }

        self.write_impl(bytes, write, available)
    }

    fn nonblocking_write(&self, bytes: &[u8]) -> usize {
        let write = self.write.load(Ordering::Acquire);

        // NOTE truncate at BUF_SIZE to avoid more than one "wrap-around" in a single `write` call
        self.write_impl(bytes, write, BUF_SIZE)
    }

    fn write_impl(&self, bytes: &[u8], cursor: usize, available: usize) -> usize {
        let len = bytes.len().min(available);

        // copy `bytes[..len]` to the RTT buffer
        unsafe {
            if cursor + len > BUF_SIZE {
                // split memcpy
                let pivot = BUF_SIZE - cursor;
                ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), pivot);
                ptr::copy_nonoverlapping(bytes.as_ptr().add(pivot), self.buffer, len - pivot);
            } else {
                // single memcpy
                ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), len);
            }
        }

        // adjust the write pointer, so the host knows that there is new data
        self.write
            .store(cursor.wrapping_add(len) % BUF_SIZE, Ordering::Release);

        // return the number of bytes written
        len
    }

    pub fn flush(&self) {
        // return early, if host is disconnected
        if !self.host_is_connected() {
            return;
        }

        // busy wait until the read pointer catches up with the write pointer
        let read = || self.read.load(Ordering::Relaxed);
        let write = || self.write.load(Ordering::Relaxed);
        while read() != write() {}
    }

    fn host_is_connected(&self) -> bool {
        // we assume that a host is connected if we are in blocking-mode. this is what probe-run does.
        self.flags.load(Ordering::Relaxed) & MODE_MASK == MODE_BLOCK_IF_FULL
    }
}

/// How much space is left in the buffer?
fn available_buffer_size(read_cursor: usize, write_cursor: usize) -> usize {
    if read_cursor > write_cursor {
        read_cursor - write_cursor - 1
    } else if read_cursor == 0 {
        BUF_SIZE - write_cursor - 1
    } else {
        BUF_SIZE - write_cursor
    }
}
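
// A host-side test sketch, not part of the original source: it exercises the free-space
// calculation and the wrap-around copy. It assumes the crate can be compiled and run with
// `cargo test` on the host (which may not hold for a `no_std` target-only build) and that
// `BUF_SIZE` is larger than the small cursor values used below.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn available_space_reserves_one_byte() {
        // empty buffer (read == write == 0): one byte stays free so that "full"
        // and "empty" remain distinguishable
        assert_eq!(available_buffer_size(0, 0), BUF_SIZE - 1);
        // read cursor ahead of the write cursor: the gap between them, minus the reserved byte
        assert_eq!(available_buffer_size(5, 2), 2);
        // write cursor ahead of a non-zero read cursor: space up to the end of the
        // buffer; the reserved byte is accounted for after the cursor wraps to 0
        assert_eq!(available_buffer_size(2, 5), BUF_SIZE - 5);
    }

    #[test]
    fn write_wraps_around_the_end_of_the_buffer() {
        let mut buf = [0u8; BUF_SIZE];
        let channel = Channel {
            name: b"test\0".as_ptr(),
            buffer: buf.as_mut_ptr(),
            size: BUF_SIZE,
            write: AtomicUsize::new(BUF_SIZE - 2),
            read: AtomicUsize::new(BUF_SIZE - 2),
            flags: AtomicUsize::new(0),
        };

        // writing 4 bytes starting 2 bytes before the end of the buffer splits the copy
        assert_eq!(channel.nonblocking_write(&[1, 2, 3, 4]), 4);
        // the write cursor wraps modulo BUF_SIZE
        assert_eq!(channel.write.load(Ordering::Relaxed), 2);
        // the first two bytes land at the end of the buffer, the rest at the start
        assert_eq!(&buf[BUF_SIZE - 2..], &[1, 2]);
        assert_eq!(&buf[..2], &[3, 4]);
    }
}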