author    Dario Nieuwenhuis <dirbaio@dirbaio.net>  2021-11-15 21:13:12 +0100
committer GitHub <noreply@github.com>  2021-11-15 21:13:12 +0100
commit    ab4d4f9de306e8d3040e2b5d6e05709d72891721 (patch)
tree      d2583a3adf1df68ed47c333865ad5fae91200787 /nrf-softdevice-defmt-rtt
parent    fae86722ac818c275920b8b78e0b34ea274e56d8 (diff)
parent    c0cfccffc7c44c88cb9580b1496569d3e9de1848 (diff)
Merge pull request #86 from bobmcwhirter/update_defmt
Update toolchain and defmt versions.
Diffstat (limited to 'nrf-softdevice-defmt-rtt')
 nrf-softdevice-defmt-rtt/Cargo.toml     |   2
 nrf-softdevice-defmt-rtt/src/channel.rs | 116
 nrf-softdevice-defmt-rtt/src/lib.rs     | 187
 3 files changed, 164 insertions, 141 deletions
diff --git a/nrf-softdevice-defmt-rtt/Cargo.toml b/nrf-softdevice-defmt-rtt/Cargo.toml
index 5d071be..e847a72 100644
--- a/nrf-softdevice-defmt-rtt/Cargo.toml
+++ b/nrf-softdevice-defmt-rtt/Cargo.toml
@@ -11,7 +11,7 @@ repository = "https://github.com/knurling-rs/defmt"
version = "0.1.0"
[dependencies]
-defmt = { version = "0.2.0" }
+defmt = { version = "0.3" }
nrf-softdevice = { path = "../nrf-softdevice", version = "0.1.0" }
cortex-m = "0.6.4"
critical-section = { version = "0.2.1" }
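
For context, the defmt 0.2 -> 0.3 bump is more than a version number: 0.3 replaces the
handle-based `defmt::Write` API with a global `Logger` trait whose four methods the lib.rs
changes below implement. A minimal no-op sketch of that trait surface (shape taken from the
diff below; a real logger must additionally provide locking and frame encoding):

    #[defmt::global_logger]
    struct NopLogger;

    unsafe impl defmt::Logger for NopLogger {
        fn acquire() {}                   // take the logger lock; must prevent reentrancy
        unsafe fn flush() {}              // block until the host has read all buffered data
        unsafe fn release() {}            // undo `acquire`
        unsafe fn write(_bytes: &[u8]) {} // sink one fragment of an encoded defmt frame
    }
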
diff --git a/nrf-softdevice-defmt-rtt/src/channel.rs b/nrf-softdevice-defmt-rtt/src/channel.rs
new file mode 100644
index 0000000..ebe44bf
--- /dev/null
+++ b/nrf-softdevice-defmt-rtt/src/channel.rs
@@ -0,0 +1,116 @@
+use core::{
+ ptr,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+
+use crate::{MODE_BLOCK_IF_FULL, MODE_MASK, SIZE};
+
+#[repr(C)]
+pub(crate) struct Channel {
+ pub name: *const u8,
+ pub buffer: *mut u8,
+ pub size: usize,
+ pub write: AtomicUsize,
+ pub read: AtomicUsize,
+ /// Channel properties.
+ ///
+ /// Currently, only the lowest 2 bits are used to set the channel mode (see constants below).
+ pub flags: AtomicUsize,
+}
+
+impl Channel {
+ pub fn write_all(&self, mut bytes: &[u8]) {
+ // the host connection status is only modified after RAM initialization while the device is
+ // halted, so we only need to check it once, before the write loop
+ let write = match self.host_is_connected() {
+ true => Channel::blocking_write,
+ false => Channel::nonblocking_write,
+ };
+
+ while !bytes.is_empty() {
+ let consumed = write(self, bytes);
+ if consumed != 0 {
+ bytes = &bytes[consumed..];
+ }
+ }
+ }
+
+ fn blocking_write(&self, bytes: &[u8]) -> usize {
+ if bytes.is_empty() {
+ return 0;
+ }
+
+ let read = self.read.load(Ordering::Relaxed);
+ let write = self.write.load(Ordering::Acquire);
+ let available = if read > write {
+ read - write - 1
+ } else if read == 0 {
+ SIZE - write - 1
+ } else {
+ SIZE - write
+ };
+
+ if available == 0 {
+ return 0;
+ }
+
+ let cursor = write;
+ let len = bytes.len().min(available);
+
+ unsafe {
+ if cursor + len > SIZE {
+ // split memcpy
+ let pivot = SIZE - cursor;
+ ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), pivot);
+ ptr::copy_nonoverlapping(bytes.as_ptr().add(pivot), self.buffer, len - pivot);
+ } else {
+ // single memcpy
+ ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), len);
+ }
+ }
+ self.write
+ .store(write.wrapping_add(len) % SIZE, Ordering::Release);
+
+ len
+ }
+
+ fn nonblocking_write(&self, bytes: &[u8]) -> usize {
+ let write = self.write.load(Ordering::Acquire);
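+ // NOTE the read pointer is never consulted here, so unread data may be overwritten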
+ let cursor = write;
+ // NOTE truncate at SIZE to avoid more than one "wrap-around" in a single `write` call
+ let len = bytes.len().min(SIZE);
+
+ unsafe {
+ if cursor + len > SIZE {
+ // split memcpy
+ let pivot = SIZE - cursor;
+ ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), pivot);
+ ptr::copy_nonoverlapping(bytes.as_ptr().add(pivot), self.buffer, len - pivot);
+ } else {
+ // single memcpy
+ ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), len);
+ }
+ }
+ self.write
+ .store(write.wrapping_add(len) % SIZE, Ordering::Release);
+
+ len
+ }
+
+ pub fn flush(&self) {
+ // return early if the host is disconnected
+ if !self.host_is_connected() {
+ return;
+ }
+
+ // busy-wait until the read pointer catches up with the write pointer
+ let read = || self.read.load(Ordering::Relaxed);
+ let write = || self.write.load(Ordering::Relaxed);
+ while read() != write() {}
+ }
+
+ fn host_is_connected(&self) -> bool {
+ // we assume a host is connected if we are in blocking mode; this is what probe-run does.
+ self.flags.load(Ordering::Relaxed) & MODE_MASK == MODE_BLOCK_IF_FULL
+ }
+}
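
The subtlest part of `blocking_write` above is the free-space computation: one slot is kept
empty so that `read == write` always means "empty" and never "full". A standalone sketch of
that rule (hypothetical `available` helper, not part of the crate) with a few sanity checks:

    const SIZE: usize = 1024;

    // mirrors the `available` computation in blocking_write
    fn available(read: usize, write: usize) -> usize {
        if read > write {
            read - write - 1 // free gap between the write and read cursors
        } else if read == 0 {
            SIZE - write - 1 // may fill to the end, minus the always-empty slot
        } else {
            SIZE - write // may fill to the end; the wrap happens on a later call
        }
    }

    fn main() {
        assert_eq!(available(0, 0), SIZE - 1); // empty buffer, reader parked at 0
        assert_eq!(available(10, 9), 0);       // full: writer is one slot behind reader
        assert_eq!(available(5, 5), SIZE - 5); // empty, reader past 0: fill to the end first
    }
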
diff --git a/nrf-softdevice-defmt-rtt/src/lib.rs b/nrf-softdevice-defmt-rtt/src/lib.rs
index 0d2cf95..90d4d6f 100644
--- a/nrf-softdevice-defmt-rtt/src/lib.rs
+++ b/nrf-softdevice-defmt-rtt/src/lib.rs
@@ -8,51 +8,69 @@
//! // src/main.rs or src/bin/my-app.rs
//! use defmt_rtt as _;
//! ```
+//!
+//! # Blocking/Non-blocking
+//!
+//! `probe-run` puts RTT into blocking mode to avoid losing data.
+//!
+//! As a result, this implementation may block forever if `probe-run` disconnects at runtime:
+//! the RTT buffer fills up and writing eventually halts program execution.
+//!
+//! `defmt::flush` would also block forever in that case.
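+//!
+//! For example (illustrative), flush before a long sleep so the host sees every frame:
+//!
+//! ```ignore
+//! defmt::info!("entering low-power mode");
+//! defmt::flush();
+//! ```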
#![no_std]
-use core::{
- ptr,
- ptr::NonNull,
- sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering},
-};
+mod channel;
-// TODO make configurable
-// NOTE use a power of 2 for best performance
-const SIZE: usize = 1024;
+use core::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering};
+
+use crate::channel::Channel;
#[defmt::global_logger]
struct Logger;
-impl defmt::Write for Logger {
- fn write(&mut self, bytes: &[u8]) {
- unsafe { handle().write_all(bytes) }
- }
-}
-
+/// Global logger lock.
static TAKEN: AtomicBool = AtomicBool::new(false);
static INTERRUPTS_TOKEN: AtomicU8 = AtomicU8::new(0);
+static mut ENCODER: defmt::Encoder = defmt::Encoder::new();
unsafe impl defmt::Logger for Logger {
- fn acquire() -> Option<NonNull<dyn defmt::Write>> {
+ fn acquire() {
let token = unsafe { critical_section::acquire() };
+
if !TAKEN.load(Ordering::Relaxed) {
// no need for CAS because interrupts are disabled
TAKEN.store(true, Ordering::Relaxed);
INTERRUPTS_TOKEN.store(token, Ordering::Relaxed);
- Some(NonNull::from(&Logger as &dyn defmt::Write))
+ // safety: accessing the `static mut` is OK because we have disabled interrupts.
+ unsafe { ENCODER.start_frame(do_write) }
} else {
unsafe { critical_section::release(token) };
- None
}
}
- unsafe fn release(_: NonNull<dyn defmt::Write>) {
+ unsafe fn flush() {
+ // SAFETY: if we get here, the global logger mutex is currently acquired
+ handle().flush();
+ }
+
+ unsafe fn release() {
+ // safety: accessing the `static mut` is OK because we have disabled interrupts.
+ ENCODER.end_frame(do_write);
TAKEN.store(false, Ordering::Relaxed);
critical_section::release(INTERRUPTS_TOKEN.load(Ordering::Relaxed));
}
+
+ unsafe fn write(bytes: &[u8]) {
+ // safety: accessing the `static mut` is OK because we have disabled interrupts.
+ ENCODER.write(bytes, do_write);
+ }
+}
+
+fn do_write(bytes: &[u8]) {
+ unsafe { handle().write_all(bytes) }
}
#[repr(C)]
@@ -63,127 +81,15 @@ struct Header {
up_channel: Channel,
}
-#[repr(C)]
-struct Channel {
- name: *const u8,
- buffer: *mut u8,
- size: usize,
- write: AtomicUsize,
- read: AtomicUsize,
- flags: AtomicUsize,
-}
+const MODE_MASK: usize = 0b11;
+/// Block the application if the RTT buffer is full; wait for the host to read data.
+const MODE_BLOCK_IF_FULL: usize = 2;
+/// Don't block if the RTT buffer is full. Truncate data to output as much as fits.
+const MODE_NON_BLOCKING_TRIM: usize = 1;
-const BLOCK_IF_FULL: usize = 2;
-const NOBLOCK_TRIM: usize = 1;
-
-impl Channel {
- fn write_all(&self, mut bytes: &[u8]) {
- // NOTE `flags` is modified by the host after RAM initialization while the device is halted
- // it cannot otherwise be modified so we don't need to check its state more often than
- // just here
- if self.flags.load(Ordering::Relaxed) == BLOCK_IF_FULL {
- while !bytes.is_empty() {
- let consumed = self.blocking_write(bytes);
- if consumed != 0 {
- bytes = &bytes[consumed..];
- }
- }
- } else {
- while !bytes.is_empty() {
- let consumed = self.nonblocking_write(bytes);
- if consumed != 0 {
- bytes = &bytes[consumed..];
- }
- }
- }
- }
-
- fn blocking_write(&self, bytes: &[u8]) -> usize {
- if bytes.is_empty() {
- return 0;
- }
-
- let read = self.read.load(Ordering::Relaxed);
- let write = self.write.load(Ordering::Acquire);
- let available = if read > write {
- read - write - 1
- } else if read == 0 {
- SIZE - write - 1
- } else {
- SIZE - write
- };
-
- if available == 0 {
- return 0;
- }
-
- let cursor = write;
- let len = bytes.len().min(available);
-
- unsafe {
- if cursor + len > SIZE {
- // split memcpy
- let pivot = SIZE - cursor;
- ptr::copy_nonoverlapping(
- bytes.as_ptr(),
- self.buffer.add(cursor.into()),
- pivot.into(),
- );
- ptr::copy_nonoverlapping(
- bytes.as_ptr().add(pivot.into()),
- self.buffer,
- (len - pivot).into(),
- );
- } else {
- // single memcpy
- ptr::copy_nonoverlapping(
- bytes.as_ptr(),
- self.buffer.add(cursor.into()),
- len.into(),
- );
- }
- }
- self.write
- .store(write.wrapping_add(len) % SIZE, Ordering::Release);
-
- len
- }
-
- fn nonblocking_write(&self, bytes: &[u8]) -> usize {
- let write = self.write.load(Ordering::Acquire);
- let cursor = write;
- // NOTE truncate at SIZE to avoid more than one "wrap-around" in a single `write` call
- let len = bytes.len().min(SIZE);
-
- unsafe {
- if cursor + len > SIZE {
- // split memcpy
- let pivot = SIZE - cursor;
- ptr::copy_nonoverlapping(
- bytes.as_ptr(),
- self.buffer.add(cursor.into()),
- pivot.into(),
- );
- ptr::copy_nonoverlapping(
- bytes.as_ptr().add(pivot.into()),
- self.buffer,
- (len - pivot).into(),
- );
- } else {
- // single memcpy
- ptr::copy_nonoverlapping(
- bytes.as_ptr(),
- self.buffer.add(cursor.into()),
- len.into(),
- );
- }
- }
- self.write
- .store(write.wrapping_add(len) % SIZE, Ordering::Release);
-
- len
- }
-}
+// TODO make configurable
+// NOTE use a power of 2 for best performance
+const SIZE: usize = 1024;
// make sure we only get shared references to the header/channel (avoid UB)
/// # Safety
@@ -205,11 +111,12 @@ unsafe fn handle() -> &'static Channel {
size: SIZE,
write: AtomicUsize::new(0),
read: AtomicUsize::new(0),
- flags: AtomicUsize::new(NOBLOCK_TRIM),
+ flags: AtomicUsize::new(MODE_NON_BLOCKING_TRIM),
},
};
- #[link_section = ".uninit.defmt-rtt.BUFFER"]
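+ // NOTE Mach-O section names take the form "segment,section", hence the comma on macOS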
+ #[cfg_attr(target_os = "macos", link_section = ".uninit,defmt-rtt.BUFFER")]
+ #[cfg_attr(not(target_os = "macos"), link_section = ".uninit.defmt-rtt.BUFFER")]
static mut BUFFER: [u8; SIZE] = [0; SIZE];
static NAME: &[u8] = b"defmt\0";
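
For completeness, consuming this crate from an application only requires linking it, as the
crate docs above show. A hedged sketch (the `cortex-m-rt` entry point and `panic-probe`
handler are typical companions in this ecosystem, not requirements imposed by this diff):

    #![no_std]
    #![no_main]

    use nrf_softdevice_defmt_rtt as _; // registers the #[defmt::global_logger]
    use panic_probe as _;              // assumption: any panic handler will do

    #[cortex_m_rt::entry]
    fn main() -> ! {
        defmt::info!("hello over RTT");
        loop {
            cortex_m::asm::wfi(); // sleep; the probe drains the RTT buffer
        }
    }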