diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs index a2b83716d9..6302c9894d 100644 --- a/embassy-stm32/src/dma/bdma.rs +++ b/embassy-stm32/src/dma/bdma.rs @@ -1,6 +1,6 @@ //! Basic Direct Memory Acccess (BDMA) -use core::future::Future; +use core::future::{poll_fn, Future}; use core::pin::Pin; use core::sync::atomic::{fence, AtomicUsize, Ordering}; use core::task::{Context, Poll, Waker}; @@ -429,6 +429,40 @@ impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { } } +struct RingBuffer {} + +impl RingBuffer { + fn is_running(ch: &pac::bdma::Ch) -> bool { + ch.cr().read().en() + } + + fn request_stop(ch: &pac::bdma::Ch) { + ch.cr().modify(|w| { + w.set_circ(false); + }); + } + + async fn stop(ch: &pac::bdma::Ch, set_waker: &mut dyn FnMut(&Waker)) { + use core::sync::atomic::compiler_fence; + + Self::request_stop(ch); + + //wait until cr.susp reads as true + poll_fn(|cx| { + set_waker(cx.waker()); + + compiler_fence(Ordering::SeqCst); + + if !Self::is_running(ch) { + Poll::Ready(()) + } else { + Poll::Pending + } + }) + .await + } +} + /// Ringbuffer for reading data using DMA circular mode. pub struct ReadableRingBuffer<'a, C: Channel, W: Word> { cr: regs::Cr, @@ -506,6 +540,14 @@ impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow())); } + /// This disables the circular DMA causing the DMA transfer to stop when the buffer is full. 
+ pub async fn stop(&mut self) { + RingBuffer::stop(&self.channel.regs().ch(self.channel.num()), &mut |waker| { + self.set_waker(waker) + }) + .await + } + /// Read elements from the ring buffer /// Return a tuple of the length read and the length remaining in the buffer /// If not all of the elements were read, then there will be some elements in the buffer remaining @@ -555,16 +597,7 @@ impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_stop(&mut self) { - let ch = self.channel.regs().ch(self.channel.num()); - - // Disable the channel. Keep the IEs enabled so the irqs still fire. - // If the channel is enabled and transfer is not completed, we need to perform - // two separate write access to the CR register to disable the channel. - ch.cr().write(|w| { - w.set_teie(true); - w.set_htie(true); - w.set_tcie(true); - }); + RingBuffer::request_stop(&self.channel.regs().ch(self.channel.num())); } /// Return whether DMA is still running. @@ -572,8 +605,7 @@ impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { /// If this returns `false`, it can be because either the transfer finished, or /// it was requested to stop early with [`request_stop`](Self::request_stop). pub fn is_running(&mut self) -> bool { - let ch = self.channel.regs().ch(self.channel.num()); - ch.cr().read().en() + RingBuffer::is_running(&self.channel.regs().ch(self.channel.num())) } } @@ -664,6 +696,21 @@ impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow())); } + /// Write elements directly to the raw buffer. + /// This can be used to fill the buffer before starting the DMA transfer. 
+ #[allow(dead_code)] + pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { + self.ringbuf.write_immediate(buf) + } + + /// This will disable the circular DMA and wait for the current buffer to finish writing to the peripheral. + pub async fn stop(&mut self) { + RingBuffer::stop(&self.channel.regs().ch(self.channel.num()), &mut |waker| { + self.set_waker(waker) + }) + .await + } + /// Write elements to the ring buffer /// Return a tuple of the length written and the length remaining in the buffer pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { @@ -700,16 +747,7 @@ impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { /// /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false. pub fn request_stop(&mut self) { - let ch = self.channel.regs().ch(self.channel.num()); - - // Disable the channel. Keep the IEs enabled so the irqs still fire. - // If the channel is enabled and transfer is not completed, we need to perform - // two separate write access to the CR register to disable the channel. - ch.cr().write(|w| { - w.set_teie(true); - w.set_htie(true); - w.set_tcie(true); - }); + RingBuffer::request_stop(&self.channel.regs().ch(self.channel.num())); } /// Return whether DMA is still running. @@ -717,8 +755,7 @@ impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { /// If this returns `false`, it can be because either the transfer finished, or /// it was requested to stop early with [`request_stop`](Self::request_stop). 
pub fn is_running(&mut self) -> bool { - let ch = self.channel.regs().ch(self.channel.num()); - ch.cr().read().en() + RingBuffer::is_running(&self.channel.regs().ch(self.channel.num())) } } diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index 16d02f2732..22b3ff38c9 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs @@ -1,4 +1,4 @@ -use core::future::Future; +use core::future::{poll_fn, Future}; use core::marker::PhantomData; use core::pin::Pin; use core::sync::atomic::{fence, AtomicUsize, Ordering}; @@ -667,6 +667,39 @@ impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { } } +struct RingBuffer {} + +impl RingBuffer { + fn is_running(ch: &pac::dma::St) -> bool { + ch.cr().read().en() + } + + fn request_stop(ch: &pac::dma::St) { + ch.cr().modify(|w| { + w.set_circ(false); + }); + } + + async fn stop(ch: &pac::dma::St, set_waker: &mut dyn FnMut(&Waker)) { + use core::sync::atomic::compiler_fence; + + Self::request_stop(ch); + + poll_fn(|cx| { + set_waker(cx.waker()); + + compiler_fence(Ordering::SeqCst); + + if !Self::is_running(ch) { + Poll::Ready(()) + } else { + Poll::Pending + } + }) + .await + } +} + /// Ringbuffer for receiving data using DMA circular mode. pub struct ReadableRingBuffer<'a, C: Channel, W: Word> { cr: regs::Cr, @@ -705,7 +738,7 @@ impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { w.set_minc(true); w.set_pinc(false); w.set_teie(true); - w.set_htie(options.half_transfer_ir); + w.set_htie(true); w.set_tcie(true); w.set_circ(true); #[cfg(dma_v1)] @@ -754,6 +787,14 @@ impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { ch.cr().write_value(self.cr); } + /// Disables the circular DMA transfer. The transfer will complete on the next iteration + pub async fn stop(&mut self) { + RingBuffer::stop(&self.channel.regs().st(self.channel.num()), &mut |waker| { + self.set_waker(waker) + }) + .await + } + /// Clear all data in the ring buffer. 
pub fn clear(&mut self) { self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow())); @@ -880,7 +921,7 @@ impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { w.set_minc(true); w.set_pinc(false); w.set_teie(true); - w.set_htie(options.half_transfer_ir); + w.set_htie(true); w.set_tcie(true); w.set_circ(true); #[cfg(dma_v1)] @@ -929,11 +970,26 @@ impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { ch.cr().write_value(self.cr); } + /// Disables the circular DMA transfer. The transfer will complete on the next iteration + pub async fn stop(&mut self) { + RingBuffer::stop(&self.channel.regs().st(self.channel.num()), &mut |waker| { + self.set_waker(waker) + }) + .await + } + /// Clear all data in the ring buffer. pub fn clear(&mut self) { self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow())); } + /// Write elements directly to the raw buffer. + /// This can be used to fill the buffer before starting the DMA transfer. + #[allow(dead_code)] + pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { + self.ringbuf.write_immediate(buf) + } + /// Write elements from the ring buffer /// Return a tuple of the length written and the length remaining in the buffer pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { diff --git a/embassy-stm32/src/dma/gpdma.rs b/embassy-stm32/src/dma/gpdma.rs index 34b2426b98..36f369075c 100644 --- a/embassy-stm32/src/dma/gpdma.rs +++ b/embassy-stm32/src/dma/gpdma.rs @@ -1,13 +1,14 @@ #![macro_use] -use core::future::Future; +use core::future::{poll_fn, Future}; use core::pin::Pin; -use core::sync::atomic::{fence, Ordering}; -use core::task::{Context, Poll}; +use core::sync::atomic::{fence, AtomicUsize, Ordering}; +use core::task::{Context, Poll, Waker}; use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef}; use embassy_sync::waitqueue::AtomicWaker; +use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer}; use 
super::word::{Word, WordSize}; use super::Dir; use crate::_generated::GPDMA_CHANNEL_COUNT; @@ -40,13 +41,18 @@ impl From for vals::ChTr1Dw { struct State { ch_wakers: [AtomicWaker; GPDMA_CHANNEL_COUNT], + circular_address: [AtomicUsize; GPDMA_CHANNEL_COUNT], + complete_count: [AtomicUsize; GPDMA_CHANNEL_COUNT], } impl State { const fn new() -> Self { + const ZERO: AtomicUsize = AtomicUsize::new(0); const AW: AtomicWaker = AtomicWaker::new(); Self { ch_wakers: [AW; GPDMA_CHANNEL_COUNT], + circular_address: [ZERO; GPDMA_CHANNEL_COUNT], + complete_count: [ZERO; GPDMA_CHANNEL_COUNT], } } } @@ -105,9 +111,31 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::gpdma::Gpdma, channel_num: usize, in ); } - if sr.suspf() || sr.tcf() { + if sr.htf() { + //clear the flag for the half transfer complete + ch.fcr().modify(|w| w.set_htf(true)); + STATE.ch_wakers[index].wake(); + } + + if sr.tcf() { + //clear the flag for the transfer complete + ch.fcr().modify(|w| w.set_tcf(true)); + STATE.complete_count[index].fetch_add(1, Ordering::Relaxed); + STATE.ch_wakers[index].wake(); + return; + } + + if sr.suspf() { + ch.fcr().modify(|w| w.set_suspf(true)); + // disable all xxIEs to prevent the irq from firing again. - ch.cr().write(|_| {}); + ch.cr().modify(|w| { + w.set_tcie(false); + w.set_useie(false); + w.set_dteie(false); + w.set_suspie(false); + w.set_htie(false); + }); // Wake the future. It'll look at tcf and see it's set. 
STATE.ch_wakers[index].wake(); @@ -368,3 +396,409 @@ impl<'a, C: Channel> Future for Transfer<'a, C> { } } } + +struct DmaCtrlImpl<'a, C: Channel> { + channel: PeripheralRef<'a, C>, + word_size: WordSize, +} + +impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { + fn get_remaining_transfers(&self) -> usize { + let ch = self.channel.regs().ch(self.channel.num()); + (ch.br1().read().bndt() / self.word_size.bytes() as u16) as usize + } + + fn get_complete_count(&self) -> usize { + STATE.complete_count[self.channel.index()].load(Ordering::Acquire) + } + + fn reset_complete_count(&mut self) -> usize { + STATE.complete_count[self.channel.index()].swap(0, Ordering::AcqRel) + } + + fn set_waker(&mut self, waker: &Waker) { + STATE.ch_wakers[self.channel.index()].register(waker); + } +} + +struct RingBuffer {} + +impl RingBuffer { + fn configure<'a, W: Word>( + ch: &pac::gpdma::Channel, + channel_index: usize, + request: Request, + dir: Dir, + peri_addr: *mut W, + buffer: &'a mut [W], + _options: TransferOptions, + ) { + // "Preceding reads and writes cannot be moved past subsequent writes." 
+ fence(Ordering::SeqCst); + + let (mem_addr, mem_len) = super::slice_ptr_parts_mut(buffer); + + ch.cr().write(|w| w.set_reset(true)); + ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs + + if mem_addr & 0b11 != 0 { + panic!("circular address must be 4-byte aligned"); + } + + STATE.circular_address[channel_index].store(mem_addr, Ordering::Release); + let lli = STATE.circular_address[channel_index].as_ptr() as u32; + ch.llr().write(|w| { + match dir { + Dir::MemoryToPeripheral => w.set_usa(true), + Dir::PeripheralToMemory => w.set_uda(true), + } + // lower 16 bits of the memory address + w.set_la(((lli >> 2usize) & 0x3fff) as u16); + }); + ch.lbar().write(|w| { + // upper 16 bits of the address of lli1 + w.set_lba((lli >> 16usize) as u16); + }); + + let data_size = W::size(); + ch.tr1().write(|w| { + w.set_sdw(data_size.into()); + w.set_ddw(data_size.into()); + w.set_sinc(dir == Dir::MemoryToPeripheral); + w.set_dinc(dir == Dir::PeripheralToMemory); + }); + ch.tr2().write(|w| { + w.set_dreq(match dir { + Dir::MemoryToPeripheral => vals::ChTr2Dreq::DESTINATIONPERIPHERAL, + Dir::PeripheralToMemory => vals::ChTr2Dreq::SOURCEPERIPHERAL, + }); + w.set_reqsel(request); + }); + ch.br1().write(|w| { + // BNDT is specified as bytes, not as number of transfers. 
+ w.set_bndt((mem_len * data_size.bytes()) as u16) + }); + + match dir { + Dir::MemoryToPeripheral => { + ch.sar().write_value(mem_addr as _); + ch.dar().write_value(peri_addr as _); + } + Dir::PeripheralToMemory => { + ch.sar().write_value(peri_addr as _); + ch.dar().write_value(mem_addr as _); + } + } + } + + fn clear_irqs(ch: &pac::gpdma::Channel) { + ch.fcr().modify(|w| { + w.set_htf(true); + w.set_tcf(true); + w.set_suspf(true); + }); + } + + fn is_running(ch: &pac::gpdma::Channel) -> bool { + !ch.sr().read().tcf() + } + + fn request_suspend(ch: &pac::gpdma::Channel) { + ch.cr().modify(|w| { + w.set_susp(true); + }); + } + + async fn stop(ch: &pac::gpdma::Channel, set_waker: &mut dyn FnMut(&Waker)) { + use core::sync::atomic::compiler_fence; + + Self::request_suspend(ch); + + //wait until cr.susp reads as true + poll_fn(|cx| { + set_waker(cx.waker()); + + compiler_fence(Ordering::SeqCst); + + let cr = ch.cr().read(); + if cr.susp() { + Poll::Ready(()) + } else { + Poll::Pending + } + }) + .await + } + + fn start(ch: &pac::gpdma::Channel) { + Self::clear_irqs(ch); + ch.cr().modify(|w| { + w.set_susp(false); + w.set_en(true); + w.set_tcie(true); + w.set_useie(true); + w.set_dteie(true); + w.set_suspie(true); + w.set_htie(true); + }); + } +} + +/// This is a Readable ring buffer. It reads data from a peripheral into a buffer. The reads happen in circular mode. +/// There are interrupts on complete and half complete. You should read half the buffer on every read. +pub struct ReadableRingBuffer<'a, C: Channel, W: Word> { + channel: PeripheralRef<'a, C>, + ringbuf: ReadableDmaRingBuffer<'a, W>, +} + +impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> { + /// Create a new Readable ring buffer. + pub unsafe fn new( + channel: impl Peripheral
<P = C>
+ 'a, + request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + options: TransferOptions, + ) -> Self { + into_ref!(channel); + + #[cfg(dmamux)] + super::dmamux::configure_dmamux(&mut channel, request); + + RingBuffer::configure( + &channel.regs().ch(channel.num()), + channel.index(), + request, + Dir::PeripheralToMemory, + peri_addr, + buffer, + options, + ); + + Self { + channel, + ringbuf: ReadableDmaRingBuffer::new(buffer), + } + } + + /// Start reading the peripheral in ciccular mode. + pub fn start(&mut self) { + let ch = &self.channel.regs().ch(self.channel.num()); + RingBuffer::start(ch); + } + + /// Request the transfer to stop. Use is_running() to see when the transfer is complete. + pub fn request_stop(&mut self) { + RingBuffer::request_suspend(&self.channel.regs().ch(self.channel.num())); + } + + /// Await until the stop completes. This is not used with request_stop(). Just call and await. + /// It will stop when the current transfer is complete. + pub async fn stop(&mut self) { + RingBuffer::stop(&self.channel.regs().ch(self.channel.num()), &mut |waker| { + self.set_waker(waker) + }) + .await + } + + /// Clear the buffers internal pointers. + pub fn clear(&mut self) { + self.ringbuf.clear(&mut DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + }); + } + + /// Read elements from the ring buffer + /// Return a tuple of the length read and the length remaining in the buffer + /// If not all of the elements were read, then there will be some elements in the buffer remaining + /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read + /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. 
+ pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> { + self.ringbuf.read( + &mut DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + }, + buf, + ) + } + + /// Read an exact number of elements from the ringbuffer. + /// + /// Returns the remaining number of elements available for immediate reading. + /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. + /// + /// Async/Wake Behavior: + /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point, + /// and when it wraps around. This means that when called with a buffer of length 'M', when this + /// ring buffer was created with a buffer of size 'N': + /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source. + /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning. + pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result { + self.ringbuf + .read_exact( + &mut DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + }, + buffer, + ) + .await + } + + /// The capacity of the ringbuffer + pub const fn cap(&self) -> usize { + self.ringbuf.cap() + } + + /// Set the waker for the DMA controller. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + } + .set_waker(waker); + } + + /// Return whether this transfer is still running. + pub fn is_running(&mut self) -> bool { + RingBuffer::is_running(&self.channel.regs().ch(self.channel.num())) + } +} + +impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} + +/// This is a Writable ring buffer. It writes data from a buffer to a peripheral. 
The writes happen in circular mode. +pub struct WritableRingBuffer<'a, C: Channel, W: Word> { + #[allow(dead_code)] //this is only read by the DMA controller + channel: PeripheralRef<'a, C>, + ringbuf: WritableDmaRingBuffer<'a, W>, +} + +impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> { + /// Create a new Writable ring buffer. + pub unsafe fn new( + channel: impl Peripheral
<P = C>
+ 'a, + request: Request, + peri_addr: *mut W, + buffer: &'a mut [W], + options: TransferOptions, + ) -> Self { + into_ref!(channel); + + #[cfg(dmamux)] + super::dmamux::configure_dmamux(&mut channel, request); + + RingBuffer::configure( + &channel.regs().ch(channel.num()), + channel.index(), + request, + Dir::MemoryToPeripheral, + peri_addr, + buffer, + options, + ); + + Self { + channel, + ringbuf: WritableDmaRingBuffer::new(buffer), + } + } + + /// Start writing to the peripheral in circular mode. + pub fn start(&mut self) { + RingBuffer::start(&self.channel.regs().ch(self.channel.num())); + } + + /// Await until the stop completes. This is not used with request_stop(). Just call and await. + pub async fn stop(&mut self) { + RingBuffer::stop(&self.channel.regs().ch(self.channel.num()), &mut |waker| { + self.set_waker(waker) + }) + .await + } + + /// Request the transfer to stop. Use is_running() to see when the transfer is complete. + pub fn request_stop(&mut self) { + // reads can be stopped by disabling the enable flag + let ch = &self.channel.regs().ch(self.channel.num()); + ch.cr().modify(|w| w.set_en(false)); + } + + /// Write elements directly to the raw buffer. + /// This can be used to fill the buffer before starting the DMA transfer. + pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { + self.ringbuf.write_immediate(buf) + } + + /// Clear the buffers internal pointers. + pub fn clear(&mut self) { + self.ringbuf.clear(&mut DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + }); + } + + /// Write elements from the ring buffer + /// Return a tuple of the length written and the length remaining in the buffer + pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> { + self.ringbuf.write( + &mut DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + }, + buf, + ) + } + + /// Write an exact number of elements to the ringbuffer. 
+ pub async fn write_exact(&mut self, buffer: &[W]) -> Result { + self.ringbuf + .write_exact( + &mut DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + }, + buffer, + ) + .await + } + + /// The capacity of the ringbuffer + pub const fn cap(&self) -> usize { + self.ringbuf.cap() + } + + /// Set the waker for the DMA controller. + pub fn set_waker(&mut self, waker: &Waker) { + DmaCtrlImpl { + channel: self.channel.reborrow(), + word_size: W::size(), + } + .set_waker(waker); + } + + /// Return whether this transfer is still running. + pub fn is_running(&mut self) -> bool { + RingBuffer::is_running(&self.channel.regs().ch(self.channel.num())) + } +} + +impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> { + fn drop(&mut self) { + self.request_stop(); + while self.is_running() {} + + // "Subsequent reads and writes cannot be moved ahead of preceding reads." + fence(Ordering::SeqCst); + } +} diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs index c9f7a3026d..ea78f2ae02 100644 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ b/embassy-stm32/src/dma/ringbuffer.rs @@ -263,6 +263,16 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> { self.cap() - dma.get_remaining_transfers() } + /// Write elements directl to the buffer. This should be done before the DMA is started. + pub fn write_immediate(&mut self, buffer: &[W]) -> Result<(usize, usize), OverrunError> { + if self.end != 0 { + return Err(OverrunError); + } + let written = self.copy_from(buffer, 0..self.cap()); + self.end = written % self.cap(); + Ok((written, self.cap() - written)) + } + /// Write an exact number of elements to the ringbuffer. 
pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result { let mut written_data = 0; diff --git a/embassy-stm32/src/rcc/u5.rs b/embassy-stm32/src/rcc/u5.rs index 81bdec881c..36529748e7 100644 --- a/embassy-stm32/src/rcc/u5.rs +++ b/embassy-stm32/src/rcc/u5.rs @@ -45,6 +45,18 @@ pub struct PllConfig { /// The multiplied clock – `source` divided by `m` times `n` – must be between 128 and 544 /// MHz. The upper limit may be lower depending on the `Config { voltage_range }`. pub n: Plln, + /// The divider for the P output. + /// + /// When used to drive the system clock, `source` divided by `m` times `n` divided by `r` + /// must not exceed 160 MHz. System clocks above 55 MHz require a non-default + /// `Config { voltage_range }`. + pub p: Plldiv, + /// The divider for the Q output. + /// + /// When used to drive the system clock, `source` divided by `m` times `n` divided by `r` + /// must not exceed 160 MHz. System clocks above 55 MHz require a non-default + /// `Config { voltage_range }`. + pub q: Plldiv, /// The divider for the R output. 
/// /// When used to drive the system clock, `source` divided by `m` times `n` divided by `r` @@ -60,6 +72,8 @@ impl PllConfig { source: PllSource::HSI, m: Pllm::DIV1, n: Plln::MUL10, + p: Plldiv::DIV3, + q: Plldiv::DIV2, r: Plldiv::DIV1, } } @@ -70,7 +84,9 @@ impl PllConfig { source: PllSource::MSIS(Msirange::RANGE_48MHZ), m: Pllm::DIV3, n: Plln::MUL10, - r: Plldiv::DIV1, + p: Plldiv::DIV3, + q: Plldiv::DIV12, + r: Plldiv::DIV2, } } } @@ -301,7 +317,9 @@ pub(crate) unsafe fn init(config: Config) { RCC.pll1divr().modify(|w| { // Set the VCO multiplier w.set_plln(pll.n); - // Set the R output divisor + // Set the divisors + w.set_pllp(pll.p); + w.set_pllq(pll.q); w.set_pllr(pll.r); }); diff --git a/embassy-stm32/src/sai/mod.rs b/embassy-stm32/src/sai/mod.rs index ef88021845..f594beb686 100644 --- a/embassy-stm32/src/sai/mod.rs +++ b/embassy-stm32/src/sai/mod.rs @@ -428,6 +428,9 @@ impl MasterClockDivider { } } +/// Use this value for a frame length of 256. (256 won't fit in u8). +pub const MAX_FRAME_LENGTH: u8 = 0; + /// [`SAI`] configuration. 
#[allow(missing_docs)] #[non_exhaustive] @@ -475,7 +478,7 @@ impl Default for Config { slot_enable: 0b11, data_size: DataSize::Data16, stereo_mono: StereoMono::Stereo, - bit_order: BitOrder::LsbFirst, + bit_order: BitOrder::MsbFirst, frame_sync_offset: FrameSyncOffset::BeforeFirstBit, frame_sync_polarity: FrameSyncPolarity::ActiveLow, frame_sync_active_level_length: word::U7(16), @@ -536,7 +539,6 @@ fn get_ring_buffer<'d, T: Instance, C: Channel, W: word::Word>( tx_rx: TxRx, ) -> RingBuffer<'d, C, W> { let opts = TransferOptions { - half_transfer_ir: true, //the new_write() and new_read() always use circular mode ..Default::default() }; @@ -790,7 +792,11 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> { w.set_fspol(config.frame_sync_polarity.fspol()); w.set_fsdef(config.frame_sync_definition.fsdef()); w.set_fsall(config.frame_sync_active_level_length.0 as u8 - 1); - w.set_frl(config.frame_length - 1); + if config.frame_length == MAX_FRAME_LENGTH { + w.set_frl(255); + } else { + w.set_frl(config.frame_length - 1); + } }); ch.slotr().modify(|w| { @@ -863,6 +869,41 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> { ch.cr2().modify(|w| w.set_mute(value)); } + /// Returns true if the hardware is running. + pub fn is_running(&mut self) -> bool { + match &mut self.ring_buffer { + RingBuffer::Writable(buffer) => buffer.is_running(), + RingBuffer::Readable(buffer) => buffer.is_running(), + } + } + + /// Stops the hardware from reading/writing once the current buffers are exhausted. + /// After awaiting, the hardware will be off. + pub async fn stop(&mut self) { + match &mut self.ring_buffer { + RingBuffer::Writable(buffer) => buffer.stop().await, + RingBuffer::Readable(buffer) => buffer.stop().await, + } + } + + /// Clear the ring buffer. Doesn't write any value. Just resets the internal pointers. 
+ pub fn clear(&mut self) { + match &mut self.ring_buffer { + RingBuffer::Writable(buffer) => buffer.clear(), + RingBuffer::Readable(buffer) => buffer.clear(), + } + } + + /// Write elements directly to the raw buffer. + /// This should be used before starting the audio stream. This will give the CPU time to + /// prepare the next audio frame while the initial audio frame is playing. + pub fn write_immediate(self: &mut Self, source_buffer: &[W]) -> Result<(usize, usize), Error> { + match self.ring_buffer { + RingBuffer::Writable(ref mut rb) => Ok(rb.write_immediate(source_buffer)?), + RingBuffer::Readable(_) => Err(Error::NotATransmitter), + } + } + /// Write data to the SAI ringbuffer. /// /// This appends the data to the buffer and returns immediately. The diff --git a/examples/stm32u5/src/bin/usb_serial.rs b/examples/stm32u5/src/bin/usb_serial.rs index 44d1df4f10..75dab3f4df 100644 --- a/examples/stm32u5/src/bin/usb_serial.rs +++ b/examples/stm32u5/src/bin/usb_serial.rs @@ -27,6 +27,8 @@ async fn main(_spawner: Spawner) { m: Pllm::DIV2, n: Plln::MUL10, r: Plldiv::DIV1, + p: Plldiv::DIV1, + q: Plldiv::DIV1, }); config.rcc.hsi48 = Some(Hsi48Config { sync_from_usb: true }); // needed for USB