From f5cb2a731e148e0deaa9481ba58b1e314c0ee653 Mon Sep 17 00:00:00 2001 From: Andrew Stapleton Date: Mon, 3 Feb 2025 10:42:54 -0800 Subject: [PATCH 01/13] gpdma: add GPDMA driver This adds support for one-shot DMA transfers for all supported transfer types with linear addressing. It does not yet support linked list buffer loading, or support 2D addressing. Memory-to-memory transfers were quite thoroughly tested by the provided example, memory-to-peripheral and peripheral-to-memory transfers were tested with the SPI peripheral (working on a test branch). Peripheral-to-peripheral transfers are assumed to work given their similarity to memory-to-peripheral/peripheral-to-memory transfers, but will be properly tested at a later stage. The driver includes helper structs for peripheral transfers in the gpdma::periph module which handle the common operations for setting up one-directional and full-duplex transfers using one, or two channels, respectively. --- Cargo.toml | 1 + examples/dma.rs | 204 +++++++++ src/gpdma.rs | 530 +++++++++++++++++++++++ src/gpdma/ch.rs | 774 ++++++++++++++++++++++++++++++++++ src/gpdma/config.rs | 412 ++++++++++++++++++ src/gpdma/config/transform.rs | 319 ++++++++++++++ src/gpdma/periph.rs | 246 +++++++++++ src/lib.rs | 3 + src/prelude.rs | 1 + 9 files changed, 2490 insertions(+) create mode 100644 examples/dma.rs create mode 100644 src/gpdma.rs create mode 100644 src/gpdma/ch.rs create mode 100644 src/gpdma/config.rs create mode 100644 src/gpdma/config/transform.rs create mode 100644 src/gpdma/periph.rs diff --git a/Cargo.toml b/Cargo.toml index da813f8..5e8726e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,6 +66,7 @@ defmt = [ cortex-m = { version = "^0.7.7", features = ["critical-section-single-core"] } stm32h5 = { package = "stm32h5", version = "0.16.0" } fugit = "0.3.7" +embedded-dma = "0.2" embedded-hal = "1.0.0" defmt = { version = "1.0.0", optional = true } paste = "1.0.15" diff --git a/examples/dma.rs b/examples/dma.rs new file mode 
100644 index 0000000..7ad2bcd --- /dev/null +++ b/examples/dma.rs @@ -0,0 +1,204 @@ +// #![deny(warnings)] +#![no_main] +#![no_std] + +mod utilities; + +use core::mem::MaybeUninit; + +use cortex_m_rt::entry; +use cortex_m_semihosting::debug; +use stm32h5xx_hal::{ + gpdma::{config::transform::*, DmaConfig, DmaTransfer}, + pac, + prelude::*, +}; + +static mut SOURCE_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::uninit(); +static mut DEST_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::zeroed(); +static mut DEST_HALF_WORDS: MaybeUninit<[u16; 20]> = MaybeUninit::uninit(); +static mut SOURCE_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); +static mut DEST_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); + +fn u8_to_u8_sequential() -> (&'static [u8; 40], &'static mut [u8; 40]) { + let buf: &mut [MaybeUninit; 40] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) + as *mut [MaybeUninit; 40]) + }; + + for (i, value) in buf.iter_mut().enumerate() { + unsafe { + value.as_mut_ptr().write(i as u8); + } + } + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_BYTES.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; + + dest.fill(0); + + (src, dest) +} + +fn u32_to_u32_transform() -> (&'static [u32; 10], &'static mut [u32; 10]) { + let buf: &mut [MaybeUninit; 10] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) + as *mut [MaybeUninit; 10]) + }; + + buf.fill(MaybeUninit::new(0x12345678)); + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_WORDS.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; + + dest.fill(0); + (src, dest) +} + +fn u32_to_u16_truncate() -> (&'static [u32; 10], &'static mut [u16; 20]) { + let buf: &mut [MaybeUninit; 10] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) + as *mut [MaybeUninit; 10]) + }; + + buf.fill(MaybeUninit::new(0x12345678)); + + #[allow(static_mut_refs)] 
// TODO: Fix this + let src = unsafe { SOURCE_WORDS.assume_init_ref() }; + + let dest = unsafe { + (*core::ptr::addr_of_mut!(DEST_HALF_WORDS)).assume_init_mut() + }; + + dest.fill(0); + (src, dest) +} + +fn u32_to_u8_unpack() -> (&'static [u32; 10], &'static mut [u8; 40]) { + let buf: &mut [MaybeUninit; 10] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) + as *mut [MaybeUninit; 10]) + }; + + buf.fill(MaybeUninit::new(0x12345678)); + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_WORDS.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; + + dest.fill(0); + (src, dest) +} + +fn u8_to_u32_pack() -> (&'static [u8; 40], &'static mut [u32; 10]) { + let buf: &mut [MaybeUninit; 40] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) + as *mut [MaybeUninit; 40]) + }; + + for chunk in buf.chunks_mut(4) { + unsafe { + chunk[0].as_mut_ptr().write(0x78); + chunk[1].as_mut_ptr().write(0x56); + chunk[2].as_mut_ptr().write(0x34); + chunk[3].as_mut_ptr().write(0x12); + } + } + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_BYTES.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; + + dest.fill(0); + (src, dest) +} + +#[entry] +fn main() -> ! 
{ + utilities::logger::init(); + + let dp = pac::Peripherals::take().unwrap(); + + let pwr = dp.PWR.constrain(); + let pwrcfg = pwr.vos0().freeze(); + + // Constrain and Freeze clock + let rcc = dp.RCC.constrain(); + let ccdr = rcc.sys_ck(250.MHz()).freeze(pwrcfg, &dp.SBS); + + let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); + + let (source_buf, dest_buf) = u8_to_u8_sequential(); + + let channel = channels.0; + let config = DmaConfig::new(); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + assert_eq!(source_buf, dest_buf); + + let (source_buf, dest_buf) = u32_to_u32_transform(); + let config = DmaConfig::new().with_data_transform( + DataTransform::builder() + .swap_destination_half_words() + .swap_destination_half_word_byte_order(), + ); + + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let expected = [0x78563412; 10]; + assert_eq!(expected, *dest_buf); + + let (source_buf, dest_buf) = u32_to_u16_truncate(); + let config = DmaConfig::new().with_data_transform( + DataTransform::builder().left_align_right_truncate(), + ); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let expected = [0x1234; 10]; + assert_eq!(expected, (*dest_buf)[0..10]); + + let (source_buf, dest_buf) = u32_to_u8_unpack(); + let config = + DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let expected = [0x78, 0x56, 0x34, 0x12]; + assert_eq!(expected, (*dest_buf)[0..4]); + assert_eq!(expected, (*dest_buf)[36..40]); + + let 
(source_buf, dest_buf) = u8_to_u32_pack(); + let config = + DmaConfig::new().with_data_transform(DataTransform::builder().pack()); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let expected = [0x12345678; 10]; + assert_eq!(expected, (*dest_buf)); + assert_eq!(expected, (*dest_buf)); + + loop { + debug::exit(debug::EXIT_SUCCESS) + } +} diff --git a/src/gpdma.rs b/src/gpdma.rs new file mode 100644 index 0000000..9117f86 --- /dev/null +++ b/src/gpdma.rs @@ -0,0 +1,530 @@ +//! The GPDMA is the general purpose DMA engine in use on the STM32H5 family of processors. It is +//! used to perform programmable data transfers that are offloaded from the CPU to the DMA engine. +//! +//! The GPDMA can perform the following transfers from a *source* address to a *destination* +//! address: +//! - Memory to memory +//! - Memory to peripheral +//! - Peripheral to memory +//! - Peripheral to peripheral +//! +//! Each GPDMA has 8 channels. Each channel can service any hardware request (or memory to memory +//! transfer) that is supported by the processor (ie. they're not tied to specific channels). All +//! channels support direct and linked-buffer transfers. However, the channels do have different +//! capabilities (see RM0492 Rev 3 section 15.3.2 for full details), notably that channels 0-5 can +//! only service transfers in a linear address space, while channels 6 & 7 can service transfers +//! using a 2D addressing scheme. Both GPDMA peripherals support the same requests/channel +//! capabilities. +//! +//! # Usage +//! At the most basic level transfers take a *source* address and a *destination* address and +//! transfers the data from the source to the destination. The [embedded-dma] traits `ReadBuffer` +//! and `WriteBuffer` represent a source and destination, respectively. +//! +//! ## Memory to memory transfers +//! 
As long as the buffers satisfy the constraints of embedded-dma's `ReadBuffer` and `WriteBuffer` +//! traits, they can be used directly with the Transfer API: +//! ``` +//! use stm32h5xx_hal::{pac, gpdma::{DmaConfig, DmaTransfer}; +//! +//! let source_buf = ... // source buffer +//! let dest_buf = ... // destination buffer +//! +//! let dp = pac::Peripherals::take().unwrap(); +//! let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); +//! let channel = channels.0 +//! let config = DmaConfig::default(); +//! let mut transfer = DmaTransfer::memory_to_memory(config, channel, source_buf, dest_buf); +//! transfer.start().unwrap(); +//! transfer.wait_for_transfer_complete().unwrap(); +//! ``` +//! +//! ## Memory to peripheral transfers +//! +//! The peripheral must provide a `WriteBuffer` implementation for its data register to which the +//! DMA will write. Then it can be used similarly to the memory to memory transfer. The `Transfer` +//! API does provide for performing an operation immediately after enabling the DMA channel, via the +//! Transfer::start_with method, which allows for a closure to be provided. Additionally, a +//! hardware request line must be specified to the Config in order to connect the peripheral to the +//! DMA channel. Another additional option for these transfers is to perform block requests or burst +//! requests. +//! +//! ## Peripheral to memory transfers +//! +//! The peripheral must provide a `ReadBuffer` implementation for its data register from which the +//! DMA will read. Otherwise it is used similarly to Peripheral to memory transfers, including the +//! additional configuration requirements/options specified above. In addition, peripheral flow +//! control mode can be used to enable the peripheral to early terminate a transaction. Per RM0492 +//! Rev 3 section 15.3.6, this is only used by the I3C peripheral, and only on channels 0 and 7. +//! +//! ## Peripheral to peripheral transfers +//! +//! 
These work similarly to the peripheral to memory transfers, but the peripheral driving the +//! request must be identified via the typing of the TransferType implementation. +//! +//! ## Data transforms +//! +//! The GPDMA provides a data transformation pipeline which facilitates transforms for transfers +//! between peripherals or memory that have different source and destination data widths or byte +//! representations (e.g. little endian vs big endian) with zero CPU overhead. See +//! `config::DataTransformBuilder` for more information on it. +//! +//! # Channel/transfer arbitration +//! +//! Every transfer is assigned a priority and a AHB port assignments for each of it its source and +//! destination. The transfer priority is used by the GPDMA controller to arbitrate between requests +//! that are both ready to transfer data via one of the AHB ports. + +use crate::{ + pac::{gpdma1, GPDMA1, GPDMA2}, + rcc::{rec, ResetEnable}, + Sealed, +}; +use core::{ + marker::PhantomData, + ops::Deref, + sync::atomic::{fence, Ordering}, +}; +use embedded_dma::{ReadBuffer, Word as DmaWord, WriteBuffer}; + +mod ch; +pub mod config; +pub mod periph; + +pub use ch::{ + DmaChannel, DmaChannel0, DmaChannel1, DmaChannel2, DmaChannel3, + DmaChannel4, DmaChannel5, DmaChannel6, DmaChannel7, +}; +pub use config::DmaConfig; +use config::{ + HardwareRequest, MemoryToMemory, MemoryToPeripheral, PeripheralRequest, + PeripheralSource, PeripheralToMemory, PeripheralToPeripheral, + PeripheralToPeripheralDirection, TransferDirection, TransferType, +}; + +/// Supported word types for the STM32H5 GPDMA implementation. +/// +/// Currently only u8, u16, and u32 word types are supported. Signed types are currently not +/// supported because they would add a fair bit of complexity/redundancy to the DataTransform +/// implementation. This is easy to work around by having buffers of signed types implement Deref +/// to an unsigned type of the same width. 
+pub trait Word: DmaWord + Default + Copy {} + +impl Word for u32 {} +impl Word for u16 {} +impl Word for u8 {} + +/// Errors that can occur during operation +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Error { + /// The DMA determined that a user setting was invalid while starting a transfer. + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + UserSettingError, + /// An error occurred transferring data during a transfer + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + DataTransferError, + /// An error occurred loading a linked transfer configuration + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + LinkTransferError, + + /// Resume was called on a channel that was not previously suspended + NotSuspendedError, +} + +pub trait GpdmaExt { + fn channels(self, rec: DMA::Rec) -> DmaChannels; +} + +impl GpdmaExt for DMA { + fn channels(self, rec: DMA::Rec) -> DmaChannels { + DmaChannels::new(self, rec) + } +} + +pub trait Instance: Sealed + Deref { + type Rec: ResetEnable; + + fn ptr() -> *const gpdma1::RegisterBlock; + + /// Access channel registers. Valid for channels 0-5 only. + /// # Safety + /// This function is unsafe because it allows access to the DMA channel registers + /// without enforcing exclusive access or checking that the channel index is valid. + /// The caller must ensure that the channel index is within bounds and that no data races occur. + unsafe fn ch(channel: usize) -> &'static gpdma1::CH { + (*Self::ptr()).ch(channel) + } + + /// Access 2D channel registers. Valid for channels 6 and 7 only. + /// # Safety + /// This function is unsafe because it allows access to the DMA channel registers + /// without enforcing exclusive access or checking that the channel index is valid. + /// The caller must ensure that the channel index is within bounds and that no data races occur. 
+ unsafe fn ch2d(channel: usize) -> &'static gpdma1::CH2D { + // Note (unsafe): only accessing registers belonging to specific channel + (*Self::ptr()).ch2d(channel - 6) + } + + fn rec() -> Self::Rec; +} + +impl Sealed for GPDMA1 {} +impl Sealed for GPDMA2 {} + +impl Instance for GPDMA1 { + type Rec = rec::Gpdma1; + + fn ptr() -> *const gpdma1::RegisterBlock { + GPDMA1::ptr() + } + + fn rec() -> Self::Rec { + Self::Rec { + _marker: PhantomData, + } + } +} + +impl Instance for GPDMA2 { + type Rec = rec::Gpdma2; + + fn ptr() -> *const gpdma1::RegisterBlock { + GPDMA2::ptr() + } + + fn rec() -> Self::Rec { + Self::Rec { + _marker: PhantomData, + } + } +} + +/// DmaChannels represents the set of channels on each GPDMA peripheral. To use, simply move the +/// desired channel out of the tuple: +/// +/// ``` +/// let dp = pac::Peripherals::take().unwrap(); +/// let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); +/// let channel = channels.0; +/// ``` +#[allow(private_interfaces)] +pub struct DmaChannels( + pub DmaChannel0, + pub DmaChannel1, + pub DmaChannel2, + pub DmaChannel3, + pub DmaChannel4, + pub DmaChannel5, + pub DmaChannel6, + pub DmaChannel7, +); + +impl DmaChannels { + /// Splits the DMA peripheral into channels. + pub(super) fn new(_regs: DMA, rec: DMA::Rec) -> Self { + let _ = rec.reset().enable(); + Self( + DmaChannel0::new(), + DmaChannel1::new(), + DmaChannel2::new(), + DmaChannel3::new(), + DmaChannel4::new(), + DmaChannel5::new(), + DmaChannel6::new(), + DmaChannel7::new(), + ) + } +} + +/// DmaTransfer represents a single transfer operation on a GPDMA channel. It is created using the +/// [`DmaTransfer::memory_to_memory`], [`DmaTransfer::memory_to_peripheral`], +/// [`DmaTransfer::peripheral_to_memory`], or [`DmaTransfer::peripheral_to_peripheral`] +/// methods, which take a channel and the source and destination buffers. The transfer can then be +/// started using the [`DmaTransfer::start`] or [`DmaTransfer::start_nonblocking`] methods. 
+pub struct DmaTransfer<'a, CH> +where + CH: DmaChannel, +{ + channel: &'a CH, +} + +impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { + fn new( + channel: &'a CH, + config: DmaConfig, + src_ptr: *const S, + dest_ptr: *mut D, + size: usize, + ) -> Self + where + S: Word, + D: Word, + T: TransferType, + { + assert!(size <= u16::MAX as usize, "Max block size is {}", u16::MAX); + channel.reset_channel(); + channel.set_source(src_ptr); + channel.set_destination(dest_ptr); + channel.set_transfer_size_bytes(size); + channel.apply_config(config); + + Self { channel } + } + + /// Create a new memory-to-memory transfer with the channel, source and destination buffers + /// provided. + pub fn memory_to_memory( + config: DmaConfig, + channel: &'a CH, + source: &'a [S], + destination: &'a mut [D], + ) -> Self + where + S: Word, + D: Word, + { + let src_width = core::mem::size_of::(); + let dest_width = core::mem::size_of::(); + + let src_ptr = source.as_ptr(); + let src_size = core::mem::size_of_val(source); + let dest_ptr = destination.as_mut_ptr(); + let dest_size = core::mem::size_of_val(destination); + + // Size must be aligned with destination width if source width is greater than destination + // width and packing mode is used, therefore the maximum size must be dictated by + // destination size (width * count). When not in packing mode, this still holds true as + // the destination size must not be exceeded (so only read the same number of words from + // the source as there is room in the destination) + let size = if src_width > dest_width { + dest_size + } else { + // When the source data width is less than or equal to the destination data width, we + // just need to ensure that the destination buffer is large enough to hold all of the + // source data. 
+ assert!(src_size <= dest_size, "Transfer size ({src_size} bytes) will overflow the destination buffer ({dest_size} bytes)!"); + src_size + }; + + // We also need to ensure that the destination + + Self::new::( + channel, config, src_ptr, dest_ptr, size, + ) + } + + /// Create a new memory-to-peripheral transfer with the channel, source buffer and destination + /// peripheral provided. + pub fn memory_to_peripheral( + config: DmaConfig, + channel: &'a CH, + source: &'a [S], + mut destination: D, + ) -> Self + where + S: Word, + D: WriteBuffer, + { + let src_ptr = source.as_ptr(); + let src_size = core::mem::size_of_val(source); + let (dest_ptr, _) = unsafe { destination.write_buffer() }; + + Self::new::( + channel, config, src_ptr, dest_ptr, src_size, + ) + .apply_hardware_request_config(config) + } + + /// Create a new peripheral-to-memory transfer with the channel, source peripheral and + /// destination buffer provided. + pub fn peripheral_to_memory( + config: DmaConfig, + channel: &'a CH, + source: S, + destination: &'a mut [D], + ) -> Self + where + S: ReadBuffer, + D: Word, + { + let (src_ptr, _) = unsafe { source.read_buffer() }; + + let dest_ptr = destination.as_mut_ptr(); + let dest_size = core::mem::size_of_val(destination); + + Self::new::( + channel, config, src_ptr, dest_ptr, dest_size, + ) + .apply_hardware_request_config(config) + .apply_peripheral_source_config(config) + } + + /// Create a new peripheral-to-peripheral transfer with source and destination peripherals + /// provided. 
+ pub fn peripheral_to_peripheral( + config: DmaConfig, S::Word, D::Word>, + channel: &'a CH, + source: S, + mut destination: D, + ) -> Self + where + S: ReadBuffer, + D: WriteBuffer, + T: PeripheralToPeripheralDirection, + { + let (src_ptr, src_words) = unsafe { source.read_buffer() }; + let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; + + let size = match T::DIRECTION { + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ) => src_words * core::mem::size_of::(), + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ) => dest_words * core::mem::size_of::(), + _ => unreachable!(), + }; + + Self::new::>( + channel, config, src_ptr, dest_ptr, size, + ) + .apply_hardware_request_config(config) + .apply_peripheral_source_config(config) + } + + fn apply_hardware_request_config( + self, + config: DmaConfig, + ) -> Self { + self.channel.configure_hardware_request(config); + self + } + + fn apply_peripheral_source_config( + self, + config: DmaConfig, + ) -> Self { + self.channel.configure_peripheral_flow_control(config); + self + } +} + +impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { + fn start_transfer_internal(&self) { + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.enable(); + } + + /// Start a transfer. Does not block waiting for the transfer to start and does not check for + /// errors starting the transfer + pub fn start_nonblocking(&self) { + self.start_transfer_internal(); + } + + /// Start a transfer and block waiting for it to start. Returns an error if one occurred + /// starting the transfer. + pub fn start(&self) -> Result<(), Error> { + self.start_nonblocking(); + self.channel.wait_for_transfer_started() + } + + /// Suspend a transfer. 
Does not wait for channel transfer to be suspended and does not report + /// any errors that occur doing so. + pub fn suspend_nonblocking(&mut self) { + if self.channel.is_suspended() { + return; + } + self.channel.initiate_suspend(); + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + /// Suspend a transfer and block waiting for it to be suspended. Returns an error if one + /// occurred during the transfer or while suspending the transfer. + pub fn suspend(&mut self) { + if !self.channel.is_suspended() { + self.channel.suspend_transfer(); + } + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + /// Resume a transfer. Does not wait for channel transfer to be suspended and does not report + /// any errors that occur doing so. + pub fn resume_nonblocking(&mut self) -> Result<(), Error> { + if !self.channel.is_suspended() { + return Err(Error::NotSuspendedError); + } + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.initiate_resume(); + Ok(()) + } + + /// Resume a transfer and block waiting for it to be resumed. Returns an error if one occurred + /// resuming the transfer. + pub fn resume(&mut self) -> Result<(), Error> { + if !self.channel.is_suspended() { + return Err(Error::NotSuspendedError); + } + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.resume_transfer() + } + + pub fn is_running(&self) -> bool { + self.channel.is_running() + } + + /// Blocks waiting for a transfer to complete. Returns an error if one occurred during the + /// transfer. 
+ pub fn wait_for_transfer_complete(&self) -> Result<(), Error> { + let result = self.channel.wait_for_transfer_complete(); + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. + fence(Ordering::SeqCst); + result + } + + /// Blocks waiting for the half transfer complete event. Returns an error if one occurred during + /// the transfer. + pub fn wait_for_half_transfer_complete(&self) -> Result<(), Error> { + let result = self.channel.wait_for_half_transfer_complete(); + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. + fence(Ordering::SeqCst); + result + } + + pub fn enable_interrupts(&self) { + self.channel.enable_transfer_interrupts(); + } + + pub fn disable_interrupts(&self) { + self.channel.disable_transfer_interrupts(); + } + + /// Abort a transaction and wait for it to suspend the transfer before resetting the channel + pub fn abort(&mut self) { + self.channel.abort(); + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. 
+ fence(Ordering::SeqCst); + } +} diff --git a/src/gpdma/ch.rs b/src/gpdma/ch.rs new file mode 100644 index 0000000..efbfe5b --- /dev/null +++ b/src/gpdma/ch.rs @@ -0,0 +1,774 @@ +use core::{marker::PhantomData, ops::Deref}; + +use crate::stm32::gpdma1::{ + self, + ch::{CR, DAR, FCR, LBAR, SAR, SR, TR1, TR2}, +}; +use crate::Sealed; + +use super::{ + config::{ + transform::{DataTransform, PaddingAlignmentMode}, + AddressingMode, AhbPort, HardwareRequest, PeripheralRequest, + PeripheralSource, Priority, TransferDirection, TransferType, + }, + DmaConfig, Error, Instance, Word, +}; + +trait ChannelRegs: Sealed { + #[allow(unused)] // TODO: this will be used for linked-list transfers + fn lbar(&self) -> &LBAR; + fn fcr(&self) -> &FCR; + fn sr(&self) -> &SR; + fn cr(&self) -> &CR; + fn tr1(&self) -> &TR1; + fn tr2(&self) -> &TR2; + fn sar(&self) -> &SAR; + fn dar(&self) -> &DAR; + fn set_block_size(&self, size: u16); +} + +impl Sealed for gpdma1::CH {} +impl Sealed for gpdma1::CH2D {} + +impl ChannelRegs for gpdma1::CH { + fn lbar(&self) -> &LBAR { + self.lbar() + } + fn fcr(&self) -> &FCR { + self.fcr() + } + fn sr(&self) -> &SR { + self.sr() + } + fn cr(&self) -> &CR { + self.cr() + } + fn tr1(&self) -> &TR1 { + self.tr1() + } + fn tr2(&self) -> &TR2 { + self.tr2() + } + fn sar(&self) -> &SAR { + self.sar() + } + fn dar(&self) -> &DAR { + self.dar() + } + fn set_block_size(&self, size: u16) { + self.br1().modify(|_, w| w.bndt().set(size)); + } +} + +impl ChannelRegs for gpdma1::CH2D { + fn lbar(&self) -> &LBAR { + self.lbar() + } + fn fcr(&self) -> &FCR { + self.fcr() + } + fn sr(&self) -> &SR { + self.sr() + } + fn cr(&self) -> &CR { + self.cr() + } + fn tr1(&self) -> &TR1 { + self.tr1() + } + fn tr2(&self) -> &TR2 { + self.tr2() + } + fn sar(&self) -> &SAR { + self.sar() + } + fn dar(&self) -> &DAR { + self.dar() + } + fn set_block_size(&self, size: u16) { + self.br1().modify(|_, w| w.bndt().set(size)); + } +} + +/// DmaChannelRef provides access to individual 
channels of the GPDMA instance via Deref. +/// It implements the Channel and DmaChannel traits, and is exposed to user code via the DmaChannels +/// struct. It does not expose a public API to allow user code to use it directly, but should rather +/// be assigned to a DmaTransfer that manages a single transfer on a channel. +#[doc(hidden)] +pub struct DmaChannelRef { + _dma: PhantomData, + _ch: PhantomData, +} + +impl Deref + for DmaChannelRef +{ + type Target = gpdma1::CH; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // Note (unsafe): only accessing registers belonging to Channel N + unsafe { DMA::ch(N) } + } +} + +impl Deref + for DmaChannelRef +{ + type Target = gpdma1::CH2D; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // Note (unsafe): only accessing registers belonging to Channel N + unsafe { DMA::ch2d(N) } + } +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, +{ + pub(super) fn new() -> Self { + DmaChannelRef { + _dma: PhantomData, + _ch: PhantomData, + } + } +} + +impl Sealed for DmaChannelRef {} + +/// Non-error transfer event, including transfer complete and half-transfer events. Half-transfer +/// events can be used for double-buffering/linked buffer transfers. +pub enum TransferEvent { + /// Transfer complete event has occurred + TransferComplete, + /// Half transfer event has occurred + HalfTransferComplete, +} + +// Checks for errors in the captured status register provided, and returns a Result<(), Error> +macro_rules! 
check_error { + ($sr:expr) => { + if $sr.usef().is_trigger() { + Err(Error::UserSettingError) + } else if $sr.dtef().is_trigger() { + Err(Error::DataTransferError) + } else if $sr.ulef().is_trigger() { + Err(Error::LinkTransferError) + } else { + Ok(()) + } + }; +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn reset(&self) { + self.cr().modify(|_, w| w.reset().reset()); + } + + // TODO: remove clippy allow when used. This will likely be useful in the future + #[allow(unused)] + #[inline(always)] + pub(super) fn is_enabled(&self) -> bool { + self.cr().read().en().is_enabled() + } + + /// Initiates the suspension of a transfer + #[inline(always)] + pub(super) fn suspend(&self) { + self.cr().modify(|_, w| w.susp().suspended()); + } + + /// Resume transfer + #[inline(always)] + fn resume(&self) { + self.cr().modify(|_, w| w.susp().not_suspended()); + } + + fn clear_all_event_flags(&self) { + self.fcr().write(|w| { + w.tcf() + .clear() + .htf() + .clear() + .dtef() + .clear() + .usef() + .clear() + .ulef() + .clear() + .suspf() + .clear() + .tof() + .clear() + }); + + interrupt_clear_clock_sync_delay!(self.sr()); + } + + #[inline(always)] + fn check_transfer_event( + &self, + event: TransferEvent, + ) -> Result { + let sr = self.sr().read(); + check_error!(sr).inspect_err(|_| self.clear_all_event_flags())?; + let triggered = match event { + TransferEvent::TransferComplete => sr.tcf().is_trigger(), + TransferEvent::HalfTransferComplete => sr.htf().is_trigger(), + }; + + if triggered { + // Clear the event flag if it has been triggered + self.fcr().write(|w| match event { + TransferEvent::TransferComplete => w.tcf().clear(), + TransferEvent::HalfTransferComplete => w.htf().clear(), + }); + + interrupt_clear_clock_sync_delay!(self.sr()); + } + Ok(triggered) + } + + // TODO: Remove clippy allow when FIFO use is implemented + #[allow(unused)] + #[inline(always)] + fn fifo_level(&self) -> 
u8 { + self.sr().read().fifol().bits() + } + + /// Checks if the channel is idle. Ignores error conditions. + #[inline(always)] + fn is_idle(&self) -> bool { + self.sr().read().idlef().is_trigger() + } + + #[inline(always)] + fn check_idle(&self) -> Result { + let sr = self.sr().read(); + check_error!(sr)?; + Ok(sr.idlef().is_trigger()) + } + + #[inline(always)] + fn set_source_address(&self, addr: u32) { + self.sar().write(|w| w.sa().set(addr)); + } + + #[inline(always)] + fn set_destination_address(&self, addr: u32) { + self.dar().write(|w| w.da().set(addr)); + } + + #[inline(always)] + fn set_source_addressing_mode(&self, mode: AddressingMode) { + self.tr1().modify(|_, w| match mode { + AddressingMode::ContiguouslyIncremented => w.sinc().contiguous(), + AddressingMode::Fixed => w.sinc().fixed_burst(), + }); + } + + #[inline(always)] + fn set_destination_addressing_mode(&self, mode: AddressingMode) { + self.tr1().modify(|_, w| match mode { + AddressingMode::ContiguouslyIncremented => w.dinc().contiguous(), + AddressingMode::Fixed => w.dinc().fixed_burst(), + }); + } + + #[inline(always)] + fn set_source_burst_length(&self, burst_length: u8) { + self.tr1().modify(|_, w| w.dbl_1().set(burst_length)); + } + + #[inline(always)] + fn set_destination_burst_length(&self, burst_length: u8) { + self.tr1().modify(|_, w| w.sbl_1().set(burst_length)); + } + + #[inline(always)] + fn set_source_ahb_port(&self, port: AhbPort) { + self.tr1().modify(|_, w| match port { + AhbPort::Port0 => w.sap().port0(), + AhbPort::Port1 => w.sap().port1(), + }); + } + + #[inline(always)] + fn set_destination_ahb_port(&self, port: AhbPort) { + self.tr1().modify(|_, w| match port { + AhbPort::Port0 => w.dap().port0(), + AhbPort::Port1 => w.dap().port1(), + }); + } + + #[inline(always)] + fn set_source_data_width(&self, width: usize) { + self.tr1().modify(|_, w| match width { + 1 => w.sdw_log2().byte(), + 2 => w.sdw_log2().half_word(), + 4 => w.sdw_log2().word(), + _ => unreachable!(), + }); + } + 
+ #[inline(always)] + fn set_destination_data_width(&self, width: usize) { + self.tr1().modify(|_, w| match width { + 1 => w.ddw_log2().byte(), + 2 => w.ddw_log2().half_word(), + 4 => w.ddw_log2().word(), + _ => unreachable!(), + }); + } + + #[inline(always)] + fn set_source_byte_exchange(&self, source_byte_exchange: bool) { + self.tr1().modify(|_, w| { + if source_byte_exchange { + w.sbx().exchanged() + } else { + w.sbx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_padding_alignment_mode(&self, pam: PaddingAlignmentMode) { + self.tr1().modify(|_, w| match pam { + PaddingAlignmentMode::None => w, + _ => w.pam().set(pam.bits()), + }); + } + + #[inline(always)] + fn set_destination_half_word_exchange(&self, half_word_exchange: bool) { + self.tr1().modify(|_, w| { + if half_word_exchange { + w.dhx().exchanged() + } else { + w.dhx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_destination_byte_exchange(&self, destination_byte_exchange: bool) { + self.tr1().modify(|_, w| { + if destination_byte_exchange { + w.dbx().exchanged() + } else { + w.dbx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_priority(&self, priority: Priority) { + self.cr().modify(|_, w| match priority { + Priority::LowPriorityLowWeight => w.prio().low_prio_low_weight(), + Priority::LowPriorityMedWeight => w.prio().low_prio_mid_weight(), + Priority::LowPriorityHighWeight => w.prio().low_prio_high_weight(), + Priority::HighPriority => w.prio().high_prio(), + }); + } + + #[inline(always)] + fn set_transfer_type(&self, transfer_dir: TransferDirection) { + self.tr2().modify(|_, w| match transfer_dir { + TransferDirection::MemoryToMemory => w.swreq().software(), + TransferDirection::MemoryToPeripheral => { + w.swreq().hardware().dreq().destination() + } + TransferDirection::PeripheralToMemory => { + w.swreq().hardware().dreq().source() + } + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ) => 
w.swreq().hardware().dreq().source(), + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ) => w.swreq().hardware().dreq().destination(), + }); + } + + // TODO: Use enum? + #[inline(always)] + fn set_request_line(&self, request: u8) { + self.tr2() + .modify(|_, w| unsafe { w.reqsel().bits(request) }); + } + + #[inline(always)] + fn set_block_request_mode(&self, block_requests_enabled: bool) { + self.tr2().modify(|_, w| { + if block_requests_enabled { + w.breq().block() + } else { + w.breq().burst() + } + }); + } + + #[inline(always)] + fn set_peripheral_flow_control_mode( + &self, + peripheral_control_enabled: bool, + ) { + self.tr2().modify(|_, w| { + if peripheral_control_enabled { + w.pfreq().peripheral_control_mode() + } else { + w.pfreq().gpdma_control_mode() + } + }); + } +} + +/// The Channel trait is a private trait that abstracts over control of the linear and 2D channels. +/// It exposes to the DmaTransfer struct all the methods needed to control transfers on a particular +/// channel. It is private in order to not expose the low level functionality beyond the gpdma +/// module. +#[doc(hidden)] +pub(super) trait Channel { + fn enable(&self); + + fn is_suspended(&self) -> bool; + + /// Initiates the suspension of a transfer + fn initiate_suspend(&self); + + /// Resume transfer + fn initiate_resume(&self); + + /// Checks whether the channel transfer is complete. If the channel indicates an error occurred, + /// during the transaction an `Error`` is returned. + fn check_transfer_complete(&self) -> Result; + + /// Checks whether the channel half transfer complete event has triggered. If the channel + /// indicates an error occurred, during the transaction an `Error`` is returned. 
+ fn check_half_transfer_complete(&self) -> Result; + + /// Checks whether the channel transfer has started (has transitioned out of the idle state, or + /// the transfer complete event has already triggered if it is idle) + fn check_transfer_started(&self) -> Result; + + fn is_running(&self) -> bool; + + /// Reset the channel registers so it can be reused. + fn reset_channel(&self); + + /// Suspend the transfer and blocks until it has been suspended. Reports any that occur while + /// waiting for the transfer to suspend. + fn suspend_transfer(&self); + + /// Resumes a suspended transfer and blocks until the channel transitions out of the idle state + /// Reports any errors that occur resuming the transfer. + fn resume_transfer(&self) -> Result<(), Error>; + + /// Aborts an operation by suspending the transfer and resetting the channel. + fn abort(&self); + + /// Blocks waiting for a transfer to be started (or for it to be idle and complete). Reports any + /// errors that occur while waiting for the transfer to start. + fn wait_for_transfer_started(&self) -> Result<(), Error>; + + /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. + fn wait_for_transfer_complete(&self) -> Result<(), Error>; + /// Blocks waiting for a half transfer event to trigger. Reports any errors that occur during a + /// transfer. + fn wait_for_half_transfer_complete(&self) -> Result<(), Error>; + /// Apply a transfer configuration to the channel + fn apply_config( + &self, + config: DmaConfig, + ); + + /// Apply hardware request configuration to the channel. Not relevant to memory-to-memory + /// transfers. 
+ fn configure_hardware_request( + &self, + config: DmaConfig, + ); + + /// Apply peripheral flow control configuration for transactions where a peripheral is the + /// source + fn configure_peripheral_flow_control< + T: PeripheralSource, + S: Word, + D: Word, + >( + &self, + config: DmaConfig, + ); + + /// Apply a data transform to the channel transfer + fn apply_data_transform(&self, data_transform: DataTransform); + /// Set the source address. This sets the source address and data width. + fn set_source(&self, ptr: *const W); + + /// Set the destination address. This sets the destination address and data width + fn set_destination(&self, ptr: *mut W); + + /// Set the transfer size in bytes (not words!). Size must be aligned with destination width if + /// source width is greater than destination width and packing mode is used. Otherwise the size + /// must be aligned with the source data width. + fn set_transfer_size_bytes(&self, size: usize); + + fn enable_transfer_interrupts(&self); + + fn disable_transfer_interrupts(&self); +} + +impl Channel for DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ + /// Enable a transfer on the channel + #[inline(always)] + fn enable(&self) { + self.cr().modify(|_, w| w.en().enabled()); + } + + /// Checks whether the channel is suspended + #[inline(always)] + fn is_suspended(&self) -> bool { + self.sr().read().suspf().bit_is_set() + } + + /// Initiates the suspension of a transfer + fn initiate_suspend(&self) { + if self.is_suspended() { + return; + } + self.suspend(); + } + + /// Resume transfer + #[inline(always)] + fn initiate_resume(&self) { + self.resume(); + } + + /// Checks whether the channel transfer is complete. If the channel indicates an error occurred, + /// during the transaction an `Error`` is returned. 
+ fn check_transfer_complete(&self) -> Result { + self.check_transfer_event(TransferEvent::TransferComplete) + } + + /// Checks whether the channel half transfer complete event has triggered. If the channel + /// indicates an error occurred, during the transaction an `Error`` is returned. + fn check_half_transfer_complete(&self) -> Result { + self.check_transfer_event(TransferEvent::HalfTransferComplete) + } + + /// Checks whether the channel transfer has started (has transitioned out of the idle state, or + /// the transfer complete event has already triggered if it is idle) + fn check_transfer_started(&self) -> Result { + // TODO: Resolve multiple status register reads + match self.check_idle() { + // If we're idle we might have finished the transaction already, so also check if the + // transfer complete flag is set + Ok(true) => self.check_transfer_complete(), + Ok(false) => Ok(false), + Err(error) => Err(error), + } + } + + /// Return whether or not a transfer is in progress on the channel. + #[inline(always)] + fn is_running(&self) -> bool { + !self.is_idle() + } + + /// Reset the channel registers and clear status flags so the channel can be reused. + fn reset_channel(&self) { + self.reset(); + self.clear_all_event_flags(); + } + + /// Suspend the transfer and blocks until it has been suspended. Reports any that occur while + /// waiting for the transfer to suspend. + fn suspend_transfer(&self) { + self.initiate_suspend(); + while !self.is_suspended() {} + } + + /// Resumes a suspended transfer and blocks until the channel transitions out of the idle state + /// Reports any errors that occur resuming the transfer. + fn resume_transfer(&self) -> Result<(), Error> { + self.initiate_resume(); + while !self.check_transfer_started()? {} + Ok(()) + } + + /// Aborts an operation by suspending the transfer and resetting the channel. 
+ fn abort(&self) { + if !self.is_idle() { + self.suspend_transfer(); + } + + self.reset_channel(); + } + + /// Blocks waiting for a transfer to be started (or for it to be idle and complete). Reports any + /// errors that occur while waiting for the transfer to start. + fn wait_for_transfer_started(&self) -> Result<(), Error> { + while !self.check_transfer_started()? {} + Ok(()) + } + + /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. + fn wait_for_transfer_complete(&self) -> Result<(), Error> { + while !self.check_transfer_complete()? {} + Ok(()) + } + + /// Blocks waiting for a half transfer event to trigger. Reports any errors that occur during a + /// transfer. + fn wait_for_half_transfer_complete(&self) -> Result<(), Error> { + while !self.check_half_transfer_complete()? {} + Ok(()) + } + + /// Apply a transfer configuration to the channel + fn apply_config( + &self, + config: DmaConfig, + ) { + self.set_source_addressing_mode( + config.transfer_type.source_addressing_mode(), + ); + self.set_destination_addressing_mode( + config.transfer_type.destination_addressing_mode(), + ); + self.set_source_burst_length(config.source_burst_length); + self.set_destination_burst_length(config.destination_burst_length); + self.set_source_ahb_port(config.source_ahb_port); + self.set_destination_ahb_port(config.destination_ahb_port); + + self.set_transfer_type(T::DIRECTION); + self.set_priority(config.priority); + if config.enable_interrupts { + self.enable_transfer_interrupts(); + } + if let Some(data_transform) = config.data_transform { + self.apply_data_transform(data_transform); + } + } + + /// Apply hardware request configuration to the channel. Not relevant to memory-to-memory + /// transfers. 
+ fn configure_hardware_request( + &self, + config: DmaConfig, + ) { + self.set_block_request_mode(config.transfer_type.block_request()); + self.set_request_line(config.transfer_type.request()); + } + + /// Apply peripheral flow control configuration for transactions where a peripheral is the + /// source + fn configure_peripheral_flow_control< + T: PeripheralSource, + S: Word, + D: Word, + >( + &self, + config: DmaConfig, + ) { + self.set_peripheral_flow_control_mode( + config.transfer_type.peripheral_flow_control(), + ); + } + + /// Apply a data transform to the channel transfer + fn apply_data_transform(&self, data_transform: DataTransform) { + self.set_source_byte_exchange(data_transform.source_byte_exchange); + self.set_padding_alignment_mode(data_transform.padding_alignment); + self.set_destination_half_word_exchange( + data_transform.dest_half_word_exchange, + ); + self.set_destination_byte_exchange(data_transform.dest_byte_exchange); + } + + /// Set the source address. This sets the source address and data width. + fn set_source(&self, ptr: *const W) { + self.set_source_address(ptr as u32); + self.set_source_data_width(core::mem::size_of::()); + } + + /// Set the destination address. This sets the destination address and data width + fn set_destination(&self, ptr: *mut W) { + self.set_destination_address(ptr as u32); + self.set_destination_data_width(core::mem::size_of::()); + } + + /// Set the transfer size in bytes (not words!). Size must be aligned with destination width if + /// source width is greater than destination width and packing mode is used. Otherwise the size + /// must be aligned with the source data width. + fn set_transfer_size_bytes(&self, size: usize) { + self.set_block_size(size as u16); + } + + /// Enable transfer interrupts for the channel. This enables the transfer complete, + /// half-transfer complete, data transfer error and user setting error interrupts. 
This is + /// useful for starting a transfer that will be monitored by an interrupt handler. + #[inline(always)] + fn enable_transfer_interrupts(&self) { + self.cr().modify(|_, w| { + w.tcie().enabled().dteie().enabled().useie().enabled() + }); + } + /// Disable transfer interrupts for the channel. It is expected that this will be called from + /// an interrupt handler after a transfer is completed. + #[inline(always)] + fn disable_transfer_interrupts(&self) { + self.cr().modify(|_, w| { + w.tcie().disabled().dteie().disabled().useie().disabled() + }); + } +} + +/// DmaChannel trait provides the API contract that all GPDMA channels exposed to the user +/// implement. +#[allow(private_bounds)] +pub trait DmaChannel: Channel {} + +impl DmaChannel for DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ +} + +/// Channel 0 on GPDMA controller +pub type DmaChannel0 = DmaChannelRef; +/// Channel 1 on GPDMA controller +pub type DmaChannel1 = DmaChannelRef; +/// Channel 2 on GPDMA controller +pub type DmaChannel2 = DmaChannelRef; +/// Channel 3 on GPDMA controller +pub type DmaChannel3 = DmaChannelRef; +/// Channel 4 on GPDMA controller +pub type DmaChannel4 = DmaChannelRef; +/// Channel 5 on GPDMA controller +pub type DmaChannel5 = DmaChannelRef; +/// Channel 6 on GPDMA controller +pub type DmaChannel6 = DmaChannelRef; +/// Channel 7 on GPDMA controller +pub type DmaChannel7 = DmaChannelRef; diff --git a/src/gpdma/config.rs b/src/gpdma/config.rs new file mode 100644 index 0000000..89a6ac4 --- /dev/null +++ b/src/gpdma/config.rs @@ -0,0 +1,412 @@ +use core::marker::PhantomData; + +use super::Word; + +pub mod transform; +use transform::*; + +/// PeripheralRequests is used for peripheral-to-peripheral transfers to indicate which side of the +/// transfer is driving the request (ie. 
which has the hardware request assigned) +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum PeripheralRequest { + SourceRequest, + DestinationRequest, +} + +/// The TransferDirection represents the available options for transfer types +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum TransferDirection { + MemoryToMemory, + MemoryToPeripheral, + PeripheralToMemory, + PeripheralToPeripheral(PeripheralRequest), +} + +/// Addressing mode represents whether the source or destination address is contiguously incremented +/// or fixed during a transfer +#[derive(Clone, Copy, Default, Debug, PartialEq)] +pub enum AddressingMode { + #[default] + ContiguouslyIncremented, + Fixed, +} + +/// Transfer type encapsulates the transfer direction and the addressing mode for both the source +/// and destination of a transfer. +pub trait TransferType: crate::Sealed + Default { + const DIRECTION: TransferDirection; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::ContiguouslyIncremented + } + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::ContiguouslyIncremented + } +} + +/// Transfers to or from a peripheral have these additional options +pub trait HardwareRequest { + fn block_request(&self) -> bool; + fn enable_block_request(&mut self); + fn request(&self) -> u8; + fn set_request(&mut self, request: u8); +} + +/// When a peripheral is the source of the transfer it can optionally be configured in peripheral +/// flow control mode, when the peripheral supports it (currently just the I3C peripheral) +pub trait PeripheralSource { + fn peripheral_flow_control(&self) -> bool; + fn enable_peripheral_flow_control(&mut self); +} + +/// Represents the options specifically available for peripheral-to-memory transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct PeripheralToMemory { + request: u8, + block_request: bool, + peripheral_flow_control: bool, +} + +impl crate::Sealed for PeripheralToMemory {} + +impl 
TransferType for PeripheralToMemory { + const DIRECTION: TransferDirection = TransferDirection::PeripheralToMemory; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for PeripheralToMemory { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +impl PeripheralSource for PeripheralToMemory { + fn peripheral_flow_control(&self) -> bool { + self.peripheral_flow_control + } + + fn enable_peripheral_flow_control(&mut self) { + self.peripheral_flow_control = true; + } +} + +/// Represents the options specifically available for memory-to-peripheral transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct MemoryToPeripheral { + request: u8, + block_request: bool, +} + +impl crate::Sealed for MemoryToPeripheral {} + +impl TransferType for MemoryToPeripheral { + const DIRECTION: TransferDirection = TransferDirection::MemoryToPeripheral; + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for MemoryToPeripheral { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +/// Marker struct to indicate that the source peripheral drives the request via its request line. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct SourceRequest; + +/// Marker struct to indicate that the destination peripheral drives the request via its request +/// line. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct DestinationRequest; + +/// Indicates which peripheral in a peripheral-to-peripheral transfer is driving the request line +pub trait PeripheralToPeripheralDirection: Default + Clone + Copy { + const DIRECTION: TransferDirection; +} + +impl PeripheralToPeripheralDirection for SourceRequest { + const DIRECTION: TransferDirection = + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ); +} + +impl PeripheralToPeripheralDirection for DestinationRequest { + const DIRECTION: TransferDirection = + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ); +} + +/// Represents the options specifically available for peripheral-to-peripheral transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct PeripheralToPeripheral { + _peripheral_request: PhantomData, + request: u8, + block_request: bool, + peripheral_flow_control: bool, +} + +impl crate::Sealed for PeripheralToPeripheral {} + +impl TransferType + for PeripheralToPeripheral +{ + const DIRECTION: TransferDirection = T::DIRECTION; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for PeripheralToPeripheral { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +impl PeripheralSource for PeripheralToPeripheral { + fn peripheral_flow_control(&self) -> bool { + self.peripheral_flow_control + } + + fn enable_peripheral_flow_control(&mut self) { + self.peripheral_flow_control = true; + } +} + +/// Marker struct for memory-to-memory transfers (no special options) +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct 
MemoryToMemory; + +impl crate::Sealed for MemoryToMemory {} + +impl TransferType for MemoryToMemory { + const DIRECTION: TransferDirection = TransferDirection::MemoryToMemory; +} + +/// Priority of the transfer. Used by the GPDMA channel arbitration to determine which transfer +/// to service. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub enum Priority { + LowPriorityLowWeight = 0, + #[default] + LowPriorityMedWeight = 1, + LowPriorityHighWeight = 2, + HighPriority = 3, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub enum Continuation { + #[default] + Direct, + LinkedList, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub enum AhbPort { + #[default] + Port0 = 0, + Port1 = 1, +} + +const MAX_BURST_LEN: u8 = 64; + +/// Configuration options for a DMA transfer +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct DmaConfig { + _src_word: PhantomData, + _dest_word: PhantomData, + pub(super) transfer_type: T, + pub(super) priority: Priority, + pub(super) source_ahb_port: AhbPort, + pub(super) destination_ahb_port: AhbPort, + pub(super) source_burst_length: u8, + pub(super) destination_burst_length: u8, + pub(super) enable_interrupts: bool, + pub(super) data_transform: Option, +} + +impl DmaConfig { + /// Create a config with default settings + pub fn new() -> Self { + Self::default() + } + + /// Set the priority of the transfer. Default: Low Priority, Medium Weight + pub fn priority(mut self, priority: Priority) -> Self { + self.priority = priority; + self + } + + /// Set the source AHB port (0 or 1). Default: 0. + pub fn source_ahb_port(mut self, port: AhbPort) -> Self { + self.source_ahb_port = port; + self + } + + /// Set the destination AHB port (0 or 1). Default 0. + pub fn destination_ahb_port(mut self, port: AhbPort) -> Self { + self.destination_ahb_port = port; + self + } + + /// Set the source burst length in words (1 - 64 incl.). Default 1. 
+ pub fn source_burst_length(mut self, len: u8) -> Self { + assert!( + (1..=MAX_BURST_LEN).contains(&len), + "Must specify a burst length between 1 and 64" + ); + self.source_burst_length = len - 1; + self + } + + /// Set the destination burst length in words (1 - 64 incl.). Default 1. + pub fn destination_burst_length(mut self, len: u8) -> Self { + assert!( + (1..=MAX_BURST_LEN).contains(&len), + "Must specify a burst length between 1 and 64" + ); + self.destination_burst_length = len - 1; + self + } + + pub fn enable_interrupts(mut self) -> Self { + self.enable_interrupts = true; + self + } + + /// Apply a data transform via a closure that takes a DataTransformBuilder that provides APIs + /// relevant to the source and destination data widths. + pub fn with_data_transform( + mut self, + builder: DataTransformBuilder, + ) -> Self { + self.data_transform = Some(builder.transform); + self + } +} + +impl DmaConfig { + /// Enable peripheral flow control (only supported by I3C) + pub fn enable_peripheral_flow_control(mut self) -> Self { + self.transfer_type.enable_peripheral_flow_control(); + self + } +} + +impl DmaConfig { + /// Enable block requests for peripherals that support it + pub fn enable_hardware_block_requests(mut self) -> Self { + self.transfer_type.enable_block_request(); + self + } + + /// Select the hardware request line + pub fn with_request(mut self, request: u8) -> Self { + self.transfer_type.set_request(request); + self + } +} + +#[cfg(test)] +mod test { + use crate::gpdma::{ + config::{self, MemoryToMemory}, + DmaConfig, + }; + + use super::*; + + impl DataTransform { + fn new( + source_byte_exchange: bool, + padding_alignment: PaddingAlignmentMode, + dest_half_word_exchange: bool, + dest_byte_exchange: bool, + ) -> Self { + Self { + source_byte_exchange, + padding_alignment, + dest_half_word_exchange, + dest_byte_exchange, + } + } + } + + #[test] + fn test_data_transform() { + let builder: DataTransformBuilder = + 
DataTransform::builder().swap_source_middle_bytes(); + assert_eq!( + builder.transform, + DataTransform::new(true, Default::default(), false, false) + ); + } + + #[test] + fn test_with_data_transform() { + let config: DmaConfig = DmaConfig::new(); + let transform = DataTransform::builder() + .swap_source_middle_bytes() + .left_align_right_truncate() + .swap_destination_half_word_byte_order(); + let config = config.with_data_transform(transform); + assert_eq!( + config.data_transform, + Some(DataTransform::new( + true, + PaddingAlignmentMode::LeftAlignedRightTruncated, + false, + true + )) + ); + } +} diff --git a/src/gpdma/config/transform.rs b/src/gpdma/config/transform.rs new file mode 100644 index 0000000..603072b --- /dev/null +++ b/src/gpdma/config/transform.rs @@ -0,0 +1,319 @@ +//! The transform module provides a configuration builder to set up the data transformations +//! supported by the GPDMA peripheral. +//! +//! # Usage +//! use stm32h5xx_hal::gpdma::DmaConfig; +//! use stm32h5xx_hal::gpdma::config::transform::*; // This ensures relevant traits are in scope +//! +//! let config: DmaConfig = DmaConfig::new().with_data_transform( +//! DataTransform::builder() +//! .swap_source_middle_bytes() +//! .right_align_left_truncate() +//! .swap_destination_half_word_byte_order() +//! 
); +use core::marker::PhantomData; + +use super::Word; + +/// Represents the options available for the padding and alignment step in the data transformation +/// pipeline +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub(crate) enum PaddingAlignmentMode { + #[default] + None, + + // PAM1 - Source data width < Destination data width + ZeroPadded, + SignExtended, + Packed, + + // PAM2 - Source data width > Destination data width + RightAlignedLeftTruncated, + LeftAlignedRightTruncated, + Unpacked, +} + +impl PaddingAlignmentMode { + pub fn bits(&self) -> u8 { + match self { + PaddingAlignmentMode::None => { + panic!("Do not set PAM bits if no PAM mode was chosen") + } + PaddingAlignmentMode::ZeroPadded => 0, + PaddingAlignmentMode::SignExtended => 1, + PaddingAlignmentMode::Packed => 2, + PaddingAlignmentMode::RightAlignedLeftTruncated => 0, + PaddingAlignmentMode::LeftAlignedRightTruncated => 1, + PaddingAlignmentMode::Unpacked => 2, + } + } +} + +pub trait SourceByteExchange { + fn swap_source_middle_bytes(self) -> Self; +} + +pub trait PaddingAlignment { + fn right_align_zero_pad(self) -> Self; + fn right_align_sign_extend(self) -> Self; + fn pack(self) -> Self; +} + +pub trait TruncationAlignment { + fn right_align_left_truncate(self) -> Self; + fn left_align_right_truncate(self) -> Self; + fn unpack(self) -> Self; +} + +pub trait DestinationHalfWordExchange { + fn swap_destination_half_words(self) -> Self; +} + +pub trait DestinationByteExchange { + fn swap_destination_half_word_byte_order(self) -> Self; +} + +/// The DataTransformBuilder is used to configure the data transformation pipeline that the GPDMA +/// peripheral implements. 
+/// +/// Depending upon what word sizes are used for transfers, different pipeline steps are applicable: +/// +/// - The first possible step in the pipeline, the source byte exchange step is applicable to 32-bit +/// sources only and swaps the middle 2 bytes of the 32-bit word +/// - The next step is applicable when the source data width is not equal to the destination data +/// width: +/// - If the destination width is less than the source width, the data can be truncated (left or +/// right aligned) or unpacked into a FIFO to output all the data to subsequent destination +/// words (destination buffer size must be large enough to accomodate the size in bytes of the +/// unpacked source data) +/// - If the destination width is greater than the source width, the data can be zero- or +/// sign-extended, or it can be packed into the destination words. +/// - After the padding/alignment step, the order of the destination 16-bit half-words in a 32-bit +/// destination word can be swapped (only applicable if the destination word is 32-bit) +/// - Finally, the order of the bytes in each 16-bit destination (half-) word can be swapped (only +/// applicable for 32- and 16-bit destination word sizes) +/// +/// This builder allows each step to be specified, only when relevant to the source and destination +/// data-widths. +/// +/// To get a builder use [`DataTransform::builder()`]. Type inference is used to determine the +/// source and destination word sizes, so the builder can be created without specifying the types +/// explicitly. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct DataTransformBuilder { + _source_type: PhantomData, + _destination_type: PhantomData, + pub(super) transform: DataTransform, +} + +impl DataTransformBuilder { + pub fn new() -> Self { + Self::default() + } +} + +impl SourceByteExchange for DataTransformBuilder { + /// The order of the unaligned middle bytes of a 32-bit source word is exchanged + /// ie. 
B3B2B1B0 -> B3B1B2B0 + fn swap_source_middle_bytes(mut self) -> Self { + self.transform.source_byte_exchange = true; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 16 bits of the 32-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 16 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 16-bit words into the 32-bit destination words + /// ie: B3B2,B1B0 -> B3B2B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 24 bits of the 32-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 24 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 8-bit words into the 32-bit destination words + /// ie: B3,B2,B1,B0 -> B3B2B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 8 bits of the 16-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 8 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + 
self + } + + /// Pack subsequent 8-bit words into the 16-bit destination words + /// ie: B1,B0 -> B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 16-bits and truncate the rest (default) + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B5B4,B1B0 (see RM0492, Table 92) + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 16-bits and truncate the rest + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7B6,B3B2 (see RM0492, Table 92) + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each 32-bit word into separate 16-bit half-words. + /// Note that the destination buffer must have sufficient room for n*2 16-bit values where n is + /// the number of 32-bit words in the source buffer. + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7B6,B5B4,B3B2,B1B0 (see RM0492, Table 92) + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 8-bits and truncate the rest (default) + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B4,B0 + fn right_align_left_truncate(mut self) -> DataTransformBuilder { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 8-bits and truncate the rest + /// + /// i.e: B7B6B5B4,B3B2B1B0 -> B7,B3 + fn left_align_right_truncate(mut self) -> DataTransformBuilder { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each word or half-word into separate 8-bit bytes. 
+ /// Note that the destination buffer must have sufficient room for n*2 8-bit values where n is + /// the number of word or half-words in the source buffer. + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7,B6,B5,B4,B3,B2,B1,B0 + fn unpack(mut self) -> DataTransformBuilder { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 8 bits and truncate the rest (default) + /// + /// ie: B3B2,B1B0 -> B2,B0 (see RM0492, Table 92) + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 8 bits and truncate the rest + /// + /// ie: B3B2,B1B0 -> B3,B1 (see RM0492, Table 92) + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each 16-bit word into separate 8-bit bytes. + /// Note that the destination buffer must have sufficient room for n*2 8-bit values where n is + /// the number of 16-bit words in the source buffer. 
+ /// + /// ie: B3B2,B1B0 -> B3,B2,B1,B0 (see RM0492, Table 92) + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl DestinationHalfWordExchange for DataTransformBuilder { + /// Swap the order of the 16-bit half-words in the 32-bit destination word + fn swap_destination_half_words(mut self) -> Self { + self.transform.dest_half_word_exchange = true; + self + } +} + +impl DestinationByteExchange for DataTransformBuilder { + /// Swap the order of bytes in each 16-bit destination word + fn swap_destination_half_word_byte_order(mut self) -> Self { + self.transform.dest_byte_exchange = true; + self + } +} + +impl DestinationByteExchange for DataTransformBuilder { + /// Swap the order of bytes in each 16-bit destination half-word + fn swap_destination_half_word_byte_order(mut self) -> Self { + self.transform.dest_byte_exchange = true; + self + } +} + +/// DataTransform represents the configuration of the data transformation pipeline as produced +/// by the above builder structs. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub struct DataTransform { + pub(crate) source_byte_exchange: bool, + pub(crate) padding_alignment: PaddingAlignmentMode, + pub(crate) dest_half_word_exchange: bool, + pub(crate) dest_byte_exchange: bool, +} + +impl DataTransform { + pub fn builder() -> DataTransformBuilder { + DataTransformBuilder { + _source_type: PhantomData, + _destination_type: PhantomData, + transform: DataTransform::default(), + } + } +} diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs new file mode 100644 index 0000000..7d87b4a --- /dev/null +++ b/src/gpdma/periph.rs @@ -0,0 +1,246 @@ +//! This module provides traits and structs for managing DMA transactions for peripherals. +//! - By implementing the [`TxAddr`] and [`RxAddr`] traits, peripherals can be used with DMA for +//! memory-to-peripheral and peripheral-to-memory transfers, respectively. +//! 
- The [`Tx`] and [`Rx`] traits define the interface for initiating DMA transfers for +//! TX and RX operations, respectively. +//! - The [`DmaTx`], [`DmaRx`] structs implement the [`Tx`] and [`Rx`] traits, respectively, and +//! encapsulate the logic for initializing these transfers. +//! - The [`DmaDuplex`] struct combines both TX and RX capabilities, allowing for full-duplex +//! operations. +use core::marker::PhantomData; + +use crate::Sealed; + +use super::{ + DmaChannel, DmaConfig, DmaTransfer, MemoryToPeripheral, PeripheralToMemory, + ReadBuffer, Word, WriteBuffer, +}; + +/// `TxAddr` is a trait that provides a method to obtain the address of the transmit data register +/// of a peripheral. This is used to facilitate memory-to-peripheral DMA transactions. The +/// peripheral must implement this trait. +pub trait TxAddr { + /// Returns a pointer to the peripheral's transmit data register. + /// + /// # Safety + /// + /// The caller must ensure that the returned pointer is only used when it is valid to access + /// the peripheral's transmit data register, and that no data races or invalid memory accesses + /// occur. + unsafe fn tx_addr() -> *mut W; +} + +/// `RxAddr` is a trait that provides a method to obtain the address of the receive data register +/// of a peripheral. This is used to facilitate peripheral-to-memory DMA transactions. The +/// peripheral must implement this trait. +pub trait RxAddr { + /// Returns a pointer to the peripheral's receive data register. + /// + /// # Safety + /// + /// The caller must ensure that the returned pointer is only used when it is valid to access + /// the peripheral's receive data register, and that no data races or invalid memory accesses + /// occur. + unsafe fn rx_addr() -> *const W; +} + +/// The `Tx` trait defines the method needed for a peripheral DMA struct (ie. [`DmaTx`] or +/// [`DmaDuplex`]) that is used to initiate a memory-to-peripheral DMA transaction. 
It also +/// functions as a marker trait to indicate that the peripheral DMA struct can be used for +/// initiating transmissions. +pub trait Tx: Sealed { + type CH: DmaChannel; + fn init_tx_transfer<'a>( + &'a self, + config: DmaConfig, + words: &'a [W], + ) -> DmaTransfer<'a, Self::CH>; +} + +/// The `Rx` trait to defines the method needed for a peripheral DMA struct (ie. [`DmaRx`] or +/// [`DmaDuplex`]) that is used to initiate a peripheral-to-memory DMA transaction. It also +/// functions as a marker trait to indicate that the peripheral DMA struct can be used for +/// initiating receiving transfers. +pub trait Rx: Sealed { + type CH: DmaChannel; + fn init_rx_transfer<'a>( + &'a self, + config: DmaConfig, + words: &'a mut [W], + ) -> DmaTransfer<'a, Self::CH>; +} + +/// `DmaRx` encapsulates the initialization of a peripheral-to-memory DMA transaction for receiving +/// data. Used by peripheral DMA implementations. +pub struct DmaRx { + _periph: PhantomData, + _word: PhantomData, + channel: CH, +} + +impl DmaRx { + fn new(channel: CH) -> Self { + Self { + _periph: PhantomData, + _word: PhantomData, + channel, + } + } + + pub fn free(self) -> CH { + self.channel + } +} + +impl From for DmaRx { + fn from(channel: CH) -> Self { + Self::new(channel) + } +} + +unsafe impl, W: Word, CH> ReadBuffer + for &DmaRx +{ + type Word = W; + + unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { + (PERIPH::rx_addr(), 1) + } +} + +impl Sealed for DmaRx {} + +impl Rx for DmaRx +where + PERIPH: RxAddr, + CH: DmaChannel, + W: Word, +{ + type CH = CH; + fn init_rx_transfer<'a>( + &'a self, + config: DmaConfig, + words: &'a mut [W], + ) -> DmaTransfer<'a, CH> { + DmaTransfer::peripheral_to_memory(config, &self.channel, self, words) + } +} + +/// `DmaTx` encapsulates the initialization of a memory-to-peripheral DMA transaction for +/// transmitting data. Used by peripheral DMA implementations. 
+pub struct DmaTx { + _periph: PhantomData, + _word: PhantomData, + channel: CH, +} + +impl DmaTx { + fn new(channel: CH) -> Self { + Self { + _periph: PhantomData, + _word: PhantomData, + channel, + } + } + + pub fn free(self) -> CH { + self.channel + } +} + +impl From for DmaTx { + fn from(channel: CH) -> Self { + Self::new(channel) + } +} + +unsafe impl, W: Word, CH> WriteBuffer + for &DmaTx +{ + type Word = W; + + unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { + (PERIPH::tx_addr(), 1) + } +} + +impl Sealed for DmaTx {} + +impl Tx for DmaTx +where + PERIPH: TxAddr, + CH: DmaChannel, + W: Word, +{ + type CH = CH; + fn init_tx_transfer<'a>( + &'a self, + config: DmaConfig, + words: &'a [W], + ) -> DmaTransfer<'a, CH> { + DmaTransfer::memory_to_peripheral(config, &self.channel, words, self) + } +} + +/// `DmaDuplex` encapsulates the initialization of both memory-to-peripheral and +/// peripheral-to-memory DMA transaction for to enable setting up of full-duplex transmission and +/// reception of data. Used by peripheral DMA implementations. 
+pub struct DmaDuplex { + tx: DmaTx, + rx: DmaRx, +} + +impl DmaDuplex +where + PERIPH: TxAddr + RxAddr, + W: Word, + TX: DmaChannel, + RX: DmaChannel, +{ + pub fn new(tx: TX, rx: RX) -> Self { + Self { + tx: DmaTx::from(tx), + rx: DmaRx::from(rx), + } + } + + pub fn free(self) -> (TX, RX) { + (self.tx.free(), self.rx.free()) + } +} + +impl Sealed for DmaDuplex {} + +impl Tx for DmaDuplex +where + PERIPH: TxAddr + RxAddr, + W: Word, + TX: DmaChannel, + RX: DmaChannel, +{ + type CH = TX; + fn init_tx_transfer<'a>( + &'a self, + config: DmaConfig, + words: &'a [W], + ) -> DmaTransfer<'a, TX> { + self.tx.init_tx_transfer(config, words) + } +} + +impl Rx for DmaDuplex +where + PERIPH: TxAddr + RxAddr, + W: Word + Word, + TX: DmaChannel, + RX: DmaChannel, +{ + type CH = RX; + fn init_rx_transfer<'a>( + &'a self, + config: DmaConfig, + words: &'a mut [W], + ) -> DmaTransfer<'a, RX> { + self.rx.init_rx_transfer(config, words) + } +} diff --git a/src/lib.rs b/src/lib.rs index 3c27058..cc3c317 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -82,6 +82,9 @@ pub mod dwt; #[cfg(feature = "device-selected")] pub mod usb; +#[cfg(feature = "device-selected")] +pub mod gpdma; + #[cfg(feature = "device-selected")] mod sealed { pub trait Sealed {} diff --git a/src/prelude.rs b/src/prelude.rs index becb02a..a73e785 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -2,6 +2,7 @@ pub use crate::delay::DelayExt as _stm32h5xx_hal_delay_DelayExt; pub use crate::dwt::DwtExt as _stm32h5xx_hal_delay_DwtExt; +pub use crate::gpdma::GpdmaExt as _stm32h5xx_hal_gpdma_GpdmaExt; pub use crate::gpio::GpioExt as _stm32h5xx_hal_gpio_GpioExt; pub use crate::i2c::I2cExt as _stm32h5xx_hal_i2c_I2cExt; pub use crate::icache::ICacheExt as _stm32h5xx_hal_icache_ICacheExt; From c65d4575a77e867d71b3fe91935e591c1dcd003b Mon Sep 17 00:00:00 2001 From: astapleton Date: Thu, 31 Jul 2025 13:48:17 -0700 Subject: [PATCH 02/13] Fixes, simpler dma example - Use stack allocated buffers for dma example, - fix 
wait_for_transfer_complete/wait_for_half_transfer_complete - remove clock sync delays --- examples/dma.rs | 181 ++++++++++++------------------------------------ src/gpdma/ch.rs | 12 ++-- 2 files changed, 53 insertions(+), 140 deletions(-) diff --git a/examples/dma.rs b/examples/dma.rs index 7ad2bcd..373148b 100644 --- a/examples/dma.rs +++ b/examples/dma.rs @@ -4,8 +4,6 @@ mod utilities; -use core::mem::MaybeUninit; - use cortex_m_rt::entry; use cortex_m_semihosting::debug; use stm32h5xx_hal::{ @@ -14,114 +12,6 @@ use stm32h5xx_hal::{ prelude::*, }; -static mut SOURCE_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::uninit(); -static mut DEST_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::zeroed(); -static mut DEST_HALF_WORDS: MaybeUninit<[u16; 20]> = MaybeUninit::uninit(); -static mut SOURCE_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); -static mut DEST_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); - -fn u8_to_u8_sequential() -> (&'static [u8; 40], &'static mut [u8; 40]) { - let buf: &mut [MaybeUninit; 40] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) - as *mut [MaybeUninit; 40]) - }; - - for (i, value) in buf.iter_mut().enumerate() { - unsafe { - value.as_mut_ptr().write(i as u8); - } - } - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_BYTES.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; - - dest.fill(0); - - (src, dest) -} - -fn u32_to_u32_transform() -> (&'static [u32; 10], &'static mut [u32; 10]) { - let buf: &mut [MaybeUninit; 10] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) - as *mut [MaybeUninit; 10]) - }; - - buf.fill(MaybeUninit::new(0x12345678)); - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_WORDS.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; - - dest.fill(0); - (src, dest) -} - -fn u32_to_u16_truncate() -> (&'static [u32; 10], &'static mut [u16; 
20]) { - let buf: &mut [MaybeUninit; 10] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) - as *mut [MaybeUninit; 10]) - }; - - buf.fill(MaybeUninit::new(0x12345678)); - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_WORDS.assume_init_ref() }; - - let dest = unsafe { - (*core::ptr::addr_of_mut!(DEST_HALF_WORDS)).assume_init_mut() - }; - - dest.fill(0); - (src, dest) -} - -fn u32_to_u8_unpack() -> (&'static [u32; 10], &'static mut [u8; 40]) { - let buf: &mut [MaybeUninit; 10] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) - as *mut [MaybeUninit; 10]) - }; - - buf.fill(MaybeUninit::new(0x12345678)); - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_WORDS.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; - - dest.fill(0); - (src, dest) -} - -fn u8_to_u32_pack() -> (&'static [u8; 40], &'static mut [u32; 10]) { - let buf: &mut [MaybeUninit; 40] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) - as *mut [MaybeUninit; 40]) - }; - - for chunk in buf.chunks_mut(4) { - unsafe { - chunk[0].as_mut_ptr().write(0x78); - chunk[1].as_mut_ptr().write(0x56); - chunk[2].as_mut_ptr().write(0x34); - chunk[3].as_mut_ptr().write(0x12); - } - } - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_BYTES.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; - - dest.fill(0); - (src, dest) -} - #[entry] fn main() -> ! { utilities::logger::init(); @@ -137,66 +27,85 @@ fn main() -> ! 
{ let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); - let (source_buf, dest_buf) = u8_to_u8_sequential(); + // u8 to u8 + log::info!("u8 to u8 transfer"); + let src: [u8; 40] = core::array::from_fn(|i| i as u8); + let dest = &mut [0u8; 40]; let channel = channels.0; let config = DmaConfig::new(); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!(source_buf, dest_buf); + assert_eq!(src, *dest, "u8 to u8 transfer failed"); - let (source_buf, dest_buf) = u32_to_u32_transform(); + // u32 to u32 with data transform + log::info!("u32 to u32 with data transform"); + let src = [0x12345678u32; 10]; + let dest = &mut [0u32; 10]; let config = DmaConfig::new().with_data_transform( DataTransform::builder() .swap_destination_half_words() .swap_destination_half_word_byte_order(), ); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - let expected = [0x78563412; 10]; - assert_eq!(expected, *dest_buf); + assert_eq!( + [0x78563412; 10], *dest, + "u32 to u32 with data transform failed" + ); - let (source_buf, dest_buf) = u32_to_u16_truncate(); + // u32 to u16 with truncate + log::info!("u32 to u16 with truncate"); + let src = [0x12345678u32; 10]; + let dest = &mut [0u16; 20]; let config = DmaConfig::new().with_data_transform( DataTransform::builder().left_align_right_truncate(), ); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - let expected = [0x1234; 10]; - assert_eq!(expected, 
(*dest_buf)[0..10]); + assert_eq!( + [0x1234; 10], + (*dest)[0..10], + "u32 to u16 with truncate failed" + ); + assert_eq!([0; 10], (*dest)[10..20], "u32 to u16 with truncate failed"); - let (source_buf, dest_buf) = u32_to_u8_unpack(); + // u32 to u8 with unpack + log::info!("u32 to u8 with unpack"); + let src = [0x12345678u32; 10]; + let dest = &mut [0u8; 40]; let config = DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x78, 0x56, 0x34, 0x12]; - assert_eq!(expected, (*dest_buf)[0..4]); - assert_eq!(expected, (*dest_buf)[36..40]); - - let (source_buf, dest_buf) = u8_to_u32_pack(); + assert_eq!(expected, (*dest)[0..4], "u32 to u8 unpack failed"); + assert_eq!(expected, (*dest)[36..40], "u32 to u8 unpack failed"); + + // u8 to u32 with pack + log::info!("u8 to u32 with pack"); + let mut src = [0u8; 40]; + let dest = &mut [0u32; 10]; + for chunk in src.chunks_mut(4) { + chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); + } let config = DmaConfig::new().with_data_transform(DataTransform::builder().pack()); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - let expected = [0x12345678; 10]; - assert_eq!(expected, (*dest_buf)); - assert_eq!(expected, (*dest_buf)); + assert_eq!([0x12345678; 10], (*dest), "u8 to u32 with pack failed"); + + log::info!("All tests passed!"); loop { debug::exit(debug::EXIT_SUCCESS) diff --git a/src/gpdma/ch.rs b/src/gpdma/ch.rs index efbfe5b..a6827cd 100644 --- a/src/gpdma/ch.rs +++ b/src/gpdma/ch.rs @@ -213,8 +213,6 @@ where .tof() .clear() }); - - 
interrupt_clear_clock_sync_delay!(self.sr()); } #[inline(always)] @@ -235,8 +233,6 @@ where TransferEvent::TransferComplete => w.tcf().clear(), TransferEvent::HalfTransferComplete => w.htf().clear(), }); - - interrupt_clear_clock_sync_delay!(self.sr()); } Ok(triggered) } @@ -633,6 +629,10 @@ where /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. fn wait_for_transfer_complete(&self) -> Result<(), Error> { + if !self.is_running() { + return Ok(()); + } + while !self.check_transfer_complete()? {} Ok(()) } @@ -640,6 +640,10 @@ where /// Blocks waiting for a half transfer event to trigger. Reports any errors that occur during a /// transfer. fn wait_for_half_transfer_complete(&self) -> Result<(), Error> { + if !self.is_running() { + return Ok(()); + } + while !self.check_half_transfer_complete()? {} Ok(()) } From 69eab2a910424fcf2f126e8058ed993613e4e943 Mon Sep 17 00:00:00 2001 From: astapleton Date: Thu, 31 Jul 2025 14:28:34 -0700 Subject: [PATCH 03/13] more fixes - Add defmt derives - improve consistency in return values for DataTransformBuilder - Removed side effects from check_transfer_event and added a separate function to clear events - Add drop implementation to abort a transfer - Modified wait_for_transfer_complete and wait_for_half_transform_complete to: - explicitly clear event flags - consume, but forget self, so the drop implementation is not called. - misc documentation improvements --- src/gpdma.rs | 30 ++++++++-- src/gpdma/ch.rs | 108 ++++++++++++++++------------------ src/gpdma/config.rs | 12 ++++ src/gpdma/config/transform.rs | 6 +- 4 files changed, 91 insertions(+), 65 deletions(-) diff --git a/src/gpdma.rs b/src/gpdma.rs index 9117f86..6ad2fbf 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -493,37 +493,57 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Blocks waiting for a transfer to complete. Returns an error if one occurred during the /// transfer. 
- pub fn wait_for_transfer_complete(&self) -> Result<(), Error> { + pub fn wait_for_transfer_complete(self) -> Result<(), Error> { let result = self.channel.wait_for_transfer_complete(); // Preserve the instruction and bus sequence of the preceding operation and // the subsequent buffer access. fence(Ordering::SeqCst); + + core::mem::forget(self); // Prevents self from being dropped and attempting to abort result } /// Blocks waiting for the half transfer complete event. Returns an error if one occurred during /// the transfer. - pub fn wait_for_half_transfer_complete(&self) -> Result<(), Error> { + pub fn wait_for_half_transfer_complete(self) -> Result<(), Error> { let result = self.channel.wait_for_half_transfer_complete(); // Preserve the instruction and bus sequence of the preceding operation and // the subsequent buffer access. fence(Ordering::SeqCst); + + core::mem::forget(self); // Prevents self from being dropped and attempting to abort result } + /// Enable interrupts for this transfer. This will enable the transfer complete and half + /// transfer complete interrupts, as well as error interrupts. pub fn enable_interrupts(&self) { self.channel.enable_transfer_interrupts(); } + /// Disable interrupts for this transfer. pub fn disable_interrupts(&self) { self.channel.disable_transfer_interrupts(); } /// Abort a transaction and wait for it to suspend the transfer before resetting the channel - pub fn abort(&mut self) { - self.channel.abort(); + pub fn abort(self) { + // Allow Drop implementation to handle transfer abortion + } +} - // Preserve the instruction and bus sequence of the preceding disable and +impl<'a, CH> Drop for DmaTransfer<'a, CH> +where + CH: DmaChannel, +{ + fn drop(&mut self) { + if self.is_running() { + self.channel.abort(); + } + + self.disable_interrupts(); + + // Preserve the instruction and bus sequence of the preceding operation and // the subsequent buffer access. 
fence(Ordering::SeqCst); } diff --git a/src/gpdma/ch.rs b/src/gpdma/ch.rs index a6827cd..b0b41c5 100644 --- a/src/gpdma/ch.rs +++ b/src/gpdma/ch.rs @@ -196,6 +196,7 @@ where self.cr().modify(|_, w| w.susp().not_suspended()); } + /// Clear all event flags in the FCR register. fn clear_all_event_flags(&self) { self.fcr().write(|w| { w.tcf() @@ -216,27 +217,30 @@ where } #[inline(always)] + /// Checks if the specified transfer event has triggered or if an error has occurred. If an + /// error has occurred, it is returned. If the event has triggered, `Ok(true)` is returned. + /// Otherwise, if the event has not triggered, `Ok(false)` is returned. fn check_transfer_event( &self, event: TransferEvent, ) -> Result { let sr = self.sr().read(); - check_error!(sr).inspect_err(|_| self.clear_all_event_flags())?; + check_error!(sr)?; let triggered = match event { TransferEvent::TransferComplete => sr.tcf().is_trigger(), TransferEvent::HalfTransferComplete => sr.htf().is_trigger(), }; - if triggered { - // Clear the event flag if it has been triggered - self.fcr().write(|w| match event { - TransferEvent::TransferComplete => w.tcf().clear(), - TransferEvent::HalfTransferComplete => w.htf().clear(), - }); - } Ok(triggered) } + fn clear_transfer_event_flag(&self, event: TransferEvent) { + self.fcr().write(|w| match event { + TransferEvent::TransferComplete => w.tcf().clear(), + TransferEvent::HalfTransferComplete => w.htf().clear(), + }); + } + // TODO: Remove clippy allow when FIFO use is implemented #[allow(unused)] #[inline(always)] @@ -482,9 +486,11 @@ pub(super) trait Channel { /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. fn wait_for_transfer_complete(&self) -> Result<(), Error>; + /// Blocks waiting for a half transfer event to trigger. Reports any errors that occur during a /// transfer. 
fn wait_for_half_transfer_complete(&self) -> Result<(), Error>; + /// Apply a transfer configuration to the channel fn apply_config( &self, @@ -522,8 +528,13 @@ pub(super) trait Channel { /// must be aligned with the source data width. fn set_transfer_size_bytes(&self, size: usize); + /// Enable transfer interrupts for the channel. This enables the transfer complete, + /// half-transfer complete, data transfer error and user setting error interrupts. This is + /// useful for starting a transfer that will be monitored by an interrupt handler. fn enable_transfer_interrupts(&self); + /// Disable transfer interrupts for the channel. It is expected that this will be called from + /// an interrupt handler after a transfer is completed. fn disable_transfer_interrupts(&self); } @@ -533,19 +544,16 @@ where CH: ChannelRegs, Self: Deref, { - /// Enable a transfer on the channel #[inline(always)] fn enable(&self) { self.cr().modify(|_, w| w.en().enabled()); } - /// Checks whether the channel is suspended #[inline(always)] fn is_suspended(&self) -> bool { self.sr().read().suspf().bit_is_set() } - /// Initiates the suspension of a transfer fn initiate_suspend(&self) { if self.is_suspended() { return; @@ -553,26 +561,19 @@ where self.suspend(); } - /// Resume transfer #[inline(always)] fn initiate_resume(&self) { self.resume(); } - /// Checks whether the channel transfer is complete. If the channel indicates an error occurred, - /// during the transaction an `Error`` is returned. fn check_transfer_complete(&self) -> Result { self.check_transfer_event(TransferEvent::TransferComplete) } - /// Checks whether the channel half transfer complete event has triggered. If the channel - /// indicates an error occurred, during the transaction an `Error`` is returned. 
fn check_half_transfer_complete(&self) -> Result { self.check_transfer_event(TransferEvent::HalfTransferComplete) } - /// Checks whether the channel transfer has started (has transitioned out of the idle state, or - /// the transfer complete event has already triggered if it is idle) fn check_transfer_started(&self) -> Result { // TODO: Resolve multiple status register reads match self.check_idle() { @@ -584,34 +585,27 @@ where } } - /// Return whether or not a transfer is in progress on the channel. #[inline(always)] fn is_running(&self) -> bool { !self.is_idle() } - /// Reset the channel registers and clear status flags so the channel can be reused. fn reset_channel(&self) { self.reset(); self.clear_all_event_flags(); } - /// Suspend the transfer and blocks until it has been suspended. Reports any that occur while - /// waiting for the transfer to suspend. fn suspend_transfer(&self) { self.initiate_suspend(); while !self.is_suspended() {} } - /// Resumes a suspended transfer and blocks until the channel transitions out of the idle state - /// Reports any errors that occur resuming the transfer. fn resume_transfer(&self) -> Result<(), Error> { self.initiate_resume(); while !self.check_transfer_started()? {} Ok(()) } - /// Aborts an operation by suspending the transfer and resetting the channel. fn abort(&self) { if !self.is_idle() { self.suspend_transfer(); @@ -620,35 +614,49 @@ where self.reset_channel(); } - /// Blocks waiting for a transfer to be started (or for it to be idle and complete). Reports any - /// errors that occur while waiting for the transfer to start. fn wait_for_transfer_started(&self) -> Result<(), Error> { - while !self.check_transfer_started()? {} + while !self.check_transfer_started().inspect_err(|_| { + self.clear_all_event_flags(); + })? {} Ok(()) } - /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. 
fn wait_for_transfer_complete(&self) -> Result<(), Error> { - if !self.is_running() { - return Ok(()); + loop { + match self.check_transfer_complete() { + Ok(true) => { + self.clear_transfer_event_flag( + TransferEvent::TransferComplete, + ); + return Ok(()); + } + Ok(false) => continue, + Err(error) => { + self.clear_all_event_flags(); + return Err(error); + } + } } - - while !self.check_transfer_complete()? {} - Ok(()) } - /// Blocks waiting for a half transfer event to trigger. Reports any errors that occur during a - /// transfer. fn wait_for_half_transfer_complete(&self) -> Result<(), Error> { - if !self.is_running() { - return Ok(()); + loop { + match self.check_half_transfer_complete() { + Ok(true) => { + self.clear_transfer_event_flag( + TransferEvent::HalfTransferComplete, + ); + return Ok(()); + } + Ok(false) => continue, + Err(error) => { + self.clear_all_event_flags(); + return Err(error); + } + } } - - while !self.check_half_transfer_complete()? {} - Ok(()) } - /// Apply a transfer configuration to the channel fn apply_config( &self, config: DmaConfig, @@ -674,8 +682,6 @@ where } } - /// Apply hardware request configuration to the channel. Not relevant to memory-to-memory - /// transfers. fn configure_hardware_request( &self, config: DmaConfig, @@ -684,8 +690,6 @@ where self.set_request_line(config.transfer_type.request()); } - /// Apply peripheral flow control configuration for transactions where a peripheral is the - /// source fn configure_peripheral_flow_control< T: PeripheralSource, S: Word, @@ -699,7 +703,6 @@ where ); } - /// Apply a data transform to the channel transfer fn apply_data_transform(&self, data_transform: DataTransform) { self.set_source_byte_exchange(data_transform.source_byte_exchange); self.set_padding_alignment_mode(data_transform.padding_alignment); @@ -709,36 +712,27 @@ where self.set_destination_byte_exchange(data_transform.dest_byte_exchange); } - /// Set the source address. This sets the source address and data width. 
fn set_source(&self, ptr: *const W) { self.set_source_address(ptr as u32); self.set_source_data_width(core::mem::size_of::()); } - /// Set the destination address. This sets the destination address and data width fn set_destination(&self, ptr: *mut W) { self.set_destination_address(ptr as u32); self.set_destination_data_width(core::mem::size_of::()); } - /// Set the transfer size in bytes (not words!). Size must be aligned with destination width if - /// source width is greater than destination width and packing mode is used. Otherwise the size - /// must be aligned with the source data width. fn set_transfer_size_bytes(&self, size: usize) { self.set_block_size(size as u16); } - /// Enable transfer interrupts for the channel. This enables the transfer complete, - /// half-transfer complete, data transfer error and user setting error interrupts. This is - /// useful for starting a transfer that will be monitored by an interrupt handler. #[inline(always)] fn enable_transfer_interrupts(&self) { self.cr().modify(|_, w| { w.tcie().enabled().dteie().enabled().useie().enabled() }); } - /// Disable transfer interrupts for the channel. It is expected that this will be called from - /// an interrupt handler after a transfer is completed. + #[inline(always)] fn disable_transfer_interrupts(&self) { self.cr().modify(|_, w| { diff --git a/src/gpdma/config.rs b/src/gpdma/config.rs index 89a6ac4..485ed00 100644 --- a/src/gpdma/config.rs +++ b/src/gpdma/config.rs @@ -8,6 +8,7 @@ use transform::*; /// PeripheralRequests is used for peripheral-to-peripheral transfers to indicate which side of the /// transfer is driving the request (ie. 
which has the hardware request assigned) #[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum PeripheralRequest { SourceRequest, DestinationRequest, @@ -15,6 +16,7 @@ pub enum PeripheralRequest { /// The TransferDirection represents the available options for transfer types #[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum TransferDirection { MemoryToMemory, MemoryToPeripheral, @@ -25,6 +27,7 @@ pub enum TransferDirection { /// Addressing mode represents whether the source or destination address is contiguously incremented /// or fixed during a transfer #[derive(Clone, Copy, Default, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum AddressingMode { #[default] ContiguouslyIncremented, @@ -62,6 +65,7 @@ pub trait PeripheralSource { /// Represents the options specifically available for peripheral-to-memory transfers #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct PeripheralToMemory { request: u8, block_request: bool, @@ -108,6 +112,7 @@ impl PeripheralSource for PeripheralToMemory { /// Represents the options specifically available for memory-to-peripheral transfers #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct MemoryToPeripheral { request: u8, block_request: bool, @@ -143,11 +148,13 @@ impl HardwareRequest for MemoryToPeripheral { /// Marker struct to indicate that the source peripheral drives the request via its request line. #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct SourceRequest; /// Marker struct to indicate that the destination peripheral drives the request via its request /// line. 
#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct DestinationRequest; /// Indicates which peripheral in a peripheral-to-peripheral transfer is driving the request line @@ -171,6 +178,7 @@ impl PeripheralToPeripheralDirection for DestinationRequest { /// Represents the options specifically available for peripheral-to-peripheral transfers #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct PeripheralToPeripheral { _peripheral_request: PhantomData, request: u8, @@ -235,6 +243,7 @@ impl TransferType for MemoryToMemory { /// Priority of the transfer. Used by the GPDMA channel arbitration to determine which transfer /// to service. #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum Priority { LowPriorityLowWeight = 0, #[default] @@ -244,6 +253,7 @@ pub enum Priority { } #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum Continuation { #[default] Direct, @@ -251,6 +261,7 @@ pub enum Continuation { } #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum AhbPort { #[default] Port0 = 0, @@ -261,6 +272,7 @@ const MAX_BURST_LEN: u8 = 64; /// Configuration options for a DMA transfer #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct DmaConfig { _src_word: PhantomData, _dest_word: PhantomData, diff --git a/src/gpdma/config/transform.rs b/src/gpdma/config/transform.rs index 603072b..92cca44 100644 --- a/src/gpdma/config/transform.rs +++ b/src/gpdma/config/transform.rs @@ -218,7 +218,7 @@ impl TruncationAlignment for DataTransformBuilder { /// Keep the least significant 8-bits and truncate the rest (default) /// /// ie: B7B6B5B4,B3B2B1B0 -> B4,B0 - fn right_align_left_truncate(mut self) -> 
DataTransformBuilder { + fn right_align_left_truncate(mut self) -> Self { self.transform.padding_alignment = PaddingAlignmentMode::RightAlignedLeftTruncated; self @@ -227,7 +227,7 @@ impl TruncationAlignment for DataTransformBuilder { /// Keep the most significant 8-bits and truncate the rest /// /// i.e: B7B6B5B4,B3B2B1B0 -> B7,B3 - fn left_align_right_truncate(mut self) -> DataTransformBuilder { + fn left_align_right_truncate(mut self) -> Self { self.transform.padding_alignment = PaddingAlignmentMode::LeftAlignedRightTruncated; self @@ -238,7 +238,7 @@ impl TruncationAlignment for DataTransformBuilder { /// the number of word or half-words in the source buffer. /// /// ie: B7B6B5B4,B3B2B1B0 -> B7,B6,B5,B4,B3,B2,B1,B0 - fn unpack(mut self) -> DataTransformBuilder { + fn unpack(mut self) -> Self { self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; self } From 5d9fc2200ca97d0475d37aef5b49135478974dd7 Mon Sep 17 00:00:00 2001 From: astapleton Date: Thu, 31 Jul 2025 14:33:58 -0700 Subject: [PATCH 04/13] missed some defmt derives --- src/gpdma/config.rs | 1 + src/gpdma/config/transform.rs | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/gpdma/config.rs b/src/gpdma/config.rs index 485ed00..0be6299 100644 --- a/src/gpdma/config.rs +++ b/src/gpdma/config.rs @@ -232,6 +232,7 @@ impl PeripheralSource for PeripheralToPeripheral { /// Marker struct for memory-to-memory transfers (no special options) #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct MemoryToMemory; impl crate::Sealed for MemoryToMemory {} diff --git a/src/gpdma/config/transform.rs b/src/gpdma/config/transform.rs index 92cca44..aeade1f 100644 --- a/src/gpdma/config/transform.rs +++ b/src/gpdma/config/transform.rs @@ -18,6 +18,7 @@ use super::Word; /// Represents the options available for the padding and alignment step in the data transformation /// pipeline #[derive(Clone, Copy, Debug, Default, PartialEq)] 
+#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub(crate) enum PaddingAlignmentMode { #[default] None, @@ -100,6 +101,7 @@ pub trait DestinationByteExchange { /// source and destination word sizes, so the builder can be created without specifying the types /// explicitly. #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct DataTransformBuilder { _source_type: PhantomData, _destination_type: PhantomData, @@ -301,6 +303,7 @@ impl DestinationByteExchange for DataTransformBuilder { /// DataTransform represents the configuration of the data transformation pipeline as produced /// by the above builder structs. #[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct DataTransform { pub(crate) source_byte_exchange: bool, pub(crate) padding_alignment: PaddingAlignmentMode, From 6f3aa814425c0452dbf6e16baa591c843dddbe69 Mon Sep 17 00:00:00 2001 From: astapleton Date: Thu, 31 Jul 2025 19:52:31 -0700 Subject: [PATCH 05/13] use ReadBuffer, WriteBuffer to force immovable buffers; fix example --- examples/dma.rs | 187 +++++++++++++++++++++++++++++++++----------- src/gpdma.rs | 44 +++++------ src/gpdma/periph.rs | 65 +++++++++------ 3 files changed, 207 insertions(+), 89 deletions(-) diff --git a/examples/dma.rs b/examples/dma.rs index 373148b..3a00378 100644 --- a/examples/dma.rs +++ b/examples/dma.rs @@ -4,6 +4,8 @@ mod utilities; +use core::mem::MaybeUninit; + use cortex_m_rt::entry; use cortex_m_semihosting::debug; use stm32h5xx_hal::{ @@ -12,6 +14,114 @@ use stm32h5xx_hal::{ prelude::*, }; +static mut SOURCE_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::uninit(); +static mut DEST_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::zeroed(); +static mut DEST_HALF_WORDS: MaybeUninit<[u16; 20]> = MaybeUninit::uninit(); +static mut SOURCE_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); +static mut DEST_WORDS: MaybeUninit<[u32; 10]> = 
MaybeUninit::uninit(); + +fn u8_to_u8_sequential() -> (&'static [u8; 40], &'static mut [u8; 40]) { + let buf: &mut [MaybeUninit; 40] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) + as *mut [MaybeUninit; 40]) + }; + + for (i, value) in buf.iter_mut().enumerate() { + unsafe { + value.as_mut_ptr().write(i as u8); + } + } + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_BYTES.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; + + dest.fill(0); + + (src, dest) +} + +fn u32_to_u32_transform() -> (&'static [u32; 10], &'static mut [u32; 10]) { + let buf: &mut [MaybeUninit; 10] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) + as *mut [MaybeUninit; 10]) + }; + + buf.fill(MaybeUninit::new(0x12345678)); + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_WORDS.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; + + dest.fill(0); + (src, dest) +} + +fn u32_to_u16_truncate() -> (&'static [u32; 10], &'static mut [u16; 20]) { + let buf: &mut [MaybeUninit; 10] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) + as *mut [MaybeUninit; 10]) + }; + + buf.fill(MaybeUninit::new(0x12345678)); + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_WORDS.assume_init_ref() }; + + let dest = unsafe { + (*core::ptr::addr_of_mut!(DEST_HALF_WORDS)).assume_init_mut() + }; + + dest.fill(0); + (src, dest) +} + +fn u32_to_u8_unpack() -> (&'static [u32; 10], &'static mut [u8; 40]) { + let buf: &mut [MaybeUninit; 10] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) + as *mut [MaybeUninit; 10]) + }; + + buf.fill(MaybeUninit::new(0x12345678)); + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_WORDS.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; + + dest.fill(0); + (src, dest) +} + +fn u8_to_u32_pack() 
-> (&'static [u8; 40], &'static mut [u32; 10]) { + let buf: &mut [MaybeUninit; 40] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) + as *mut [MaybeUninit; 40]) + }; + + for chunk in buf.chunks_mut(4) { + unsafe { + chunk[0].as_mut_ptr().write(0x78); + chunk[1].as_mut_ptr().write(0x56); + chunk[2].as_mut_ptr().write(0x34); + chunk[3].as_mut_ptr().write(0x12); + } + } + + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_BYTES.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; + + dest.fill(0); + (src, dest) +} + #[entry] fn main() -> ! { utilities::logger::init(); @@ -27,85 +137,72 @@ fn main() -> ! { let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); - // u8 to u8 - log::info!("u8 to u8 transfer"); - let src: [u8; 40] = core::array::from_fn(|i| i as u8); - let dest = &mut [0u8; 40]; + let (source_buf, dest_buf) = u8_to_u8_sequential(); + let source_copy = unsafe { &*(source_buf.as_ptr() as *const [u8; 40]) }; + let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u8; 40]) }; let channel = channels.0; let config = DmaConfig::new(); - let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!(src, *dest, "u8 to u8 transfer failed"); + assert_eq!(source_copy, dest_copy); - // u32 to u32 with data transform - log::info!("u32 to u32 with data transform"); - let src = [0x12345678u32; 10]; - let dest = &mut [0u32; 10]; + let (source_buf, dest_buf) = u32_to_u32_transform(); + let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u32; 10]) }; let config = DmaConfig::new().with_data_transform( DataTransform::builder() .swap_destination_half_words() .swap_destination_half_word_byte_order(), ); - let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); + let 
transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!( - [0x78563412; 10], *dest, - "u32 to u32 with data transform failed" - ); + let expected = [0x78563412; 10]; + assert_eq!(expected, *dest_copy); - // u32 to u16 with truncate - log::info!("u32 to u16 with truncate"); - let src = [0x12345678u32; 10]; - let dest = &mut [0u16; 20]; + let (source_buf, dest_buf) = u32_to_u16_truncate(); + let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u16; 20]) }; let config = DmaConfig::new().with_data_transform( DataTransform::builder().left_align_right_truncate(), ); - let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!( - [0x1234; 10], - (*dest)[0..10], - "u32 to u16 with truncate failed" - ); - assert_eq!([0; 10], (*dest)[10..20], "u32 to u16 with truncate failed"); + let expected = [0x1234; 10]; + assert_eq!(expected, (*dest_copy)[0..10]); - // u32 to u8 with unpack - log::info!("u32 to u8 with unpack"); - let src = [0x12345678u32; 10]; - let dest = &mut [0u8; 40]; + let (source_buf, dest_buf) = u32_to_u8_unpack(); + let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u8; 40]) }; let config = DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); - let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x78, 0x56, 0x34, 0x12]; - assert_eq!(expected, (*dest)[0..4], "u32 to u8 unpack failed"); - assert_eq!(expected, (*dest)[36..40], "u32 to u8 unpack failed"); - - // u8 to u32 with pack - log::info!("u8 to u32 with pack"); - let mut 
src = [0u8; 40]; - let dest = &mut [0u32; 10]; - for chunk in src.chunks_mut(4) { - chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); - } + assert_eq!(expected, (*dest_copy)[0..4]); + assert_eq!(expected, (*dest_copy)[36..40]); + + let (source_buf, dest_buf) = u8_to_u32_pack(); + let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u32; 10]) }; let config = DmaConfig::new().with_data_transform(DataTransform::builder().pack()); - let transfer = DmaTransfer::memory_to_memory(config, &channel, &src, dest); + let transfer = + DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!([0x12345678; 10], (*dest), "u8 to u32 with pack failed"); - - log::info!("All tests passed!"); + let expected = [0x12345678; 10]; + assert_eq!(expected, (*dest_copy)); + assert_eq!(expected, (*dest_copy)); loop { debug::exit(debug::EXIT_SUCCESS) diff --git a/src/gpdma.rs b/src/gpdma.rs index 6ad2fbf..f86cc6f 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -278,22 +278,22 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Create a new memory-to-memory transfer with the channel, source and destination buffers /// provided. 
pub fn memory_to_memory( - config: DmaConfig, + config: DmaConfig, channel: &'a CH, - source: &'a [S], - destination: &'a mut [D], + source: S, + mut destination: D, ) -> Self where - S: Word, - D: Word, + S: ReadBuffer, + D: WriteBuffer, { let src_width = core::mem::size_of::(); let dest_width = core::mem::size_of::(); - let src_ptr = source.as_ptr(); - let src_size = core::mem::size_of_val(source); - let dest_ptr = destination.as_mut_ptr(); - let dest_size = core::mem::size_of_val(destination); + let (src_ptr, src_words) = unsafe { source.read_buffer() }; + let src_size = core::mem::size_of::() * src_words; + let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; + let dest_size = core::mem::size_of::() * dest_words; // Size must be aligned with destination width if source width is greater than destination // width and packing mode is used, therefore the maximum size must be dictated by @@ -312,7 +312,7 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { // We also need to ensure that the destination - Self::new::( + Self::new::( channel, config, src_ptr, dest_ptr, size, ) } @@ -320,20 +320,20 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Create a new memory-to-peripheral transfer with the channel, source buffer and destination /// peripheral provided. 
pub fn memory_to_peripheral( - config: DmaConfig, + config: DmaConfig, channel: &'a CH, - source: &'a [S], + source: S, mut destination: D, ) -> Self where - S: Word, + S: ReadBuffer, D: WriteBuffer, { - let src_ptr = source.as_ptr(); - let src_size = core::mem::size_of_val(source); + let (src_ptr, src_words) = unsafe { source.read_buffer() }; + let src_size = core::mem::size_of::() * src_words; let (dest_ptr, _) = unsafe { destination.write_buffer() }; - Self::new::( + Self::new::( channel, config, src_ptr, dest_ptr, src_size, ) .apply_hardware_request_config(config) @@ -342,21 +342,21 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Create a new peripheral-to-memory transfer with the channel, source peripheral and /// destination buffer provided. pub fn peripheral_to_memory( - config: DmaConfig, + config: DmaConfig, channel: &'a CH, source: S, - destination: &'a mut [D], + mut destination: D, ) -> Self where S: ReadBuffer, - D: Word, + D: WriteBuffer, { let (src_ptr, _) = unsafe { source.read_buffer() }; - let dest_ptr = destination.as_mut_ptr(); - let dest_size = core::mem::size_of_val(destination); + let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; + let dest_size = core::mem::size_of::() * dest_words; - Self::new::( + Self::new::( channel, config, src_ptr, dest_ptr, dest_size, ) .apply_hardware_request_config(config) diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs index 7d87b4a..a999182 100644 --- a/src/gpdma/periph.rs +++ b/src/gpdma/periph.rs @@ -50,11 +50,13 @@ pub trait RxAddr { /// initiating transmissions. pub trait Tx: Sealed { type CH: DmaChannel; - fn init_tx_transfer<'a>( + fn init_tx_transfer<'a, S>( &'a self, config: DmaConfig, - words: &'a [W], - ) -> DmaTransfer<'a, Self::CH>; + source: S, + ) -> DmaTransfer<'a, Self::CH> + where + S: ReadBuffer; } /// The `Rx` trait to defines the method needed for a peripheral DMA struct (ie. 
[`DmaRx`] or @@ -63,11 +65,13 @@ pub trait Tx: Sealed { /// initiating receiving transfers. pub trait Rx: Sealed { type CH: DmaChannel; - fn init_rx_transfer<'a>( + fn init_rx_transfer<'a, D>( &'a self, config: DmaConfig, - words: &'a mut [W], - ) -> DmaTransfer<'a, Self::CH>; + destination: D, + ) -> DmaTransfer<'a, Self::CH> + where + D: WriteBuffer; } /// `DmaRx` encapsulates the initialization of a peripheral-to-memory DMA transaction for receiving @@ -117,12 +121,20 @@ where W: Word, { type CH = CH; - fn init_rx_transfer<'a>( + fn init_rx_transfer<'a, D>( &'a self, config: DmaConfig, - words: &'a mut [W], - ) -> DmaTransfer<'a, CH> { - DmaTransfer::peripheral_to_memory(config, &self.channel, self, words) + destination: D, + ) -> DmaTransfer<'a, CH> + where + D: WriteBuffer, + { + DmaTransfer::peripheral_to_memory( + config, + &self.channel, + self, + destination, + ) } } @@ -173,12 +185,15 @@ where W: Word, { type CH = CH; - fn init_tx_transfer<'a>( + fn init_tx_transfer<'a, S>( &'a self, config: DmaConfig, - words: &'a [W], - ) -> DmaTransfer<'a, CH> { - DmaTransfer::memory_to_peripheral(config, &self.channel, words, self) + source: S, + ) -> DmaTransfer<'a, CH> + where + S: ReadBuffer, + { + DmaTransfer::memory_to_peripheral(config, &self.channel, source, self) } } @@ -219,12 +234,15 @@ where RX: DmaChannel, { type CH = TX; - fn init_tx_transfer<'a>( + fn init_tx_transfer<'a, S>( &'a self, config: DmaConfig, - words: &'a [W], - ) -> DmaTransfer<'a, TX> { - self.tx.init_tx_transfer(config, words) + source: S, + ) -> DmaTransfer<'a, TX> + where + S: ReadBuffer, + { + self.tx.init_tx_transfer(config, source) } } @@ -236,11 +254,14 @@ where RX: DmaChannel, { type CH = RX; - fn init_rx_transfer<'a>( + fn init_rx_transfer<'a, D>( &'a self, config: DmaConfig, - words: &'a mut [W], - ) -> DmaTransfer<'a, RX> { - self.rx.init_rx_transfer(config, words) + destination: D, + ) -> DmaTransfer<'a, RX> + where + D: WriteBuffer, + { + self.rx.init_rx_transfer(config, 
destination) } } From 95e3f781ccc185f896dfb962bbb27c3feaef5aa2 Mon Sep 17 00:00:00 2001 From: astapleton Date: Fri, 1 Aug 2025 11:12:09 -0700 Subject: [PATCH 06/13] fix example again --- examples/dma.rs | 170 +++++++++++------------------------------------- 1 file changed, 39 insertions(+), 131 deletions(-) diff --git a/examples/dma.rs b/examples/dma.rs index 3a00378..a718af8 100644 --- a/examples/dma.rs +++ b/examples/dma.rs @@ -4,8 +4,7 @@ mod utilities; -use core::mem::MaybeUninit; - +use cortex_m::singleton; use cortex_m_rt::entry; use cortex_m_semihosting::debug; use stm32h5xx_hal::{ @@ -14,114 +13,6 @@ use stm32h5xx_hal::{ prelude::*, }; -static mut SOURCE_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::uninit(); -static mut DEST_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::zeroed(); -static mut DEST_HALF_WORDS: MaybeUninit<[u16; 20]> = MaybeUninit::uninit(); -static mut SOURCE_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); -static mut DEST_WORDS: MaybeUninit<[u32; 10]> = MaybeUninit::uninit(); - -fn u8_to_u8_sequential() -> (&'static [u8; 40], &'static mut [u8; 40]) { - let buf: &mut [MaybeUninit; 40] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) - as *mut [MaybeUninit; 40]) - }; - - for (i, value) in buf.iter_mut().enumerate() { - unsafe { - value.as_mut_ptr().write(i as u8); - } - } - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_BYTES.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; - - dest.fill(0); - - (src, dest) -} - -fn u32_to_u32_transform() -> (&'static [u32; 10], &'static mut [u32; 10]) { - let buf: &mut [MaybeUninit; 10] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) - as *mut [MaybeUninit; 10]) - }; - - buf.fill(MaybeUninit::new(0x12345678)); - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_WORDS.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; - - 
dest.fill(0); - (src, dest) -} - -fn u32_to_u16_truncate() -> (&'static [u32; 10], &'static mut [u16; 20]) { - let buf: &mut [MaybeUninit; 10] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) - as *mut [MaybeUninit; 10]) - }; - - buf.fill(MaybeUninit::new(0x12345678)); - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_WORDS.assume_init_ref() }; - - let dest = unsafe { - (*core::ptr::addr_of_mut!(DEST_HALF_WORDS)).assume_init_mut() - }; - - dest.fill(0); - (src, dest) -} - -fn u32_to_u8_unpack() -> (&'static [u32; 10], &'static mut [u8; 40]) { - let buf: &mut [MaybeUninit; 10] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_WORDS) - as *mut [MaybeUninit; 10]) - }; - - buf.fill(MaybeUninit::new(0x12345678)); - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_WORDS.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; - - dest.fill(0); - (src, dest) -} - -fn u8_to_u32_pack() -> (&'static [u8; 40], &'static mut [u32; 10]) { - let buf: &mut [MaybeUninit; 40] = unsafe { - &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) - as *mut [MaybeUninit; 40]) - }; - - for chunk in buf.chunks_mut(4) { - unsafe { - chunk[0].as_mut_ptr().write(0x78); - chunk[1].as_mut_ptr().write(0x56); - chunk[2].as_mut_ptr().write(0x34); - chunk[3].as_mut_ptr().write(0x12); - } - } - - #[allow(static_mut_refs)] // TODO: Fix this - let src = unsafe { SOURCE_BYTES.assume_init_ref() }; - - let dest = - unsafe { (*core::ptr::addr_of_mut!(DEST_WORDS)).assume_init_mut() }; - - dest.fill(0); - (src, dest) -} - #[entry] fn main() -> ! { utilities::logger::init(); @@ -137,53 +28,62 @@ fn main() -> ! 
{ let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); - let (source_buf, dest_buf) = u8_to_u8_sequential(); - let source_copy = unsafe { &*(source_buf.as_ptr() as *const [u8; 40]) }; - let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u8; 40]) }; + log::info!("u8 to u8"); + let src = + singleton!(: [u8; 40] = core::array::from_fn(|i| i as u8)).unwrap(); + let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + let source_copy = unsafe { &*(src.as_ptr() as *const [u8; 40]) }; + let dest_copy = unsafe { &*(dest.as_ptr() as *const [u8; 40]) }; let channel = channels.0; let config = DmaConfig::new(); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); assert_eq!(source_copy, dest_copy); - let (source_buf, dest_buf) = u32_to_u32_transform(); - let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u32; 10]) }; + log::info!("u32 to u32 with data transform"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + + let dest_copy = unsafe { &*(dest.as_ptr() as *const [u32; 10]) }; + let config = DmaConfig::new().with_data_transform( DataTransform::builder() .swap_destination_half_words() .swap_destination_half_word_byte_order(), ); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x78563412; 10]; assert_eq!(expected, *dest_copy); - let (source_buf, dest_buf) = u32_to_u16_truncate(); - let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u16; 20]) }; + log::info!("u32 to u16 with truncate"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: 
[u16; 20] = [0u16; 20]).unwrap(); + let dest_copy = unsafe { &*(dest.as_ptr() as *const [u16; 20]) }; let config = DmaConfig::new().with_data_transform( DataTransform::builder().left_align_right_truncate(), ); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x1234; 10]; assert_eq!(expected, (*dest_copy)[0..10]); - let (source_buf, dest_buf) = u32_to_u8_unpack(); - let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u8; 40]) }; + log::info!("u32 to u8 with unpack"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + + let dest_copy = unsafe { &*(dest.as_ptr() as *const [u8; 40]) }; + let config = DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); @@ -191,12 +91,19 @@ fn main() -> ! 
{ assert_eq!(expected, (*dest_copy)[0..4]); assert_eq!(expected, (*dest_copy)[36..40]); - let (source_buf, dest_buf) = u8_to_u32_pack(); - let dest_copy = unsafe { &*(dest_buf.as_ptr() as *const [u32; 10]) }; + log::info!("u8 to u32 with pack"); + let src = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + + for chunk in src.chunks_mut(4) { + chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); + } + + let dest_copy = unsafe { &*(dest.as_ptr() as *const [u32; 10]) }; + let config = DmaConfig::new().with_data_transform(DataTransform::builder().pack()); - let transfer = - DmaTransfer::memory_to_memory(config, &channel, source_buf, dest_buf); + let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); @@ -204,6 +111,7 @@ fn main() -> ! { assert_eq!(expected, (*dest_copy)); assert_eq!(expected, (*dest_copy)); + log::info!("All tests passed!"); loop { debug::exit(debug::EXIT_SUCCESS) } From 793a9d5dff76c39ee6a8fbd728fe19aa1eccb0a4 Mon Sep 17 00:00:00 2001 From: Andrew Stapleton Date: Sat, 2 Aug 2025 11:04:49 -0700 Subject: [PATCH 07/13] doc clarification Co-authored-by: Albin Hedman --- src/gpdma.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gpdma.rs b/src/gpdma.rs index f86cc6f..ed52117 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -12,7 +12,7 @@ //! transfer) that is supported by the processor (ie. they're not tied to specific channels). All //! channels support direct and linked-buffer transfers. However, the channels do have different //! capabilities (see RM0492 Rev 3 section 15.3.2 for full details), notably that channels 0-5 can -//! only service transfers in a linear address space, while channels 6 & 7 can service transfers +//! only service transfers in a linear address space, while channels 6 & 7 can also service transfers //! using a 2D addressing scheme. 
Both GPDMA peripherals support the same requests/channel //! capabilities. //! From 336a8c42b1b18b6acc41b965b4de446e26bf689e Mon Sep 17 00:00:00 2001 From: astapleton Date: Sat, 2 Aug 2025 14:57:09 -0700 Subject: [PATCH 08/13] Use mutable reference to channels when creating transfers --- examples/dma.rs | 17 ++-- src/gpdma.rs | 32 ++++---- src/gpdma/ch.rs | 72 ++++++++--------- src/gpdma/periph.rs | 183 +++++++++++++++++++++++--------------------- 4 files changed, 156 insertions(+), 148 deletions(-) diff --git a/examples/dma.rs b/examples/dma.rs index a718af8..cc42747 100644 --- a/examples/dma.rs +++ b/examples/dma.rs @@ -35,9 +35,10 @@ fn main() -> ! { let source_copy = unsafe { &*(src.as_ptr() as *const [u8; 40]) }; let dest_copy = unsafe { &*(dest.as_ptr() as *const [u8; 40]) }; - let channel = channels.0; + let mut channel = channels.0; let config = DmaConfig::new(); - let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); assert_eq!(source_copy, dest_copy); @@ -54,7 +55,8 @@ fn main() -> ! { .swap_destination_half_word_byte_order(), ); - let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); @@ -68,7 +70,8 @@ fn main() -> ! { let config = DmaConfig::new().with_data_transform( DataTransform::builder().left_align_right_truncate(), ); - let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); @@ -83,7 +86,8 @@ fn main() -> ! 
{ let config = DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); - let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); @@ -103,7 +107,8 @@ fn main() -> ! { let config = DmaConfig::new().with_data_transform(DataTransform::builder().pack()); - let transfer = DmaTransfer::memory_to_memory(config, &channel, src, dest); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); diff --git a/src/gpdma.rs b/src/gpdma.rs index ed52117..2dd1656 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -249,12 +249,12 @@ pub struct DmaTransfer<'a, CH> where CH: DmaChannel, { - channel: &'a CH, + channel: &'a mut CH, } impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { fn new( - channel: &'a CH, + channel: &'a mut CH, config: DmaConfig, src_ptr: *const S, dest_ptr: *mut D, @@ -279,7 +279,7 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// provided. pub fn memory_to_memory( config: DmaConfig, - channel: &'a CH, + channel: &'a mut CH, source: S, mut destination: D, ) -> Self @@ -321,7 +321,7 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// peripheral provided. pub fn memory_to_peripheral( config: DmaConfig, - channel: &'a CH, + channel: &'a mut CH, source: S, mut destination: D, ) -> Self @@ -343,7 +343,7 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// destination buffer provided. pub fn peripheral_to_memory( config: DmaConfig, - channel: &'a CH, + channel: &'a mut CH, source: S, mut destination: D, ) -> Self @@ -367,7 +367,7 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// provided. 
pub fn peripheral_to_peripheral( config: DmaConfig, S::Word, D::Word>, - channel: &'a CH, + channel: &'a mut CH, source: S, mut destination: D, ) -> Self @@ -414,7 +414,7 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { } impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { - fn start_transfer_internal(&self) { + fn start_transfer_internal(&mut self) { // Preserve the instruction and bus ordering of preceding buffer access // to the subsequent access by the DMA peripheral due to enabling it. fence(Ordering::SeqCst); @@ -424,13 +424,13 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Start a transfer. Does not block waiting for the transfer to start and does not check for /// errors starting the transfer - pub fn start_nonblocking(&self) { + pub fn start_nonblocking(&mut self) { self.start_transfer_internal(); } /// Start a transfer and block waiting for it to start. Returns an error if one occurred /// starting the transfer. - pub fn start(&self) -> Result<(), Error> { + pub fn start(&mut self) -> Result<(), Error> { self.start_nonblocking(); self.channel.wait_for_transfer_started() } @@ -505,24 +505,18 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Blocks waiting for the half transfer complete event. Returns an error if one occurred during /// the transfer. - pub fn wait_for_half_transfer_complete(self) -> Result<(), Error> { - let result = self.channel.wait_for_half_transfer_complete(); - // Preserve the instruction and bus sequence of the preceding operation and - // the subsequent buffer access. - fence(Ordering::SeqCst); - - core::mem::forget(self); // Prevents self from being dropped and attempting to abort - result + pub fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error> { + self.channel.wait_for_half_transfer_complete() } /// Enable interrupts for this transfer. This will enable the transfer complete and half /// transfer complete interrupts, as well as error interrupts. 
- pub fn enable_interrupts(&self) { + pub fn enable_interrupts(&mut self) { self.channel.enable_transfer_interrupts(); } /// Disable interrupts for this transfer. - pub fn disable_interrupts(&self) { + pub fn disable_interrupts(&mut self) { self.channel.disable_transfer_interrupts(); } diff --git a/src/gpdma/ch.rs b/src/gpdma/ch.rs index b0b41c5..a7c67b4 100644 --- a/src/gpdma/ch.rs +++ b/src/gpdma/ch.rs @@ -442,12 +442,12 @@ where /// module. #[doc(hidden)] pub(super) trait Channel { - fn enable(&self); + fn enable(&mut self); fn is_suspended(&self) -> bool; /// Initiates the suspension of a transfer - fn initiate_suspend(&self); + fn initiate_suspend(&mut self); /// Resume transfer fn initiate_resume(&self); @@ -467,40 +467,40 @@ pub(super) trait Channel { fn is_running(&self) -> bool; /// Reset the channel registers so it can be reused. - fn reset_channel(&self); + fn reset_channel(&mut self); /// Suspend the transfer and blocks until it has been suspended. Reports any that occur while /// waiting for the transfer to suspend. - fn suspend_transfer(&self); + fn suspend_transfer(&mut self); /// Resumes a suspended transfer and blocks until the channel transitions out of the idle state /// Reports any errors that occur resuming the transfer. - fn resume_transfer(&self) -> Result<(), Error>; + fn resume_transfer(&mut self) -> Result<(), Error>; /// Aborts an operation by suspending the transfer and resetting the channel. - fn abort(&self); + fn abort(&mut self); /// Blocks waiting for a transfer to be started (or for it to be idle and complete). Reports any /// errors that occur while waiting for the transfer to start. - fn wait_for_transfer_started(&self) -> Result<(), Error>; + fn wait_for_transfer_started(&mut self) -> Result<(), Error>; /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. 
- fn wait_for_transfer_complete(&self) -> Result<(), Error>; + fn wait_for_transfer_complete(&mut self) -> Result<(), Error>; /// Blocks waiting for a half transfer event to trigger. Reports any errors that occur during a /// transfer. - fn wait_for_half_transfer_complete(&self) -> Result<(), Error>; + fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error>; /// Apply a transfer configuration to the channel fn apply_config( - &self, + &mut self, config: DmaConfig, ); /// Apply hardware request configuration to the channel. Not relevant to memory-to-memory /// transfers. fn configure_hardware_request( - &self, + &mut self, config: DmaConfig, ); @@ -511,31 +511,31 @@ pub(super) trait Channel { S: Word, D: Word, >( - &self, + &mut self, config: DmaConfig, ); /// Apply a data transform to the channel transfer - fn apply_data_transform(&self, data_transform: DataTransform); + fn apply_data_transform(&mut self, data_transform: DataTransform); /// Set the source address. This sets the source address and data width. - fn set_source(&self, ptr: *const W); + fn set_source(&mut self, ptr: *const W); /// Set the destination address. This sets the destination address and data width - fn set_destination(&self, ptr: *mut W); + fn set_destination(&mut self, ptr: *mut W); /// Set the transfer size in bytes (not words!). Size must be aligned with destination width if /// source width is greater than destination width and packing mode is used. Otherwise the size /// must be aligned with the source data width. - fn set_transfer_size_bytes(&self, size: usize); + fn set_transfer_size_bytes(&mut self, size: usize); /// Enable transfer interrupts for the channel. This enables the transfer complete, /// half-transfer complete, data transfer error and user setting error interrupts. This is /// useful for starting a transfer that will be monitored by an interrupt handler. 
- fn enable_transfer_interrupts(&self); + fn enable_transfer_interrupts(&mut self); /// Disable transfer interrupts for the channel. It is expected that this will be called from /// an interrupt handler after a transfer is completed. - fn disable_transfer_interrupts(&self); + fn disable_transfer_interrupts(&mut self); } impl Channel for DmaChannelRef @@ -545,7 +545,7 @@ where Self: Deref, { #[inline(always)] - fn enable(&self) { + fn enable(&mut self) { self.cr().modify(|_, w| w.en().enabled()); } @@ -554,7 +554,7 @@ where self.sr().read().suspf().bit_is_set() } - fn initiate_suspend(&self) { + fn initiate_suspend(&mut self) { if self.is_suspended() { return; } @@ -590,23 +590,23 @@ where !self.is_idle() } - fn reset_channel(&self) { + fn reset_channel(&mut self) { self.reset(); self.clear_all_event_flags(); } - fn suspend_transfer(&self) { + fn suspend_transfer(&mut self) { self.initiate_suspend(); while !self.is_suspended() {} } - fn resume_transfer(&self) -> Result<(), Error> { + fn resume_transfer(&mut self) -> Result<(), Error> { self.initiate_resume(); while !self.check_transfer_started()? {} Ok(()) } - fn abort(&self) { + fn abort(&mut self) { if !self.is_idle() { self.suspend_transfer(); } @@ -614,14 +614,14 @@ where self.reset_channel(); } - fn wait_for_transfer_started(&self) -> Result<(), Error> { + fn wait_for_transfer_started(&mut self) -> Result<(), Error> { while !self.check_transfer_started().inspect_err(|_| { self.clear_all_event_flags(); })? 
{} Ok(()) } - fn wait_for_transfer_complete(&self) -> Result<(), Error> { + fn wait_for_transfer_complete(&mut self) -> Result<(), Error> { loop { match self.check_transfer_complete() { Ok(true) => { @@ -639,7 +639,7 @@ where } } - fn wait_for_half_transfer_complete(&self) -> Result<(), Error> { + fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error> { loop { match self.check_half_transfer_complete() { Ok(true) => { @@ -658,7 +658,7 @@ where } fn apply_config( - &self, + &mut self, config: DmaConfig, ) { self.set_source_addressing_mode( @@ -683,7 +683,7 @@ where } fn configure_hardware_request( - &self, + &mut self, config: DmaConfig, ) { self.set_block_request_mode(config.transfer_type.block_request()); @@ -695,7 +695,7 @@ where S: Word, D: Word, >( - &self, + &mut self, config: DmaConfig, ) { self.set_peripheral_flow_control_mode( @@ -703,7 +703,7 @@ where ); } - fn apply_data_transform(&self, data_transform: DataTransform) { + fn apply_data_transform(&mut self, data_transform: DataTransform) { self.set_source_byte_exchange(data_transform.source_byte_exchange); self.set_padding_alignment_mode(data_transform.padding_alignment); self.set_destination_half_word_exchange( @@ -712,29 +712,29 @@ where self.set_destination_byte_exchange(data_transform.dest_byte_exchange); } - fn set_source(&self, ptr: *const W) { + fn set_source(&mut self, ptr: *const W) { self.set_source_address(ptr as u32); self.set_source_data_width(core::mem::size_of::()); } - fn set_destination(&self, ptr: *mut W) { + fn set_destination(&mut self, ptr: *mut W) { self.set_destination_address(ptr as u32); self.set_destination_data_width(core::mem::size_of::()); } - fn set_transfer_size_bytes(&self, size: usize) { + fn set_transfer_size_bytes(&mut self, size: usize) { self.set_block_size(size as u16); } #[inline(always)] - fn enable_transfer_interrupts(&self) { + fn enable_transfer_interrupts(&mut self) { self.cr().modify(|_, w| { w.tcie().enabled().dteie().enabled().useie().enabled() }); 
} #[inline(always)] - fn disable_transfer_interrupts(&self) { + fn disable_transfer_interrupts(&mut self) { self.cr().modify(|_, w| { w.tcie().disabled().dteie().disabled().useie().disabled() }); diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs index a999182..e326beb 100644 --- a/src/gpdma/periph.rs +++ b/src/gpdma/periph.rs @@ -7,7 +7,7 @@ //! encapsulate the logic for initializing these transfers. //! - The [`DmaDuplex`] struct combines both TX and RX capabilities, allowing for full-duplex //! operations. -use core::marker::PhantomData; +use core::{cell::Cell, marker::PhantomData}; use crate::Sealed; @@ -16,6 +16,36 @@ use super::{ ReadBuffer, Word, WriteBuffer, }; +/// `PeriphTxBuffer` is a wrapper around a peripheral's transmit data register address, used to +/// provide a WriteBuffer implementation for initiating memory-to-peripheral DMA transfers. +struct PeriphTxBuffer, W: Word> { + _addr: PhantomData, + _word: PhantomData, +} + +unsafe impl, W: Word> WriteBuffer for PeriphTxBuffer { + type Word = W; + + unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { + (A::tx_addr(), 1) + } +} + +/// `PeriphRxBuffer` is a wrapper around a peripheral's receive data register address, used to +/// provide a ReadBuffer implementation for initiating peripheral-to-memory DMA transfers. +struct PeriphRxBuffer, W: Word> { + _addr: PhantomData, + _word: PhantomData, +} + +unsafe impl, W: Word> ReadBuffer for PeriphRxBuffer { + type Word = W; + + unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { + (A::rx_addr(), 1) + } +} + /// `TxAddr` is a trait that provides a method to obtain the address of the transmit data register /// of a peripheral. This is used to facilitate memory-to-peripheral DMA transactions. The /// peripheral must implement this trait. @@ -44,36 +74,48 @@ pub trait RxAddr { unsafe fn rx_addr() -> *const W; } -/// The `Tx` trait to defines the method needed for a peripheral DMA struct (ie. 
[`DmaTx`] or -/// [`DmaDuplex`]) that is used to initiate a memory-to-peripheral DMA transaction. It also -/// functions as a marker trait to indicate that the peripheral DMA struct can be used for -/// initiating transmissions. -pub trait Tx: Sealed { - type CH: DmaChannel; - fn init_tx_transfer<'a, S>( - &'a self, - config: DmaConfig, - source: S, - ) -> DmaTransfer<'a, Self::CH> +trait TxBuffer { + /// Returns a `PeriphTxBuffer` that provides a write buffer for the peripheral's transmit data + /// register. This is used to initiate memory-to-peripheral DMA transfers. Implemented + /// automatically for any implementer of `TxAddr`. + /// + /// # Safety + /// TxAddr already requires the caller to ensure that the returned pointer is valid and as such + /// is marked unsafe, so marking this method as unsafe is redundant. + fn tx_buffer() -> PeriphTxBuffer where - S: ReadBuffer; + Self: TxAddr + Sized, + { + PeriphTxBuffer { + _addr: PhantomData, + _word: PhantomData, + } + } } -/// The `Rx` trait to defines the method needed for a peripheral DMA struct (ie. [`DmaRx`] or -/// [`DmaDuplex`]) that is used to initiate a peripheral-to-memory DMA transaction. It also -/// functions as a marker trait to indicate that the peripheral DMA struct can be used for -/// initiating receiving transfers. -pub trait Rx: Sealed { - type CH: DmaChannel; - fn init_rx_transfer<'a, D>( - &'a self, - config: DmaConfig, - destination: D, - ) -> DmaTransfer<'a, Self::CH> +impl> TxBuffer for T {} + +trait RxBuffer { + /// Returns a `PeriphRxBuffer` that provides a read buffer for the peripheral's receive data + /// register. This is used to initiate peripheral-to-memory DMA transfers. Implemented + /// automatically for any implementer of `RxAddr`. + /// + /// # Safety + /// RxAddr already requires the caller to ensure that the returned pointer is valid and as such + /// is marked unsafe, so marking this method as unsafe is redundant. 
+ fn rx_buffer() -> PeriphRxBuffer where - D: WriteBuffer; + Self: RxAddr + Sized, + { + PeriphRxBuffer { + _addr: PhantomData, + _word: PhantomData, + } + } } +impl> RxBuffer for T {} + /// `DmaRx` encapsulates the initialization of a peripheral-to-memory DMA transaction for receiving /// data. Used by peripheral DMA implementations. pub struct DmaRx { @@ -102,27 +144,16 @@ impl From for DmaRx { } } -unsafe impl, W: Word, CH> ReadBuffer - for &DmaRx -{ - type Word = W; - - unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { - (PERIPH::rx_addr(), 1) - } -} - impl Sealed for DmaRx {} -impl Rx for DmaRx +impl DmaRx where PERIPH: RxAddr, CH: DmaChannel, W: Word, { - type CH = CH; - fn init_rx_transfer<'a, D>( - &'a self, + pub fn init_rx_transfer<'a, D>( + &'a mut self, config: DmaConfig, destination: D, ) -> DmaTransfer<'a, CH> @@ -131,8 +162,8 @@ where { DmaTransfer::peripheral_to_memory( config, - &self.channel, - self, + &mut self.channel, + PERIPH::rx_buffer(), destination, ) } @@ -166,34 +197,28 @@ impl From for DmaTx { } } -unsafe impl, W: Word, CH> WriteBuffer - for &DmaTx -{ - type Word = W; - - unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { - (PERIPH::tx_addr(), 1) - } -} - impl Sealed for DmaTx {} -impl Tx for DmaTx +impl DmaTx where PERIPH: TxAddr, CH: DmaChannel, W: Word, { - type CH = CH; - fn init_tx_transfer<'a, S>( - &'a self, + pub fn init_tx_transfer<'a, S>( + &'a mut self, config: DmaConfig, source: S, ) -> DmaTransfer<'a, CH> where S: ReadBuffer, { - DmaTransfer::memory_to_peripheral(config, &self.channel, source, self) + DmaTransfer::memory_to_peripheral( + config, + &mut self.channel, + source, + PERIPH::tx_buffer(), + ) } } @@ -201,8 +226,8 @@ where /// peripheral-to-memory DMA transaction for to enable setting up of full-duplex transmission and /// reception of data. Used by peripheral DMA implementations. 
pub struct DmaDuplex { - tx: DmaTx, - rx: DmaRx, + tx: Cell>, + rx: Cell>, } impl DmaDuplex @@ -214,54 +239,38 @@ where { pub fn new(tx: TX, rx: RX) -> Self { Self { - tx: DmaTx::from(tx), - rx: DmaRx::from(rx), + tx: Cell::new(DmaTx::from(tx)), + rx: Cell::new(DmaRx::from(rx)), } } pub fn free(self) -> (TX, RX) { - (self.tx.free(), self.rx.free()) + (self.tx.into_inner().free(), self.rx.into_inner().free()) } } impl Sealed for DmaDuplex {} -impl Tx for DmaDuplex +impl DmaDuplex where PERIPH: TxAddr + RxAddr, W: Word, TX: DmaChannel, RX: DmaChannel, { - type CH = TX; - fn init_tx_transfer<'a, S>( - &'a self, - config: DmaConfig, + pub fn init_duplex_transfer<'a, S, D>( + &'a mut self, + tx_config: DmaConfig, + rx_config: DmaConfig, source: S, - ) -> DmaTransfer<'a, TX> - where - S: ReadBuffer, - { - self.tx.init_tx_transfer(config, source) - } -} - -impl Rx for DmaDuplex -where - PERIPH: TxAddr + RxAddr, - W: Word + Word, - TX: DmaChannel, - RX: DmaChannel, -{ - type CH = RX; - fn init_rx_transfer<'a, D>( - &'a self, - config: DmaConfig, destination: D, - ) -> DmaTransfer<'a, RX> + ) -> (DmaTransfer<'a, TX>, DmaTransfer<'a, RX>) where + S: ReadBuffer, D: WriteBuffer, { - self.rx.init_rx_transfer(config, destination) + let tx = self.tx.get_mut().init_tx_transfer(tx_config, source); + let rx = self.rx.get_mut().init_rx_transfer(rx_config, destination); + (tx, rx) } } From 9bbaca77aa90002b5f4b558fca6adc20c0f249a2 Mon Sep 17 00:00:00 2001 From: astapleton Date: Sat, 2 Aug 2025 15:01:26 -0700 Subject: [PATCH 09/13] fix data width check; remove incomplete documentation --- src/gpdma.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/gpdma.rs b/src/gpdma.rs index 2dd1656..3791235 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -287,13 +287,13 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { S: ReadBuffer, D: WriteBuffer, { - let src_width = core::mem::size_of::(); - let dest_width = core::mem::size_of::(); + let src_width = 
core::mem::size_of::(); + let dest_width = core::mem::size_of::(); let (src_ptr, src_words) = unsafe { source.read_buffer() }; - let src_size = core::mem::size_of::() * src_words; + let src_size = src_width * src_words; let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; - let dest_size = core::mem::size_of::() * dest_words; + let dest_size = dest_width * dest_words; // Size must be aligned with destination width if source width is greater than destination // width and packing mode is used, therefore the maximum size must be dictated by @@ -310,8 +310,6 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { src_size }; - // We also need to ensure that the destination - Self::new::( channel, config, src_ptr, dest_ptr, size, ) From 5c587f32793e514e19c12066a2b6628188dee05a Mon Sep 17 00:00:00 2001 From: astapleton Date: Mon, 4 Aug 2025 16:43:35 -0700 Subject: [PATCH 10/13] Hold onto source/destination; Remove UB from example --- examples/dma.rs | 113 +++++++++++++++++++++++++++++---------- src/gpdma.rs | 126 ++++++++++++++++++++++++-------------------- src/gpdma/periph.rs | 29 +++++----- 3 files changed, 172 insertions(+), 96 deletions(-) diff --git a/examples/dma.rs b/examples/dma.rs index cc42747..dc80e80 100644 --- a/examples/dma.rs +++ b/examples/dma.rs @@ -7,12 +7,52 @@ mod utilities; use cortex_m::singleton; use cortex_m_rt::entry; use cortex_m_semihosting::debug; +use embedded_dma::{ReadBuffer, WriteBuffer}; use stm32h5xx_hal::{ - gpdma::{config::transform::*, DmaConfig, DmaTransfer}, + gpdma::{config::transform::*, DmaConfig, DmaTransfer, Word}, pac, prelude::*, }; +// Buffer is used to manage a reference to a static buffer returned by the cortex_m::singleton! +// macro and which can be used with the DmaTransfer API (which requires passing ReadBuffer and +// WriteBuffer implementations by value) and then used to access the buffer after the transfer has +// completed.
+struct Buffer { + data: &'static mut [T; N], +} + +impl Buffer +where + T: Word + 'static, +{ + fn new(data: &'static mut [T; N]) -> Self { + Self { data } + } +} + +unsafe impl ReadBuffer for &Buffer +where + T: Word + 'static, +{ + type Word = T; + + unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { + (self.data.as_ptr(), N) + } +} + +unsafe impl WriteBuffer for &mut Buffer +where + T: Word + 'static, +{ + type Word = T; + + unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { + (self.data.as_mut_ptr(), N) + } +} + #[entry] fn main() -> ! { utilities::logger::init(); @@ -31,23 +71,27 @@ fn main() -> ! { log::info!("u8 to u8"); let src = singleton!(: [u8; 40] = core::array::from_fn(|i| i as u8)).unwrap(); + + let src_buffer = Buffer::new(src); let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); - let source_copy = unsafe { &*(src.as_ptr() as *const [u8; 40]) }; - let dest_copy = unsafe { &*(dest.as_ptr() as *const [u8; 40]) }; + let mut dest_buffer = Buffer::new(dest); let mut channel = channels.0; let config = DmaConfig::new(); - let mut transfer = - DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + let mut transfer = DmaTransfer::memory_to_memory( + config, + &mut channel, + &src_buffer, + &mut dest_buffer, + ); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!(source_copy, dest_copy); + assert_eq!(src_buffer.data, dest_buffer.data); log::info!("u32 to u32 with data transform"); let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); - - let dest_copy = unsafe { &*(dest.as_ptr() as *const [u32; 10]) }; + let mut dest_buffer = Buffer::new(dest); let config = DmaConfig::new().with_data_transform( DataTransform::builder() @@ -55,66 +99,81 @@ fn main() -> ! 
{ .swap_destination_half_word_byte_order(), ); - let mut transfer = - DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + let mut transfer = DmaTransfer::memory_to_memory( + config, + &mut channel, + src, + &mut dest_buffer, + ); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x78563412; 10]; - assert_eq!(expected, *dest_copy); + assert_eq!(expected, *dest_buffer.data); log::info!("u32 to u16 with truncate"); let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); let dest = singleton!(: [u16; 20] = [0u16; 20]).unwrap(); - let dest_copy = unsafe { &*(dest.as_ptr() as *const [u16; 20]) }; + let mut dest_buffer = Buffer::new(dest); + let config = DmaConfig::new().with_data_transform( DataTransform::builder().left_align_right_truncate(), ); - let mut transfer = - DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + let mut transfer = DmaTransfer::memory_to_memory( + config, + &mut channel, + src, + &mut dest_buffer, + ); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x1234; 10]; - assert_eq!(expected, (*dest_copy)[0..10]); + assert_eq!(expected, (*dest_buffer.data)[0..10]); log::info!("u32 to u8 with unpack"); let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); - - let dest_copy = unsafe { &*(dest.as_ptr() as *const [u8; 40]) }; + let mut dest_buffer = Buffer::new(dest); let config = DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); - let mut transfer = - DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + let mut transfer = DmaTransfer::memory_to_memory( + config, + &mut channel, + src, + &mut dest_buffer, + ); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x78, 0x56, 0x34, 0x12]; - assert_eq!(expected, (*dest_copy)[0..4]); - assert_eq!(expected, (*dest_copy)[36..40]); + assert_eq!(expected, 
(*dest_buffer.data)[0..4]); + assert_eq!(expected, (*dest_buffer.data)[36..40]); log::info!("u8 to u32 with pack"); let src = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + let mut dest_buffer = Buffer::new(dest); for chunk in src.chunks_mut(4) { chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); } - let dest_copy = unsafe { &*(dest.as_ptr() as *const [u32; 10]) }; - let config = DmaConfig::new().with_data_transform(DataTransform::builder().pack()); - let mut transfer = - DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + let mut transfer = DmaTransfer::memory_to_memory( + config, + &mut channel, + src, + &mut dest_buffer, + ); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); let expected = [0x12345678; 10]; - assert_eq!(expected, (*dest_copy)); - assert_eq!(expected, (*dest_copy)); + assert_eq!(expected, (*dest_buffer.data)); + assert_eq!(expected, (*dest_buffer.data)); log::info!("All tests passed!"); loop { diff --git a/src/gpdma.rs b/src/gpdma.rs index 3791235..55f260c 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -245,54 +245,71 @@ impl DmaChannels { /// [`DmaTransfer::peripheral_to_memory`], or [`DmaTransfer::peripheral_to_peripheral`] /// methods, which take a channel and the source and destination buffers. The transfer can then be /// started using the [`DmaTransfer::start`] or [`DmaTransfer::start_nonblocking`] methods. -pub struct DmaTransfer<'a, CH> +pub struct DmaTransfer<'a, CH, S, D> where CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, { channel: &'a mut CH, + + // Hold onto source and destination for the lifetime of the transfer to ensure that any + // user implementations of ReadBuffer and WriteBuffer that are themselves references are held + // for the duration of the transfer and so prevent access to the underlying data. 
+ #[allow(dead_code)] + source: S, + #[allow(dead_code)] + destination: D, } -impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { - fn new( +impl<'a, CH, S, D> DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + fn new( channel: &'a mut CH, - config: DmaConfig, - src_ptr: *const S, - dest_ptr: *mut D, + config: DmaConfig, + source: S, + mut destination: D, size: usize, ) -> Self where - S: Word, - D: Word, T: TransferType, { assert!(size <= u16::MAX as usize, "Max block size is {}", u16::MAX); + + let (src_ptr, _) = unsafe { source.read_buffer() }; + let (dest_ptr, _) = unsafe { destination.write_buffer() }; + channel.reset_channel(); channel.set_source(src_ptr); channel.set_destination(dest_ptr); channel.set_transfer_size_bytes(size); channel.apply_config(config); - Self { channel } + Self { + channel, + source, + destination, + } } /// Create a new memory-to-memory transfer with the channel, source and destination buffers /// provided. - pub fn memory_to_memory( + pub fn memory_to_memory( config: DmaConfig, channel: &'a mut CH, source: S, mut destination: D, - ) -> Self - where - S: ReadBuffer, - D: WriteBuffer, - { + ) -> Self { let src_width = core::mem::size_of::(); let dest_width = core::mem::size_of::(); - let (src_ptr, src_words) = unsafe { source.read_buffer() }; + let (_, src_words) = unsafe { source.read_buffer() }; let src_size = src_width * src_words; - let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; + let (_, dest_words) = unsafe { destination.write_buffer() }; let dest_size = dest_width * dest_words; // Size must be aligned with destination width if source width is greater than destination @@ -310,52 +327,47 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { src_size }; - Self::new::( - channel, config, src_ptr, dest_ptr, size, - ) + Self::new::(channel, config, source, destination, size) } /// Create a new memory-to-peripheral transfer with the channel, source buffer and destination /// peripheral 
provided. - pub fn memory_to_peripheral( + pub fn memory_to_peripheral( config: DmaConfig, channel: &'a mut CH, source: S, - mut destination: D, - ) -> Self - where - S: ReadBuffer, - D: WriteBuffer, - { - let (src_ptr, src_words) = unsafe { source.read_buffer() }; + destination: D, + ) -> Self { + let (_, src_words) = unsafe { source.read_buffer() }; let src_size = core::mem::size_of::() * src_words; - let (dest_ptr, _) = unsafe { destination.write_buffer() }; - Self::new::( - channel, config, src_ptr, dest_ptr, src_size, + Self::new::( + channel, + config, + source, + destination, + src_size, ) .apply_hardware_request_config(config) } /// Create a new peripheral-to-memory transfer with the channel, source peripheral and /// destination buffer provided. - pub fn peripheral_to_memory( + pub fn peripheral_to_memory( config: DmaConfig, channel: &'a mut CH, source: S, mut destination: D, - ) -> Self - where - S: ReadBuffer, - D: WriteBuffer, - { - let (src_ptr, _) = unsafe { source.read_buffer() }; - - let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; + ) -> Self { + let (_, dest_words) = unsafe { destination.write_buffer() }; let dest_size = core::mem::size_of::() * dest_words; - Self::new::( - channel, config, src_ptr, dest_ptr, dest_size, + Self::new::( + channel, + config, + source, + destination, + dest_size, ) .apply_hardware_request_config(config) .apply_peripheral_source_config(config) @@ -363,19 +375,17 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { /// Create a new peripheral-to-peripheral transfer with source and destination peripherals /// provided. 
- pub fn peripheral_to_peripheral( + pub fn peripheral_to_peripheral( config: DmaConfig, S::Word, D::Word>, channel: &'a mut CH, source: S, mut destination: D, ) -> Self where - S: ReadBuffer, - D: WriteBuffer, T: PeripheralToPeripheralDirection, { - let (src_ptr, src_words) = unsafe { source.read_buffer() }; - let (dest_ptr, dest_words) = unsafe { destination.write_buffer() }; + let (_, src_words) = unsafe { source.read_buffer() }; + let (_, dest_words) = unsafe { destination.write_buffer() }; let size = match T::DIRECTION { TransferDirection::PeripheralToPeripheral( @@ -387,31 +397,33 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { _ => unreachable!(), }; - Self::new::>( - channel, config, src_ptr, dest_ptr, size, + Self::new::>( + channel, + config, + source, + destination, + size, ) .apply_hardware_request_config(config) .apply_peripheral_source_config(config) } - fn apply_hardware_request_config( + fn apply_hardware_request_config( self, - config: DmaConfig, + config: DmaConfig, ) -> Self { self.channel.configure_hardware_request(config); self } - fn apply_peripheral_source_config( + fn apply_peripheral_source_config( self, - config: DmaConfig, + config: DmaConfig, ) -> Self { self.channel.configure_peripheral_flow_control(config); self } -} -impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { fn start_transfer_internal(&mut self) { // Preserve the instruction and bus ordering of preceding buffer access // to the subsequent access by the DMA peripheral due to enabling it. @@ -524,9 +536,11 @@ impl<'a, CH: DmaChannel> DmaTransfer<'a, CH> { } } -impl<'a, CH> Drop for DmaTransfer<'a, CH> +impl<'a, CH, S, D> Drop for DmaTransfer<'a, CH, S, D> where CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, { fn drop(&mut self) { if self.is_running() { diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs index e326beb..b72f96d 100644 --- a/src/gpdma/periph.rs +++ b/src/gpdma/periph.rs @@ -7,7 +7,7 @@ //! encapsulate the logic for initializing these transfers. //! 
- The [`DmaDuplex`] struct combines both TX and RX capabilities, allowing for full-duplex //! operations. -use core::{cell::Cell, marker::PhantomData}; +use core::marker::PhantomData; use crate::Sealed; @@ -18,7 +18,7 @@ use super::{ /// `PeriphTxBuffer` is a wrapper around a peripheral's transmit data register address, used to /// provide a WriteBuffer implementation for initiating memory-to-peripheral DMA transfers. -struct PeriphTxBuffer, W: Word> { +pub struct PeriphTxBuffer, W: Word> { _addr: PhantomData, _word: PhantomData, } @@ -33,7 +33,7 @@ unsafe impl, W: Word> WriteBuffer for PeriphTxBuffer { /// `PeriphRxBuffer` is a wrapper around a peripheral's receive data register address, used to /// provide a ReadBuffer implementation for initiating peripheral-to-memory DMA transfers. -struct PeriphRxBuffer, W: Word> { +pub struct PeriphRxBuffer, W: Word> { _addr: PhantomData, _word: PhantomData, } @@ -156,7 +156,7 @@ where &'a mut self, config: DmaConfig, destination: D, - ) -> DmaTransfer<'a, CH> + ) -> DmaTransfer<'a, CH, PeriphRxBuffer, D> where D: WriteBuffer, { @@ -209,7 +209,7 @@ where &'a mut self, config: DmaConfig, source: S, - ) -> DmaTransfer<'a, CH> + ) -> DmaTransfer<'a, CH, S, PeriphTxBuffer> where S: ReadBuffer, { @@ -226,8 +226,8 @@ where /// peripheral-to-memory DMA transaction for to enable setting up of full-duplex transmission and /// reception of data. Used by peripheral DMA implementations. 
pub struct DmaDuplex { - tx: Cell>, - rx: Cell>, + tx: DmaTx, + rx: DmaRx, } impl DmaDuplex @@ -239,13 +239,13 @@ where { pub fn new(tx: TX, rx: RX) -> Self { Self { - tx: Cell::new(DmaTx::from(tx)), - rx: Cell::new(DmaRx::from(rx)), + tx: DmaTx::from(tx), + rx: DmaRx::from(rx), } } pub fn free(self) -> (TX, RX) { - (self.tx.into_inner().free(), self.rx.into_inner().free()) + (self.tx.free(), self.rx.free()) } } @@ -264,13 +264,16 @@ where rx_config: DmaConfig, source: S, destination: D, - ) -> (DmaTransfer<'a, TX>, DmaTransfer<'a, RX>) + ) -> ( + DmaTransfer<'a, TX, S, PeriphTxBuffer>, + DmaTransfer<'a, RX, PeriphRxBuffer, D>, + ) where S: ReadBuffer, D: WriteBuffer, { - let tx = self.tx.get_mut().init_tx_transfer(tx_config, source); - let rx = self.rx.get_mut().init_rx_transfer(rx_config, destination); + let tx = self.tx.init_tx_transfer(tx_config, source); + let rx = self.rx.init_rx_transfer(rx_config, destination); (tx, rx) } } From cd32400c67734cd6aea5364bf319da1893fa4462 Mon Sep 17 00:00:00 2001 From: astapleton Date: Mon, 4 Aug 2025 16:45:21 -0700 Subject: [PATCH 11/13] mute type complexity clippy --- src/gpdma/periph.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs index b72f96d..0baaca9 100644 --- a/src/gpdma/periph.rs +++ b/src/gpdma/periph.rs @@ -258,6 +258,7 @@ where TX: DmaChannel, RX: DmaChannel, { + #[allow(clippy::type_complexity)] pub fn init_duplex_transfer<'a, S, D>( &'a mut self, tx_config: DmaConfig, From 5b0d9beb8c5f633cb11e01a641686d7520845295 Mon Sep 17 00:00:00 2001 From: astapleton Date: Mon, 4 Aug 2025 16:51:57 -0700 Subject: [PATCH 12/13] rename fields --- src/gpdma.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/gpdma.rs b/src/gpdma.rs index 55f260c..051d71f 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -256,10 +256,8 @@ where // Hold onto source and destination for the lifetime of the transfer to ensure that any // user implementations of ReadBuffer and 
WriteBuffer that are themselves references are held // for the duration of the transfer and so prevent access to the underlying data. - #[allow(dead_code)] - source: S, - #[allow(dead_code)] - destination: D, + _source: S, + _destination: D, } impl<'a, CH, S, D> DmaTransfer<'a, CH, S, D> @@ -291,8 +289,8 @@ where Self { channel, - source, - destination, + _source: source, + _destination: destination, } } From 100bf1bf5d4fb0eccbe5da7a09d632324ebc4c16 Mon Sep 17 00:00:00 2001 From: astapleton Date: Mon, 4 Aug 2025 17:05:38 -0700 Subject: [PATCH 13/13] Add free method, don't consume self in abort/wait_for_transfer_complete --- examples/dma.rs | 110 ++++++++++++------------------------------------ src/gpdma.rs | 50 +++++++++++++--------- 2 files changed, 55 insertions(+), 105 deletions(-) diff --git a/examples/dma.rs b/examples/dma.rs index dc80e80..5562e52 100644 --- a/examples/dma.rs +++ b/examples/dma.rs @@ -7,52 +7,12 @@ mod utilities; use cortex_m::singleton; use cortex_m_rt::entry; use cortex_m_semihosting::debug; -use embedded_dma::{ReadBuffer, WriteBuffer}; use stm32h5xx_hal::{ - gpdma::{config::transform::*, DmaConfig, DmaTransfer, Word}, + gpdma::{config::transform::*, DmaConfig, DmaTransfer}, pac, prelude::*, }; -// Buffer is used to manage a reference to a static buffer returned by the cortex_m::singleton! -// macro and which can be with the DmaTransfer API (which requires passing ReadBuffer and -// WriteBuffer implementations by value) and then used to access the buffer after the transfer has -// completed. 
-struct Buffer { - data: &'static mut [T; N], -} - -impl Buffer -where - T: Word + 'static, -{ - fn new(data: &'static mut [T; N]) -> Self { - Self { data } - } -} - -unsafe impl ReadBuffer for &Buffer -where - T: Word + 'static, -{ - type Word = T; - - unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { - (self.data.as_ptr(), N) - } -} - -unsafe impl WriteBuffer for &mut Buffer -where - T: Word + 'static, -{ - type Word = T; - - unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { - (self.data.as_mut_ptr(), N) - } -} - #[entry] fn main() -> ! { utilities::logger::init(); @@ -72,26 +32,20 @@ fn main() -> ! { let src = singleton!(: [u8; 40] = core::array::from_fn(|i| i as u8)).unwrap(); - let src_buffer = Buffer::new(src); let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); - let mut dest_buffer = Buffer::new(dest); let mut channel = channels.0; let config = DmaConfig::new(); - let mut transfer = DmaTransfer::memory_to_memory( - config, - &mut channel, - &src_buffer, - &mut dest_buffer, - ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); - assert_eq!(src_buffer.data, dest_buffer.data); + let (src, dest) = transfer.free(); + assert_eq!(src, dest); log::info!("u32 to u32 with data transform"); let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); - let mut dest_buffer = Buffer::new(dest); let config = DmaConfig::new().with_data_transform( DataTransform::builder() @@ -99,62 +53,52 @@ fn main() -> ! 
{ .swap_destination_half_word_byte_order(), ); - let mut transfer = DmaTransfer::memory_to_memory( - config, - &mut channel, - src, - &mut dest_buffer, - ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + let expected = [0x78563412; 10]; - assert_eq!(expected, *dest_buffer.data); + assert_eq!(expected, *dest); log::info!("u32 to u16 with truncate"); let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); let dest = singleton!(: [u16; 20] = [0u16; 20]).unwrap(); - let mut dest_buffer = Buffer::new(dest); let config = DmaConfig::new().with_data_transform( DataTransform::builder().left_align_right_truncate(), ); - let mut transfer = DmaTransfer::memory_to_memory( - config, - &mut channel, - src, - &mut dest_buffer, - ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + let expected = [0x1234; 10]; - assert_eq!(expected, (*dest_buffer.data)[0..10]); + assert_eq!(expected, (*dest)[0..10]); log::info!("u32 to u8 with unpack"); let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); - let mut dest_buffer = Buffer::new(dest); let config = DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); - let mut transfer = DmaTransfer::memory_to_memory( - config, - &mut channel, - src, - &mut dest_buffer, - ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); let expected = [0x78, 0x56, 0x34, 0x12]; - assert_eq!(expected, (*dest_buffer.data)[0..4]); - assert_eq!(expected, (*dest_buffer.data)[36..40]); + assert_eq!(expected, (*dest)[0..4]); + 
assert_eq!(expected, (*dest)[36..40]); log::info!("u8 to u32 with pack"); let src = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); - let mut dest_buffer = Buffer::new(dest); for chunk in src.chunks_mut(4) { chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); @@ -162,18 +106,16 @@ fn main() -> ! { let config = DmaConfig::new().with_data_transform(DataTransform::builder().pack()); - let mut transfer = DmaTransfer::memory_to_memory( - config, - &mut channel, - src, - &mut dest_buffer, - ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); transfer.start().unwrap(); transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + let expected = [0x12345678; 10]; - assert_eq!(expected, (*dest_buffer.data)); - assert_eq!(expected, (*dest_buffer.data)); + assert_eq!(expected, *dest); + assert_eq!(expected, *dest); log::info!("All tests passed!"); loop { diff --git a/src/gpdma.rs b/src/gpdma.rs index 051d71f..554b069 100644 --- a/src/gpdma.rs +++ b/src/gpdma.rs @@ -82,6 +82,7 @@ use crate::{ }; use core::{ marker::PhantomData, + mem, ops::Deref, sync::atomic::{fence, Ordering}, }; @@ -252,12 +253,8 @@ where D: WriteBuffer, { channel: &'a mut CH, - - // Hold onto source and destination for the lifetime of the transfer to ensure that any - // user implementations of ReadBuffer and WriteBuffer that are themselves references are held - // for the duration of the transfer and so prevent access to the underlying data. - _source: S, - _destination: D, + source: S, + destination: D, } impl<'a, CH, S, D> DmaTransfer<'a, CH, S, D> @@ -289,8 +286,8 @@ where Self { channel, - _source: source, - _destination: destination, + source, + destination, } } @@ -501,13 +498,12 @@ where /// Blocks waiting for a transfer to complete. Returns an error if one occurred during the /// transfer. 
- pub fn wait_for_transfer_complete(self) -> Result<(), Error> { + pub fn wait_for_transfer_complete(&mut self) -> Result<(), Error> { let result = self.channel.wait_for_transfer_complete(); // Preserve the instruction and bus sequence of the preceding operation and // the subsequent buffer access. fence(Ordering::SeqCst); - core::mem::forget(self); // Prevents self from being dropped and attempting to abort result } @@ -529,8 +525,28 @@ where } /// Abort a transaction and wait for it to suspend the transfer before resetting the channel - pub fn abort(self) { - // Allow Drop implementation to handle transfer abortion + pub fn abort(&mut self) { + if self.is_running() { + self.channel.abort(); + } + + self.disable_interrupts(); + + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + pub fn free(mut self) -> (S, D) { + self.abort(); + let (src, dest) = unsafe { + ( + core::ptr::read(&self.source), + core::ptr::read(&self.destination), + ) + }; + mem::forget(self); + (src, dest) } } @@ -541,14 +557,6 @@ where D: WriteBuffer, { fn drop(&mut self) { - if self.is_running() { - self.channel.abort(); - } - - self.disable_interrupts(); - - // Preserve the instruction and bus sequence of the preceding operation and - // the subsequent buffer access. - fence(Ordering::SeqCst); + self.abort(); } }