diff --git a/crates/examples/sel4cp/http-server/pds/virtio-blk-driver/src/main.rs b/crates/examples/sel4cp/http-server/pds/virtio-blk-driver/src/main.rs index fbb948fc5..ef3f70f5f 100644 --- a/crates/examples/sel4cp/http-server/pds/virtio-blk-driver/src/main.rs +++ b/crates/examples/sel4cp/http-server/pds/virtio-blk-driver/src/main.rs @@ -6,6 +6,7 @@ extern crate alloc; use alloc::boxed::Box; use alloc::collections::BTreeMap; +use core::pin::Pin; use core::ptr::NonNull; use virtio_drivers::{ @@ -91,7 +92,7 @@ struct HandlerImpl { client_region: ExternallySharedRef<'static, [u8]>, client_client_dma_region_paddr: usize, ring_buffers: RingBuffers<'static, fn() -> Result<(), !>, BlockIORequest>, - pending: BTreeMap<u16, Box<PendingEntry>>, + pending: BTreeMap<u16, Pin<Box<PendingEntry>>>, } struct PendingEntry { @@ -111,18 +112,24 @@ impl Handler for HandlerImpl { while self.dev.peek_used().is_some() { let token = self.dev.peek_used().unwrap(); let mut pending_entry = self.pending.remove(&token).unwrap(); - let range_start = pending_entry.client_req.buf().encoded_addr() - - self.client_client_dma_region_paddr; - let range_end = range_start - + usize::try_from(pending_entry.client_req.buf().len()).unwrap(); - let range = range_start..range_end; - let mut unsafe_buf = self.client_region.as_mut_ptr().index(range).as_raw_ptr(); + let buf_range = { + let start = pending_entry.client_req.buf().encoded_addr() + - self.client_client_dma_region_paddr; + let len = usize::try_from(pending_entry.client_req.buf().len()).unwrap(); + start..start + len + }; + let mut buf_ptr = self + .client_region + .as_mut_ptr() + .index(buf_range) + .as_raw_ptr(); unsafe { + let pending_entry = &mut *pending_entry; self.dev .complete_read_block( token, &pending_entry.virtio_req, - unsafe_buf.as_mut(), + buf_ptr.as_mut(), &mut pending_entry.virtio_resp, ) .unwrap(); @@ -140,23 +147,29 @@ impl Handler for HandlerImpl { while self.pending.len() < QUEUE_SIZE && !self.ring_buffers.free().is_empty() { let client_req = 
self.ring_buffers.free_mut().dequeue().unwrap(); assert_eq!(client_req.ty().unwrap(), BlockIORequestType::Read); - let block_id = client_req.block_id(); - let range_start = - client_req.buf().encoded_addr() - self.client_client_dma_region_paddr; - let range_end = range_start + usize::try_from(client_req.buf().len()).unwrap(); - let range = range_start..range_end; - let mut unsafe_buf = self.client_region.as_mut_ptr().index(range).as_raw_ptr(); - let mut pending_entry = Box::new(PendingEntry { + let mut pending_entry = Box::pin(PendingEntry { client_req, virtio_req: BlkReq::default(), virtio_resp: BlkResp::default(), }); + let buf_range = { + let start = + client_req.buf().encoded_addr() - self.client_client_dma_region_paddr; + let len = usize::try_from(client_req.buf().len()).unwrap(); + start..start + len + }; + let mut buf_ptr = self + .client_region + .as_mut_ptr() + .index(buf_range) + .as_raw_ptr(); let token = unsafe { + let pending_entry = &mut *pending_entry; self.dev .read_block_nb( - block_id, + pending_entry.client_req.block_id(), &mut pending_entry.virtio_req, - unsafe_buf.as_mut(), + buf_ptr.as_mut(), &mut pending_entry.virtio_resp, ) .unwrap() diff --git a/crates/examples/sel4cp/http-server/pds/virtio-net-driver/src/main.rs b/crates/examples/sel4cp/http-server/pds/virtio-net-driver/src/main.rs index 036c74ea9..f9438210c 100644 --- a/crates/examples/sel4cp/http-server/pds/virtio-net-driver/src/main.rs +++ b/crates/examples/sel4cp/http-server/pds/virtio-net-driver/src/main.rs @@ -23,8 +23,8 @@ use sel4cp_http_server_example_virtio_net_driver_interface_types::*; const DEVICE: Channel = Channel::new(0); const CLIENT: Channel = Channel::new(1); -const NET_BUFFER_LEN: usize = 2048; const NET_QUEUE_SIZE: usize = 16; +const NET_BUFFER_LEN: usize = 2048; #[protection_domain( heap_size = 512 * 1024, @@ -105,43 +105,51 @@ impl Handler for HandlerImpl { match channel { DEVICE | CLIENT => { let mut notify_rx = false; + while self.dev.can_recv() && 
!self.rx_ring_buffers.free().is_empty() { let rx_buf = self.dev.receive().unwrap(); let desc = self.rx_ring_buffers.free_mut().dequeue().unwrap(); let desc_len = usize::try_from(desc.len()).unwrap(); assert!(desc_len >= rx_buf.packet_len()); - let start = desc.encoded_addr() - self.client_client_dma_region_paddr; - let end = start + rx_buf.packet_len(); - let range = start..end; + let buf_range = { + let start = desc.encoded_addr() - self.client_client_dma_region_paddr; + start..start + rx_buf.packet_len() + }; self.client_region .as_mut_ptr() - .index(range) + .index(buf_range) .copy_from_slice(rx_buf.packet()); self.dev.recycle_rx_buffer(rx_buf).unwrap(); self.rx_ring_buffers.used_mut().enqueue(desc).unwrap(); notify_rx = true; } + if notify_rx { self.rx_ring_buffers.notify().unwrap(); } + let mut notify_tx = false; + while !self.tx_ring_buffers.free().is_empty() && self.dev.can_send() { let desc = self.tx_ring_buffers.free_mut().dequeue().unwrap(); - let start = desc.encoded_addr() - self.client_client_dma_region_paddr; - let end = start + usize::try_from(desc.len()).unwrap(); - let range = start..end; - let mut tx_buf = self.dev.new_tx_buffer(range.len()); + let buf_range = { + let start = desc.encoded_addr() - self.client_client_dma_region_paddr; + start..start + usize::try_from(desc.len()).unwrap() + }; + let mut tx_buf = self.dev.new_tx_buffer(buf_range.len()); self.client_region .as_ptr() - .index(range) + .index(buf_range) .copy_into_slice(tx_buf.packet_mut()); self.dev.send(tx_buf).unwrap(); self.tx_ring_buffers.used_mut().enqueue(desc).unwrap(); notify_tx = true; } + if notify_tx { self.tx_ring_buffers.notify().unwrap(); } + self.dev.ack_interrupt(); DEVICE.irq_ack().unwrap(); }