Skip to content

Commit 6650a4e

Browse files
MatiasVara and stefano-garzarella
authored and committed
Add proof for conformance to 2.7.7.2 section
Add the verify_spec_2_7_7_2() proof to verify that the queue implementation satisfies the requirement of section 2.7.7.2. The proof branches on whether the EVENT_IDX feature has been negotiated. Unlike the `test_needs_notification()` test, this proof checks all possible values of the queue structure. Signed-off-by: Matias Ezequiel Vara Larsen <[email protected]> Signed-off-by: Siddharth Priya <[email protected]>
1 parent c0668d5 commit 6650a4e

File tree

4 files changed

+286
-2
lines changed

4 files changed

+286
-2
lines changed

.buildkite/custom-tests.json

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,8 +52,7 @@
5252
{
5353
"test_name": "prove-virtio-queue",
5454
"command": "cargo kani --package virtio-queue",
55-
"platform": ["x86_64", "aarch64"],
56-
"timeout_in_minutes": 20
55+
"platform": ["x86_64", "aarch64"]
5756
}
5857
]
5958
}

virtio-queue/Cargo.toml

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,18 @@ criterion = "0.6.0"
2323
vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
2424
memoffset = "0.9.0"
2525

26+
[target.'cfg(kani)'.dependencies]
27+
libc = "0.2.161"
28+
vm-memory = { workspace = true, features = ["backend-mmap"] }
29+
2630
[[bench]]
2731
name = "main"
2832
harness = false
33+
34+
# From https://model-checking.github.io/kani/usage.html#configuration-in-cargotoml
35+
#
36+
# Starting with Rust 1.80 (or nightly-2024-05-05), every reachable #[cfg] will be automatically
37+
# checked that they match the expected config names and values. To avoid warnings on
38+
# cfg(kani), we recommend adding the check-cfg lint config in your crate's Cargo.toml
39+
[lints.rust]
40+
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }

virtio-queue/src/queue.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,9 @@ use crate::desc::{split::VirtqUsedElem, RawDescriptor};
2323
use crate::{error, DescriptorChain, Error, QueueGuard, QueueOwnedT, QueueState, QueueT};
2424
use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
2525

26+
#[cfg(kani)]
27+
mod verification;
28+
2629
/// The maximum queue size as defined in the Virtio Spec.
2730
pub const MAX_QUEUE_SIZE: u16 = 32768;
2831

Lines changed: 270 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,270 @@
1+
use std::mem::ManuallyDrop;
2+
use std::num::Wrapping;
3+
4+
use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
5+
6+
use super::*;
7+
8+
/// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
9+
/// `GuestMemoryMmap`, which manages a list of regions and then does a binary
10+
/// search to determine which region a specific read or write request goes to,
11+
/// this only uses a single region. Eliminating this binary search significantly
12+
/// speeds up all queue proofs, because it eliminates the only loop contained herein,
13+
/// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
14+
/// it works identically to `GuestMemoryMmap` with only a single contained region.
15+
pub struct SingleRegionGuestMemory {
16+
the_region: vm_memory::GuestRegionMmap,
17+
}
18+
19+
impl GuestMemory for SingleRegionGuestMemory {
20+
type R = vm_memory::GuestRegionMmap;
21+
22+
fn num_regions(&self) -> usize {
23+
1
24+
}
25+
26+
fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
27+
self.the_region
28+
.to_region_addr(addr)
29+
.map(|_| &self.the_region)
30+
}
31+
32+
fn iter(&self) -> impl Iterator<Item = &Self::R> {
33+
std::iter::once(&self.the_region)
34+
}
35+
36+
fn try_access<F>(
37+
&self,
38+
count: usize,
39+
addr: GuestAddress,
40+
mut f: F,
41+
) -> vm_memory::guest_memory::Result<usize>
42+
where
43+
F: FnMut(
44+
usize,
45+
usize,
46+
MemoryRegionAddress,
47+
&Self::R,
48+
) -> vm_memory::guest_memory::Result<usize>,
49+
{
50+
// We only have a single region, meaning a lot of the complications of the default
51+
// try_access implementation for dealing with reads/writes across multiple
52+
// regions does not apply.
53+
let region_addr = self
54+
.the_region
55+
.to_region_addr(addr)
56+
.ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
57+
self.the_region
58+
.checked_offset(region_addr, count)
59+
.ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
60+
f(0, count, region_addr, &self.the_region)
61+
}
62+
}
63+
64+
impl kani::Arbitrary for SingleRegionGuestMemory {
65+
fn any() -> Self {
66+
guest_memory(
67+
ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
68+
)
69+
}
70+
}
71+
pub struct ProofContext {
72+
pub queue: Queue,
73+
pub memory: SingleRegionGuestMemory,
74+
}
75+
76+
pub struct MmapRegionStub {
77+
_addr: *mut u8,
78+
_size: usize,
79+
_bitmap: (),
80+
_file_offset: Option<FileOffset>,
81+
_prot: i32,
82+
_flags: i32,
83+
_owned: bool,
84+
_hugetlbfs: Option<bool>,
85+
}
86+
87+
/// We start the first guest memory region at an offset so that harnesses using
88+
/// Queue::any() will be exposed to queue segments both before and after valid guest memory.
89+
/// This is conforming to MockSplitQueue::new() that uses `0` as starting address of the
90+
/// virtqueue. Also, QUEUE_END is the size only if GUEST_MEMORY_BASE is `0`
91+
const GUEST_MEMORY_BASE: u64 = 0;
92+
93+
// We size our guest memory to fit a properly aligned queue, plus some wiggles bytes
94+
// to make sure we not only test queues where all segments are consecutively aligned.
95+
// We need to give at least 16 bytes of buffer space for the descriptor table to be
96+
// able to change its address, as it is 16-byte aligned.
97+
const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
98+
99+
fn guest_memory(memory: *mut u8) -> SingleRegionGuestMemory {
100+
// Ideally, we'd want to do
101+
// let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
102+
// .with_raw_mmap_pointer(bytes.as_mut_ptr())
103+
// .build()
104+
// .unwrap()};
105+
// However, .build() calls to .build_raw(), which contains a call to libc::sysconf.
106+
// Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
107+
// standard library using a special version of the libc crate, it runs into some problems
108+
// [1] Even if we work around those problems, we run into performance problems [2].
109+
// Therefore, for now we stick to this ugly transmute hack (which only works because
110+
// the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
111+
//
112+
// [1]: https://github.com/model-checking/kani/issues/2673
113+
// [2]: https://github.com/model-checking/kani/issues/2538
114+
let region_stub = MmapRegionStub {
115+
_addr: memory,
116+
_size: GUEST_MEMORY_SIZE,
117+
_bitmap: Default::default(),
118+
_file_offset: None,
119+
_prot: 0,
120+
_flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
121+
_owned: false,
122+
_hugetlbfs: None,
123+
};
124+
125+
let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
126+
127+
let guest_region =
128+
vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
129+
130+
// Use a single memory region for guests of size < 2GB
131+
SingleRegionGuestMemory {
132+
the_region: guest_region,
133+
}
134+
}
135+
136+
const MAX_QUEUE_SIZE: u16 = 4;
137+
138+
// Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
139+
// at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
140+
// specification.
141+
const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
142+
143+
/// descriptor table has 16 bytes per entry, avail ring starts right after
144+
const MAX_START_AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
145+
146+
/// Used ring starts after avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE),
147+
/// and needs 2 bytes of padding
148+
const MAX_START_USED_RING_BASE_ADDRESS: u64 =
149+
MAX_START_AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
150+
151+
/// The address of the first byte after the queue. Since our queue starts at guest physical
152+
/// address 0, this is also the size of the memory area occupied by the queue.
153+
/// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE
154+
const QUEUE_END: u64 = MAX_START_USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
155+
156+
impl kani::Arbitrary for ProofContext {
157+
fn any() -> Self {
158+
// descriptor table has 16 bytes per entry, avail ring starts right after
159+
let desc_tbl_queue_size = kani::any::<u16>();
160+
// Alignment of the descriptor table is 16 bytes as per the VirtIO spec.
161+
// See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
162+
kani::assume(
163+
desc_tbl_queue_size <= 16 * MAX_QUEUE_SIZE && (desc_tbl_queue_size & 0xF == 0),
164+
);
165+
let avail_ring_base_address: u64 = QUEUE_BASE_ADDRESS + desc_tbl_queue_size as u64;
166+
167+
// Used ring starts after avail ring (which has max size 6 + 2 * MAX_QUEUE_SIZE),
168+
// and needs 2 bytes of padding
169+
let avail_ring_queue_size = kani::any::<u16>();
170+
// Alignment of the available ring is 2 bytes as per the VirtIO spec.
171+
// See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
172+
kani::assume(
173+
avail_ring_queue_size <= 6 + 2 * MAX_QUEUE_SIZE + 2
174+
&& (avail_ring_queue_size & 0x1 == 0),
175+
);
176+
let used_ring_base_address: u64 = avail_ring_base_address + avail_ring_queue_size as u64;
177+
178+
// The address of the first byte after the queue. Since our queue starts at guest physical
179+
// address 0, this is also the size of the memory area occupied by the queue.
180+
// Note that the used ring structure has max size 6 + 8 * MAX_QUEUE_SIZE
181+
let used_ring_queue_size = kani::any::<u16>();
182+
// Alignment of the used ring is 4 bytes as per the VirtIO spec.
183+
// See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
184+
kani::assume(
185+
avail_ring_queue_size <= 6 + 8 * MAX_QUEUE_SIZE && (used_ring_queue_size & 0x3 == 0),
186+
);
187+
188+
// The size of the queue data structures should fill the available space
189+
kani::assume(QUEUE_END == used_ring_base_address + used_ring_queue_size as u64);
190+
191+
let mem = SingleRegionGuestMemory::any();
192+
193+
let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
194+
195+
queue.ready = true;
196+
197+
queue.set_desc_table_address(
198+
Some(QUEUE_BASE_ADDRESS as u32),
199+
Some((QUEUE_BASE_ADDRESS >> 32) as u32),
200+
);
201+
202+
queue.set_avail_ring_address(
203+
Some(avail_ring_base_address as u32),
204+
Some((avail_ring_base_address >> 32) as u32),
205+
);
206+
207+
queue.set_used_ring_address(
208+
Some(used_ring_base_address as u32),
209+
Some((used_ring_base_address >> 32) as u32),
210+
);
211+
212+
queue.set_next_avail(kani::any());
213+
queue.set_next_used(kani::any());
214+
queue.set_event_idx(kani::any());
215+
queue.num_added = Wrapping(kani::any());
216+
217+
kani::assume(queue.is_valid(&mem));
218+
219+
ProofContext { queue, memory: mem }
220+
}
221+
}
222+
223+
#[kani::proof]
224+
// There are no loops anywhere, but kani really enjoys getting stuck in std::ptr::drop_in_place.
225+
// This is a compiler intrinsic that has a "dummy" implementation in stdlib that just
226+
// recursively calls itself. Kani will generally unwind this recursion infinitely.
227+
#[kani::unwind(0)]
228+
fn verify_spec_2_7_7_2() {
229+
// Section 2.7.7.2 deals with device-to-driver notification suppression.
230+
// It describes a mechanism by which the driver can tell the device that it does not
231+
// want notifications (IRQs) about the device finishing processing individual buffers
232+
// (descriptor chain heads) from the avail ring until a specific number of descriptors
233+
// has been processed. This is done by the driver
234+
// defining a "used_event" index, which tells the device "please do not notify me until
235+
// used.ring[used_event] has been written to by you".
236+
let ProofContext {
237+
mut queue,
238+
memory: mem,
239+
} = kani::any();
240+
241+
let num_added_old = queue.num_added.0;
242+
let needs_notification = queue.needs_notification(&mem);
243+
244+
// event_idx_enabled equivalent to VIRTIO_F_EVENT_IDX negotiated
245+
if !queue.event_idx_enabled {
246+
// The specification here says
247+
// After the device writes a descriptor index into the used ring:
248+
// – If flags is 1, the device SHOULD NOT send a notification.
249+
// – If flags is 0, the device MUST send a notification
250+
// flags is the first field in the avail_ring_address, which we completely ignore. We
251+
// always send a notification, and as there only is a SHOULD NOT, that is okay
252+
assert!(needs_notification.unwrap());
253+
} else {
254+
// next_used - 1 is where the previous descriptor was placed
255+
if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap())
256+
== std::num::Wrapping(queue.next_used - Wrapping(1))
257+
&& num_added_old > 0
258+
{
259+
// If the idx field in the used ring (which determined where that descriptor index
260+
// was placed) was equal to used_event, the device MUST send a
261+
// notification.
262+
assert!(needs_notification.unwrap());
263+
264+
kani::cover!();
265+
}
266+
267+
// The other case is handled by a "SHOULD NOT send a notification" in the spec.
268+
// So we do not care
269+
}
270+
}

0 commit comments

Comments
 (0)