use virtio_drivers::{VirtIOBlk, VirtIOHeader};
use crate::mm::{
    PhysAddr,
    VirtAddr,
    frame_alloc,
    frame_dealloc,
    PhysPageNum,
    FrameTracker,
    PageTable,
    StepByOne,
    kernel_token,
};
use super::BlockDevice;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;

/// Base MMIO address of the first VirtIO slot on the QEMU `virt` machine.
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;

/// VirtIO block device, wrapped in `UPSafeCell` for exclusive access
/// on a uniprocessor.
pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);

lazy_static! {
    /// Frames backing the device's virtqueues, held here so they remain
    /// allocated for as long as the device may use them.
    static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> =
        unsafe { UPSafeCell::new(Vec::new()) };
}

impl BlockDevice for VirtIOBlock {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        self.0
            .exclusive_access()
            .read_block(block_id, buf)
            .expect("Error when reading VirtIOBlk");
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        self.0
            .exclusive_access()
            .write_block(block_id, buf)
            .expect("Error when writing VirtIOBlk");
    }
}

impl VirtIOBlock {
    #[allow(unused)]
    pub fn new() -> Self {
        unsafe {
            Self(UPSafeCell::new(
                VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(),
            ))
        }
    }
}

// The `extern "C"` functions below are the DMA and address-translation
// hooks that the `virtio_drivers` crate links against.

/// Allocate `pages` physically contiguous frames for a virtqueue and
/// return the base physical address.
#[no_mangle]
pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
    let mut ppn_base = PhysPageNum(0);
    for i in 0..pages {
        let frame = frame_alloc().unwrap();
        if i == 0 {
            ppn_base = frame.ppn;
        }
        // The device needs contiguous memory, so the allocator must hand
        // back consecutive frames.
        assert_eq!(frame.ppn.0, ppn_base.0 + i);
        QUEUE_FRAMES.exclusive_access().push(frame);
    }
    ppn_base.into()
}

/// Free the `pages` frames starting at physical address `pa`.
#[no_mangle]
pub extern "C" fn virtio_dma_dealloc(pa: PhysAddr, pages: usize) -> i32 {
    let mut ppn_base: PhysPageNum = pa.into();
    for _ in 0..pages {
        frame_dealloc(ppn_base);
        ppn_base.step();
    }
    0
}

/// Physical memory is identity-mapped in the kernel address space, so
/// physical-to-virtual translation is the identity.
#[no_mangle]
pub extern "C" fn virtio_phys_to_virt(paddr: PhysAddr) -> VirtAddr {
    VirtAddr(paddr.0)
}

/// Translate a kernel virtual address by walking the kernel page table.
#[no_mangle]
pub extern "C" fn virtio_virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
    PageTable::from_token(kernel_token()).translate_va(vaddr).unwrap()
}