use super::BlockDevice;
use crate::mm::{
    frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum,
    StepByOne, VirtAddr,
};
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
use virtio_drivers::{Hal, VirtIOBlk, VirtIOHeader};

/// Base MMIO address of the first VirtIO device on the QEMU `virt` machine.
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;

/// A VirtIO block device, wrapped in `UPSafeCell` so it can be mutated
/// through a shared reference on a uniprocessor.
pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static, VirtioHal>>);

lazy_static! {
    /// Frames backing the VirtIO queues; the trackers are parked here so the
    /// frames are not returned to the allocator while the device still uses them.
    static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe { UPSafeCell::new(Vec::new()) };
}
impl BlockDevice for VirtIOBlock {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        self.0
            .exclusive_access()
            .read_block(block_id, buf)
            .expect("Error when reading VirtIOBlk");
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        self.0
            .exclusive_access()
            .write_block(block_id, buf)
            .expect("Error when writing VirtIOBlk");
    }
}
impl VirtIOBlock {
    #[allow(unused)]
    pub fn new() -> Self {
        // Safety: VIRTIO0 is the MMIO base of a VirtIO header inside the
        // kernel's identity-mapped region, so the raw-pointer cast is valid.
        unsafe {
            Self(UPSafeCell::new(
                VirtIOBlk::<VirtioHal>::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(),
            ))
        }
    }
}
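
// Usage sketch (hypothetical): the rest of the kernel would typically expose
// this driver behind a `BlockDevice` trait object. The `BLOCK_DEVICE` name and
// its placement in the parent `block` module are assumptions, not part of
// this file:
//
//     use alloc::sync::Arc;
//     lazy_static! {
//         pub static ref BLOCK_DEVICE: Arc<dyn BlockDevice> = Arc::new(VirtIOBlock::new());
//     }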
/// Hardware abstraction layer glue that lets `virtio-drivers` allocate DMA
/// memory and translate addresses through this kernel's `mm` subsystem.
pub struct VirtioHal;

impl Hal for VirtioHal {
    fn dma_alloc(pages: usize) -> usize {
        // VirtIO queues require physically contiguous memory. Allocate `pages`
        // frames one by one and assert that the allocator handed them out
        // consecutively.
        let mut ppn_base = PhysPageNum(0);
        for i in 0..pages {
            let frame = frame_alloc().unwrap();
            if i == 0 {
                ppn_base = frame.ppn;
            }
            assert_eq!(frame.ppn.0, ppn_base.0 + i);
            // Stash the tracker so the frame is not freed when it goes out of scope.
            QUEUE_FRAMES.exclusive_access().push(frame);
        }
        let pa: PhysAddr = ppn_base.into();
        pa.0
    }

    fn dma_dealloc(pa: usize, pages: usize) -> i32 {
        // Return the frames to the allocator one page at a time.
        let pa = PhysAddr::from(pa);
        let mut ppn_base: PhysPageNum = pa.into();
        for _ in 0..pages {
            frame_dealloc(ppn_base);
            ppn_base.step();
        }
        0
    }

    fn phys_to_virt(addr: usize) -> usize {
        // Physical memory is identity-mapped in the kernel address space,
        // so no translation is needed.
        addr
    }

    fn virt_to_phys(vaddr: usize) -> usize {
        // Walk the kernel page table to translate a kernel virtual address.
        PageTable::from_token(kernel_token())
            .translate_va(VirtAddr::from(vaddr))
            .unwrap()
            .0
    }
}
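
// Smoke-test sketch (hypothetical): a write-then-read round trip through the
// `BlockDevice` trait. The 512-byte block size and the `BLOCK_DEVICE`
// singleton from the sketch above are assumptions:
//
//     let write_buf = [0xABu8; 512];
//     let mut read_buf = [0u8; 512];
//     BLOCK_DEVICE.write_block(0, &write_buf);
//     BLOCK_DEVICE.read_block(0, &mut read_buf);
//     assert_eq!(write_buf, read_buf);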