// SPDX-License-Identifier: LGPL-3.0-or-later
// See Notices.txt for copyright information
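// exercises the next_pc unit by connecting it to a mock fetch/decode pipeline
// implemented as an extern simulation-only module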
use cpu::{
    config::{CpuConfig, UnitConfig},
    next_pc::{
        DecodeToPostDecodeInterface, DecodeToPostDecodeInterfaceInner, FETCH_BLOCK_ID_WIDTH,
        NextPcToFetchInterface, NextPcToFetchInterfaceInner, WipDecodedInsn, WipDecodedInsnKind,
        next_pc,
    },
    unit::UnitKind,
    util::array_vec::ArrayVec,
};
use fayalite::{prelude::*, sim::vcd::VcdWriterDecls, util::RcWriter};
use std::{
    cell::Cell,
    collections::{BTreeMap, VecDeque},
    num::NonZeroUsize,
};
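// minimal instruction set for the mock decoder; every variant is 4 bytes long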
#[derive(Copy, Clone, Debug)]
enum MockInsn {
    Nop4,
    Jump { target: u64 },
    CondBranch { target: u64 },
    Call { target: u64 },
    Ret,
}
impl MockInsn {
    fn byte_len(self) -> u64 {
        match self {
            MockInsn::Nop4 => 4,
            MockInsn::Jump { .. } => 4,
            MockInsn::CondBranch { .. } => 4,
            MockInsn::Call { .. } => 4,
            MockInsn::Ret => 4,
        }
    }
}
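// fixed mock program: two nops, a backward conditional branch, a call to a
// jump/ret sequence, and jumps that form a self-loop at 0x10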
#[derive(Debug)]
struct MockInsns {
    insns: BTreeMap<u64, MockInsn>,
}
impl MockInsns {
    fn new() -> Self {
        Self {
            insns: BTreeMap::from_iter([
                (0x0, MockInsn::Nop4),
                (0x4, MockInsn::Nop4),
                (0x8, MockInsn::CondBranch { target: 0x4 }),
                (0xC, MockInsn::Call { target: 0x18 }),
                (0x10, MockInsn::Jump { target: 0x10 }),
                (0x14, MockInsn::Jump { target: 0x10 }),
                (0x18, MockInsn::Jump { target: 0x1C }),
                (0x1C, MockInsn::Ret),
            ]),
        }
    }
    fn fetch_block(&self, pc_range: std::ops::Range<u64>) -> impl Iterator<Item = (u64, MockInsn)> {
        self.insns
            .range(pc_range.clone())
            .filter_map(move |(&pc, &insn)| {
                if pc_range.end >= pc + insn.byte_len() {
                    Some((pc, insn))
                } else {
                    None
                }
            })
    }
}
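// capacity of the mock fetch pipe's in-flight queue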
const FETCH_PIPE_QUEUE_SIZE: usize = 5;
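// trap target reported via WipDecodedInsnKind.Interrupt when a fetch block
// decodes to no instructions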
const DEMO_ILLEGAL_INSN_TRAP: u64 = 0xFF000000u64;
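// state for one in-flight fetch: where it starts, how many cycles until its
// mock data is ready, and the fetch block id next_pc assigned to it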
#[hdl]
struct FetchPipeQueueEntry {
    start_pc: UInt<64>,
    cycles_left: UInt<8>,
    fetch_block_id: UInt<{ FETCH_BLOCK_ID_WIDTH }>,
}
impl FetchPipeQueueEntry {
    #[hdl]
    fn default_sim(self) -> SimValue<Self> {
        #[hdl(sim)]
        FetchPipeQueueEntry {
            start_pc: 0u64,
            cycles_left: 0u8,
            fetch_block_id: 0u8,
        }
    }
    fn get_next_delay(delay_sequence_index: &Cell<u64>) -> u8 {
        let index = delay_sequence_index.get();
        delay_sequence_index.set(delay_sequence_index.get().wrapping_add(1));
        // make a pseudo-random number deterministically based on index
        let random = index
            .wrapping_add(1)
            .wrapping_mul(0x18C49126EABE7A0D) // random prime
            .rotate_left(32)
            .wrapping_mul(0x92B38C197608A6B) // random prime
            .rotate_right(60);
        (random % 8) as u8
    }
}
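// simulation-only stand-in for the fetch and decode stages: it queues fetch
// requests from next_pc, delays each by a pseudo-random number of cycles,
// decodes the addressed block against MockInsns, and presents the result on
// the post-decode interface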
#[hdl_module(extern)]
fn mock_fetch_pipe(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let from_fetch: NextPcToFetchInterface<PhantomConst<CpuConfig>> =
        m.input(NextPcToFetchInterface[config]);
    #[hdl]
    let to_post_decode: DecodeToPostDecodeInterface<PhantomConst<CpuConfig>> =
        m.output(DecodeToPostDecodeInterface[config]);
    #[hdl]
    let queue_debug: ArrayVec<FetchPipeQueueEntry, ConstUsize<{ FETCH_PIPE_QUEUE_SIZE }>> =
        m.output();
    m.register_clock_for_past(cd.clk);
    m.extern_module_simulation_fn(
        (cd, from_fetch, to_post_decode, queue_debug),
        |(cd, from_fetch, to_post_decode, queue_debug), mut sim| async move {
            // intentionally have a different sequence each time we're reset
            let delay_sequence_index = Cell::new(0);
            sim.resettable(
                cd,
                async |mut sim| {
                    sim.write(from_fetch.fetch.ready, false).await;
                    sim.write(from_fetch.cancel.ready, false).await;
                    sim.write(
                        to_post_decode.inner.data,
                        to_post_decode.ty().inner.data.HdlNone(),
                    )
                    .await;
                    sim.write(
                        queue_debug,
                        queue_debug.ty().new_sim(FetchPipeQueueEntry.default_sim()),
                    )
                    .await;
                },
                |sim, ()| {
                    run_fn(
                        cd,
                        from_fetch,
                        to_post_decode,
                        queue_debug,
                        &delay_sequence_index,
                        sim,
                    )
                },
            )
            .await;
        },
    );
    #[hdl]
    async fn run_fn(
        cd: Expr<ClockDomain>,
        from_fetch: Expr<NextPcToFetchInterface<PhantomConst<CpuConfig>>>,
        to_post_decode: Expr<DecodeToPostDecodeInterface<PhantomConst<CpuConfig>>>,
        queue_debug: Expr<ArrayVec<FetchPipeQueueEntry, ConstUsize<{ FETCH_PIPE_QUEUE_SIZE }>>>,
        delay_sequence_index: &Cell<u64>,
        mut sim: ExternModuleSimulationState,
    ) {
        let config = from_fetch.config.ty();
        let mock_insns = MockInsns::new();
        let mut queue: VecDeque<SimValue<FetchPipeQueueEntry>> = VecDeque::new();
        let mut next_id = 0u32;
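        // one loop iteration per clock cycle: drive the outputs, wait for the
        // clock edge, then update the queue from the sampled inputs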
        loop {
            let mut sim_queue = queue_debug.ty().new_sim(FetchPipeQueueEntry.default_sim());
            for entry in &queue {
                ArrayVec::try_push_sim(&mut sim_queue, entry)
                    .ok()
                    .expect("queue is known to be small enough");
            }
            sim.write(queue_debug, sim_queue).await;
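            // the oldest queued fetch has waited long enough: decode its fetch
            // block and offer the result to post-decode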
            if let Some(front) = queue.front().filter(|v| v.cycles_left.as_int() == 0) {
                #[hdl(sim)]
                let FetchPipeQueueEntry {
                    start_pc,
                    cycles_left: _,
                    fetch_block_id,
                } = front;
                let start_pc = start_pc.as_int();
                let end_pc =
                    (start_pc + 1).next_multiple_of(config.get().fetch_width_in_bytes() as u64);
                let insns = to_post_decode.ty().inner.data.HdlSome.insns;
                let zeroed_insn = UInt[insns.element().canonical().bit_width()]
                    .zero()
                    .cast_bits_to(insns.element());
                let mut insns = insns.new_sim(zeroed_insn);
                let mut expected_pc = start_pc;
                // TODO: handle instructions that go past the end of a fetch block
                for (pc, insn) in mock_insns.fetch_block(start_pc..end_pc) {
                    let next_pc = pc + insn.byte_len();
                    if pc != expected_pc {
                        break;
                    }
                    expected_pc = next_pc;
                    let kind = match insn {
                        MockInsn::Nop4 => WipDecodedInsnKind.NonBranch(),
                        MockInsn::Jump { target } => WipDecodedInsnKind.Branch(target),
                        MockInsn::CondBranch { target } => WipDecodedInsnKind.BranchCond(target),
                        MockInsn::Call { target } => WipDecodedInsnKind.Call(target),
                        MockInsn::Ret => WipDecodedInsnKind.Ret(),
                    };
                    let insn = #[hdl(sim)]
                    WipDecodedInsn {
                        fetch_block_id,
                        id: next_id.cast_to_static::<UInt<_>>(),
                        pc,
                        size_in_bytes: insn.byte_len().cast_to_static::<UInt<_>>(),
                        kind,
                    };
                    match ArrayVec::try_push_sim(&mut insns, insn) {
                        Ok(()) => next_id = next_id.wrapping_add(1),
                        Err(_) => break,
                    }
                }
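                // nothing decoded in this fetch block: emit a single zero-size
                // entry that reports an illegal-instruction trap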
                if **ArrayVec::len_sim(&insns) == 0 {
                    let Ok(()) = ArrayVec::try_push_sim(
                        &mut insns,
                        #[hdl(sim)]
                        WipDecodedInsn {
                            fetch_block_id,
                            id: next_id.cast_to_static::<UInt<_>>(),
                            pc: start_pc,
                            size_in_bytes: 0u8.cast_to_static::<UInt<_>>(),
                            kind: WipDecodedInsnKind.Interrupt(DEMO_ILLEGAL_INSN_TRAP),
                        },
                    ) else {
                        unreachable!();
                    };
                    next_id = next_id.wrapping_add(1);
                }
                sim.write(
                    to_post_decode.inner.data,
                    HdlSome(
                        #[hdl(sim)]
                        DecodeToPostDecodeInterfaceInner::<_> { insns, config },
                    ),
                )
                .await;
            } else {
                sim.write(
                    to_post_decode.inner.data,
                    to_post_decode.ty().inner.data.HdlNone(),
                )
                .await;
            }
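            // accept a new fetch only while the queue has room; cancel
            // requests are always accepted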
            sim.write(from_fetch.fetch.ready, queue.len() < FETCH_PIPE_QUEUE_SIZE)
                .await;
            sim.write(from_fetch.cancel.ready, true).await;
            sim.wait_for_clock_edge(cd.clk).await;
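            // after the clock edge: drop the presented fetch block once
            // post-decode has accepted it, and count down the remaining delays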
            if sim.read_past_bool(to_post_decode.inner.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(_) = sim.read_past(to_post_decode.inner.data, cd.clk).await {
                    queue.pop_front();
                }
            }
            for entry in &mut queue {
                if entry.cycles_left.as_int() > 0 {
                    entry.cycles_left = (entry.cycles_left.as_int() - 1u8).to_sim_value();
                }
            }
            // handle cancels before pushing new fetch op
            #[hdl(sim)]
            if let HdlSome(in_progress_fetches_to_cancel) =
                sim.read_past(from_fetch.cancel.data, cd.clk).await
            {
                // cancel in-progress fetches from newest to oldest
                for _ in 0..*in_progress_fetches_to_cancel {
                    let _ = queue.pop_back();
                }
            }
            if !sim.read_past_bool(from_fetch.fetch.ready, cd.clk).await {
                continue;
            }
            // handle pushing new fetch op after handling cancels
            #[hdl(sim)]
            if let HdlSome(inner) = sim.read_past(from_fetch.fetch.data, cd.clk).await {
                #[hdl(sim)]
                let NextPcToFetchInterfaceInner {
                    start_pc,
                    fetch_block_id,
                } = &inner;
                queue.push_back(
                    #[hdl(sim)]
                    FetchPipeQueueEntry {
                        start_pc,
                        cycles_left: FetchPipeQueueEntry::get_next_delay(delay_sequence_index),
                        fetch_block_id,
                    },
                );
            }
        }
    }
}
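// device under test: the real next_pc unit connected to the mock fetch pipe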
#[hdl_module]
fn dut(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let next_pc = instance(next_pc(config));
    connect(next_pc.cd, cd);
    #[hdl]
    let mock_fetch_pipe = instance(mock_fetch_pipe(config));
    connect(mock_fetch_pipe.cd, cd);
    connect(mock_fetch_pipe.from_fetch, next_pc.to_fetch);
    connect(next_pc.from_decode, mock_fetch_pipe.to_post_decode);
}
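// runs the DUT for 300 clock cycles (1 us period) and compares the VCD trace
// against the golden file expected/next_pc.vcd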
#[hdl]
#[test]
fn test_next_pc() {
    let _n = SourceLocation::normalize_files_for_tests();
    let mut config = CpuConfig::new(
        vec![
            UnitConfig::new(UnitKind::AluBranch),
            UnitConfig::new(UnitKind::AluBranch),
        ],
        NonZeroUsize::new(20).unwrap(),
    );
    config.fetch_width = NonZeroUsize::new(2).unwrap();
    let m = dut(PhantomConst::new_sized(config));
    let mut sim = Simulation::new(m);
    let mut writer = RcWriter::default();
    sim.add_trace_writer(VcdWriterDecls::new(writer.clone()));
    sim.write_clock(sim.io().cd.clk, false);
    sim.write_reset(sim.io().cd.rst, true);
    for _cycle in 0..300 {
        sim.advance_time(SimDuration::from_nanos(500));
        sim.write_clock(sim.io().cd.clk, true);
        sim.advance_time(SimDuration::from_nanos(500));
        sim.write_clock(sim.io().cd.clk, false);
        sim.write_reset(sim.io().cd.rst, false);
    }
    // FIXME: vcd is just whatever next_pc does now, which isn't known to be correct
    let vcd = String::from_utf8(writer.take()).unwrap();
    println!("####### VCD:\n{vcd}\n#######");
    if vcd != include_str!("expected/next_pc.vcd") {
        panic!();
    }
}