// NOTE: repository-browser metadata captured with this file (not Rust source):
//   cpu/crates/cpu/tests/next_pc.rs
//   Jacob Lifshay d42f010cda
//   Some checks failed: / test (pull_request) Failing after 14s
//   WIP fixing bugs
//   2025-12-15 02:48:40 -08:00
//   913 lines, 31 KiB, Rust
// SPDX-License-Identifier: LGPL-3.0-or-later
// See Notices.txt for copyright information
use cpu::{
config::{CpuConfig, UnitConfig},
next_pc::{
CallStackOp, DecodeToPostDecodeInterface, DecodeToPostDecodeInterfaceInner,
FETCH_BLOCK_ID_WIDTH, NextPcToFetchInterface, NextPcToFetchInterfaceInner,
PostDecodeOutputInterface, RetireToNextPcInterface, RetireToNextPcInterfaceInner,
RetireToNextPcInterfacePerInsn, WipDecodedInsn, WipDecodedInsnKind, next_pc,
},
unit::UnitKind,
util::array_vec::ArrayVec,
};
use fayalite::{
prelude::*,
sim::vcd::VcdWriterDecls,
util::{DebugAsDisplay, RcWriter},
};
use std::{
cell::Cell,
collections::{BTreeMap, BTreeSet, VecDeque},
num::NonZeroUsize,
u64,
};
/// Instruction kinds understood by the mock fetch/decode/execute pipes.
#[derive(Copy, Clone, Debug)]
enum MockInsn {
    /// 4-byte no-op
    Nop4,
    /// unconditional jump to `target`
    Jump { target: u64 },
    /// conditional branch to `target`; taken/not-taken comes from the retire sequence
    CondBranch { target: u64 },
    /// call to `target`, pushing the return address on the call stack
    Call { target: u64 },
    /// return, popping the call stack
    Ret,
}
impl MockInsn {
    /// Size of this instruction's encoding in bytes (every mock insn is 4 bytes).
    fn byte_len(self) -> u64 {
        match self {
            MockInsn::Nop4 => 4,
            MockInsn::Jump { .. } => 4,
            MockInsn::CondBranch { .. } => 4,
            MockInsn::Call { .. } => 4,
            MockInsn::Ret => 4,
        }
    }
    /// The mock program as `(pc, insn)` pairs: two nops, a backward
    /// conditional branch to 0x4, a call to a subroutine at 0x18 that
    /// returns, and a 0x10 <-> 0x14 jump cycle.
    // `&'static` is implied for references in consts
    // (clippy::redundant_static_lifetimes), so plain `&` is used here.
    const INSNS: &[(u64, Self)] = &[
        (0x0, MockInsn::Nop4),
        (0x4, MockInsn::Nop4),
        (0x8, MockInsn::CondBranch { target: 0x4 }),
        (0xC, MockInsn::Call { target: 0x18 }),
        (0x10, MockInsn::Jump { target: 0x14 }),
        (0x14, MockInsn::Jump { target: 0x10 }),
        (0x18, MockInsn::Jump { target: 0x1C }),
        (0x1C, MockInsn::Ret),
    ];
    /// Expected retire order for the start of the program (run exactly once):
    /// the 0x8 branch is taken three times, then falls through to the
    /// call/jump/ret subroutine.
    const RETIRE_SEQ_INIT: &[RetireSeqEntry] = &[
        RetireSeqEntry {
            pc: 0x0,
            cond_br_taken: None,
            kind: MockInsn::Nop4,
        },
        RetireSeqEntry {
            pc: 0x4,
            cond_br_taken: None,
            kind: MockInsn::Nop4,
        },
        RetireSeqEntry {
            pc: 0x8,
            cond_br_taken: Some(true),
            kind: MockInsn::CondBranch { target: 0x4 },
        },
        RetireSeqEntry {
            pc: 0x4,
            cond_br_taken: None,
            kind: MockInsn::Nop4,
        },
        RetireSeqEntry {
            pc: 0x8,
            cond_br_taken: Some(true),
            kind: MockInsn::CondBranch { target: 0x4 },
        },
        RetireSeqEntry {
            pc: 0x4,
            cond_br_taken: None,
            kind: MockInsn::Nop4,
        },
        RetireSeqEntry {
            pc: 0x8,
            cond_br_taken: Some(true),
            kind: MockInsn::CondBranch { target: 0x4 },
        },
        RetireSeqEntry {
            pc: 0x4,
            cond_br_taken: None,
            kind: MockInsn::Nop4,
        },
        RetireSeqEntry {
            pc: 0x8,
            cond_br_taken: Some(false),
            kind: MockInsn::CondBranch { target: 0x4 },
        },
        RetireSeqEntry {
            pc: 0xC,
            cond_br_taken: None,
            kind: MockInsn::Call { target: 0x18 },
        },
        RetireSeqEntry {
            pc: 0x18,
            cond_br_taken: None,
            kind: MockInsn::Jump { target: 0x1C },
        },
        RetireSeqEntry {
            pc: 0x1C,
            cond_br_taken: None,
            kind: MockInsn::Ret,
        },
    ];
    /// Expected retire order for the steady-state 0x10 <-> 0x14 jump loop,
    /// repeated forever after `RETIRE_SEQ_INIT`.
    const RETIRE_SEQ_CYCLE: &[RetireSeqEntry] = &[
        RetireSeqEntry {
            pc: 0x10,
            cond_br_taken: None,
            kind: MockInsn::Jump { target: 0x14 },
        },
        RetireSeqEntry {
            pc: 0x14,
            cond_br_taken: None,
            kind: MockInsn::Jump { target: 0x10 },
        },
    ];
}
/// One step of the expected architectural retire sequence.
#[derive(Copy, Clone, Debug)]
struct RetireSeqEntry {
    // architectural pc of the retiring instruction
    pc: u64,
    // Some(taken?) for conditional branches, None for everything else
    cond_br_taken: Option<bool>,
    // which mock instruction is expected to retire at this pc
    kind: MockInsn,
}
/// Infinite iterator over the expected retire sequence:
/// `RETIRE_SEQ_INIT` once, then `RETIRE_SEQ_CYCLE` repeated forever.
/// `Clone` allows peeking ahead without consuming the real sequence.
#[derive(Clone)]
struct RetireSeq {
    iter: std::iter::Chain<
        std::slice::Iter<'static, RetireSeqEntry>,
        std::iter::Cycle<std::slice::Iter<'static, RetireSeqEntry>>,
    >,
}
impl RetireSeq {
    /// Builds the retire sequence: the init entries once, followed by the
    /// steady-state cycle entries repeated forever.
    fn new() -> Self {
        let init = MockInsn::RETIRE_SEQ_INIT.iter();
        let steady_state = MockInsn::RETIRE_SEQ_CYCLE.iter().cycle();
        Self {
            iter: init.chain(steady_state),
        }
    }
}
impl Iterator for RetireSeq {
    type Item = RetireSeqEntry;
    // never returns None: the underlying chained `cycle()` is infinite
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().copied()
    }
}
/// The mock "instruction memory": a pc -> instruction map built from
/// [`MockInsn::INSNS`].
#[derive(Debug)]
struct MockInsns {
    insns: BTreeMap<u64, MockInsn>,
}
impl MockInsns {
    fn new() -> Self {
        Self {
            insns: BTreeMap::from_iter(MockInsn::INSNS.iter().copied()),
        }
    }
    /// Yields the instructions whose encodings fit entirely inside the fetch
    /// block `pc_range` (an instruction starting in range but extending past
    /// `pc_range.end` is excluded).
    fn fetch_block(&self, pc_range: std::ops::Range<u64>) -> impl Iterator<Item = (u64, MockInsn)> {
        // hoist the end bound so `pc_range` can be moved into `range()`
        // without cloning it for the closure
        let end = pc_range.end;
        self.insns.range(pc_range).filter_map(move |(&pc, &insn)| {
            if end >= pc + insn.byte_len() {
                Some((pc, insn))
            } else {
                None
            }
        })
    }
}
/// Capacity of the mock fetch pipe's in-flight queue.
const FETCH_PIPE_QUEUE_SIZE: usize = 5;
/// Arbitrary trap-handler pc used when a fetch block decodes to zero instructions.
const DEMO_ILLEGAL_INSN_TRAP: u64 = 0xFF000000u64;
/// One in-flight fetch in the mock fetch pipe.
#[hdl]
struct FetchPipeQueueEntry {
    // pc of the start of the fetch block
    start_pc: UInt<64>,
    // remaining simulated fetch-latency cycles before the block can be decoded
    cycles_left: UInt<8>,
    // id assigned by next_pc, passed through to the decoded instructions
    fetch_block_id: UInt<{ FETCH_BLOCK_ID_WIDTH }>,
}
impl FetchPipeQueueEntry {
    /// An all-zero simulation value used to initialize/clear debug outputs.
    #[hdl]
    fn default_sim(self) -> SimValue<Self> {
        #[hdl(sim)]
        FetchPipeQueueEntry {
            start_pc: 0u64,
            cycles_left: 0u8,
            fetch_block_id: 0u8,
        }
    }
    /// Deterministic pseudo-random fetch latency in `0..8` cycles.
    /// Each call advances `delay_sequence_index` by one.
    fn get_next_delay(delay_sequence_index: &Cell<u64>) -> u8 {
        let index = delay_sequence_index.get();
        // reuse `index` instead of redundantly re-reading the Cell
        delay_sequence_index.set(index.wrapping_add(1));
        // make a pseudo-random number deterministically based on index
        let random = index
            .wrapping_add(1)
            .wrapping_mul(0x18C49126EABE7A0D) // random prime
            .rotate_left(32)
            .wrapping_mul(0x92B38C197608A6B) // random prime
            .rotate_right(60);
        (random % 8) as u8
    }
}
/// Mock of the fetch + decode pipeline stage.
///
/// Accepts fetch-block requests from `next_pc`, holds each one in a queue for
/// a deterministic pseudo-random number of cycles, then "decodes" the mock
/// instructions in that block and presents them to post-decode. Cancel
/// requests drop the newest in-flight fetches.
#[hdl_module(extern)]
fn mock_fetch_pipe(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    // fetch and cancel requests from next_pc
    #[hdl]
    let from_fetch: NextPcToFetchInterface<PhantomConst<CpuConfig>> =
        m.input(NextPcToFetchInterface[config]);
    // decoded instructions going to the post-decode stage
    #[hdl]
    let to_post_decode: DecodeToPostDecodeInterface<PhantomConst<CpuConfig>> =
        m.output(DecodeToPostDecodeInterface[config]);
    // debug-only view of the in-flight fetch queue (shows up in VCD traces)
    #[hdl]
    let queue_debug: ArrayVec<FetchPipeQueueEntry, ConstUsize<{ FETCH_PIPE_QUEUE_SIZE }>> =
        m.output();
    m.register_clock_for_past(cd.clk);
    m.extern_module_simulation_fn(
        (cd, from_fetch, to_post_decode, queue_debug),
        |(cd, from_fetch, to_post_decode, queue_debug), mut sim| async move {
            // intentionally have a different sequence each time we're reset
            let delay_sequence_index = Cell::new(0);
            sim.resettable(
                cd,
                async |mut sim| {
                    // while in reset: accept nothing, output nothing
                    sim.write(from_fetch.fetch.ready, false).await;
                    sim.write(from_fetch.cancel.ready, false).await;
                    sim.write(
                        to_post_decode.inner.data,
                        to_post_decode.ty().inner.data.HdlNone(),
                    )
                    .await;
                    sim.write(
                        queue_debug,
                        queue_debug.ty().new_sim(FetchPipeQueueEntry.default_sim()),
                    )
                    .await;
                },
                |sim, ()| {
                    run_fn(
                        cd,
                        from_fetch,
                        to_post_decode,
                        queue_debug,
                        &delay_sequence_index,
                        sim,
                    )
                },
            )
            .await;
        },
    );
    /// Per-cycle simulation body of the mock fetch pipe.
    #[hdl]
    async fn run_fn(
        cd: Expr<ClockDomain>,
        from_fetch: Expr<NextPcToFetchInterface<PhantomConst<CpuConfig>>>,
        to_post_decode: Expr<DecodeToPostDecodeInterface<PhantomConst<CpuConfig>>>,
        queue_debug: Expr<ArrayVec<FetchPipeQueueEntry, ConstUsize<{ FETCH_PIPE_QUEUE_SIZE }>>>,
        delay_sequence_index: &Cell<u64>,
        mut sim: ExternModuleSimulationState,
    ) {
        let config = from_fetch.config.ty();
        let mock_insns = MockInsns::new();
        // in-flight fetches, oldest first
        let mut queue: VecDeque<SimValue<FetchPipeQueueEntry>> = VecDeque::new();
        // id assigned to each decoded instruction, increments (wrapping) per insn
        let mut next_id = 0u32;
        loop {
            // mirror the queue into the debug output
            let mut sim_queue = queue_debug.ty().new_sim(FetchPipeQueueEntry.default_sim());
            for entry in &queue {
                ArrayVec::try_push_sim(&mut sim_queue, entry)
                    .ok()
                    .expect("queue is known to be small enough");
            }
            sim.write(queue_debug, sim_queue).await;
            // once the oldest fetch's simulated latency has elapsed, decode it
            if let Some(front) = queue.front().filter(|v| v.cycles_left.as_int() == 0) {
                #[hdl(sim)]
                let FetchPipeQueueEntry {
                    start_pc,
                    cycles_left: _,
                    fetch_block_id,
                } = front;
                let start_pc = start_pc.as_int();
                // end of the fetch block containing start_pc (next fetch-width boundary)
                let end_pc =
                    (start_pc + 1).next_multiple_of(config.get().fetch_width_in_bytes() as u64);
                let insns = to_post_decode.ty().inner.data.HdlSome.insns;
                // all-zero bit pattern reinterpreted as the insn element type
                let zeroed_insn = UInt[insns.element().canonical().bit_width()]
                    .zero()
                    .cast_bits_to(insns.element());
                let mut insns = insns.new_sim(zeroed_insn);
                let mut expected_pc = start_pc;
                // TODO: handle instructions that go past the end of a fetch block
                for (pc, insn) in mock_insns.fetch_block(start_pc..end_pc) {
                    let next_pc = pc + insn.byte_len();
                    // stop at the first gap in the decoded instruction stream
                    if pc != expected_pc {
                        break;
                    }
                    expected_pc = next_pc;
                    let kind = match insn {
                        MockInsn::Nop4 => WipDecodedInsnKind.NonBranch(),
                        MockInsn::Jump { target } => WipDecodedInsnKind.Branch(target),
                        MockInsn::CondBranch { target } => WipDecodedInsnKind.BranchCond(target),
                        MockInsn::Call { target } => WipDecodedInsnKind.Call(target),
                        MockInsn::Ret => WipDecodedInsnKind.Ret(),
                    };
                    let insn = #[hdl(sim)]
                    WipDecodedInsn {
                        fetch_block_id,
                        id: next_id.cast_to_static::<UInt<_>>(),
                        pc,
                        size_in_bytes: insn.byte_len().cast_to_static::<UInt<_>>(),
                        kind,
                    };
                    match ArrayVec::try_push_sim(&mut insns, insn) {
                        Ok(()) => next_id = next_id.wrapping_add(1),
                        Err(_) => break,
                    }
                }
                // a fetch block with no decodable instructions produces a
                // single illegal-instruction trap entry instead
                if **ArrayVec::len_sim(&insns) == 0 {
                    let Ok(()) = ArrayVec::try_push_sim(
                        &mut insns,
                        #[hdl(sim)]
                        WipDecodedInsn {
                            fetch_block_id,
                            id: next_id.cast_to_static::<UInt<_>>(),
                            pc: start_pc,
                            size_in_bytes: 0u8.cast_to_static::<UInt<_>>(),
                            kind: WipDecodedInsnKind.Interrupt(DEMO_ILLEGAL_INSN_TRAP),
                        },
                    ) else {
                        unreachable!();
                    };
                    next_id = next_id.wrapping_add(1);
                }
                sim.write(
                    to_post_decode.inner.data,
                    HdlSome(
                        #[hdl(sim)]
                        DecodeToPostDecodeInterfaceInner::<_> { insns, config },
                    ),
                )
                .await;
            } else {
                // nothing ready to decode this cycle
                sim.write(
                    to_post_decode.inner.data,
                    to_post_decode.ty().inner.data.HdlNone(),
                )
                .await;
            }
            sim.write(from_fetch.fetch.ready, queue.len() < FETCH_PIPE_QUEUE_SIZE)
                .await;
            sim.write(from_fetch.cancel.ready, true).await;
            sim.wait_for_clock_edge(cd.clk).await;
            // if post-decode accepted our decoded block, pop it from the queue
            if sim.read_past_bool(to_post_decode.inner.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(_) = sim.read_past(to_post_decode.inner.data, cd.clk).await {
                    queue.pop_front();
                }
            }
            // count down each in-flight fetch's remaining latency
            for entry in &mut queue {
                if entry.cycles_left.as_int() > 0 {
                    entry.cycles_left = (entry.cycles_left.as_int() - 1u8).to_sim_value();
                }
            }
            // handle cancels before pushing new fetch op
            #[hdl(sim)]
            if let HdlSome(in_progress_fetches_to_cancel) =
                sim.read_past(from_fetch.cancel.data, cd.clk).await
            {
                // cancel in-progress fetches from newest to oldest
                for _ in 0..*in_progress_fetches_to_cancel {
                    let _ = queue.pop_back();
                }
            }
            // only accept a new fetch if we advertised ready this cycle
            if !sim.read_past_bool(from_fetch.fetch.ready, cd.clk).await {
                continue;
            }
            // handle pushing new fetch op after handling cancels
            #[hdl(sim)]
            if let HdlSome(inner) = sim.read_past(from_fetch.fetch.data, cd.clk).await {
                #[hdl(sim)]
                let NextPcToFetchInterfaceInner {
                    start_pc,
                    fetch_block_id,
                } = &inner;
                queue.push_back(
                    #[hdl(sim)]
                    FetchPipeQueueEntry {
                        start_pc,
                        cycles_left: FetchPipeQueueEntry::get_next_delay(delay_sequence_index),
                        fetch_block_id,
                    },
                );
            }
        }
    }
}
/// Capacity of the mock execute/retire pipe's in-flight queue.
const EXECUTE_RETIRE_PIPE_QUEUE_SIZE: usize = 15;
/// One in-flight instruction in the mock execute/retire pipe.
#[hdl]
struct ExecuteRetirePipeQueueEntry {
    insn: WipDecodedInsn,
    // remaining simulated execute-latency cycles before the insn can retire
    cycles_left: UInt<8>,
}
impl ExecuteRetirePipeQueueEntry {
    /// A placeholder simulation value (recognizable `0xEEEE...` pc) used to
    /// initialize/clear debug outputs.
    #[hdl]
    fn default_sim(self) -> SimValue<Self> {
        #[hdl(sim)]
        Self {
            insn: #[hdl(sim)]
            WipDecodedInsn {
                fetch_block_id: 0u8,
                id: 0_hdl_u12,
                pc: 0xEEEE_EEEE_EEEE_EEEEu64,
                size_in_bytes: 0_hdl_u4,
                kind: WipDecodedInsnKind.NonBranch(),
            },
            cycles_left: 0u8,
        }
    }
    /// Deterministic pseudo-random execute latency in `0..16` cycles.
    /// Each call advances `delay_sequence_index` by one.
    fn get_next_delay(delay_sequence_index: &Cell<u64>) -> u8 {
        let index = delay_sequence_index.get();
        // reuse `index` instead of redundantly re-reading the Cell
        delay_sequence_index.set(index.wrapping_add(1));
        // make a pseudo-random number deterministically based on index
        let random = index
            .wrapping_add(1)
            .wrapping_mul(0x39FF446D8BFB75BB) // random prime
            .rotate_left(32)
            .wrapping_mul(0x73161B54984B1C21) // random prime
            .rotate_right(60);
        (random % 16) as u8
    }
}
/// an arbitrary, recognizable value used as the next-pc fallback if the
/// retire sequence is ever exhausted
const END_PC: u64 = u64::from_be_bytes(*b"EndInsns");
/// Simulation-side state of the mock execute/retire pipe.
/// `Clone` allows peeking at upcoming retires without committing them.
#[derive(Clone)]
struct MockExecuteState {
    // in-flight instructions, in program order
    queue: VecDeque<SimValue<ExecuteRetirePipeQueueEntry>>,
    // insn ids currently in flight; used to detect duplicate ids from next_pc
    used_ids: BTreeSet<SimValue<UInt<12>>>,
    // expected architectural retire sequence
    retire_seq: RetireSeq,
    // true while flushing mis-predicted instructions
    canceling: bool,
    config: PhantomConst<CpuConfig>,
}
impl MockExecuteState {
    fn new(config: PhantomConst<CpuConfig>) -> Self {
        Self {
            queue: VecDeque::new(),
            used_ids: BTreeSet::new(),
            retire_seq: RetireSeq::new(),
            canceling: false,
            config,
        }
    }
    /// Advance one clock cycle: count down every queued insn's remaining latency.
    fn on_clock_cycle(&mut self) {
        for entry in &mut self.queue {
            if entry.cycles_left.as_int() > 0 {
                entry.cycles_left = (entry.cycles_left.as_int() - 1u8).to_sim_value();
            }
        }
    }
    /// Retires `entry`, checking it against the expected retire sequence and
    /// building the per-insn retire-interface value for next_pc.
    ///
    /// Also detects mis-predictions: if the next queued insn's pc doesn't
    /// match the architectural next pc, sets `canceling`. `passive`
    /// suppresses log output (used when peeking on a cloned state).
    #[hdl]
    fn do_retire(
        &mut self,
        entry: SimValue<ExecuteRetirePipeQueueEntry>,
        passive: bool,
    ) -> Result<SimValue<RetireToNextPcInterfacePerInsn<PhantomConst<CpuConfig>>>, String> {
        #[hdl(sim)]
        let ExecuteRetirePipeQueueEntry {
            insn,
            cycles_left: _,
        } = entry;
        self.used_ids.remove(&insn.id);
        let RetireSeqEntry {
            pc,
            cond_br_taken,
            kind,
        } = self
            .retire_seq
            .next()
            // the error value is a plain literal, so the lazy `ok_or_else`
            // form isn't needed (clippy::unnecessary_lazy_evaluations)
            .ok_or("expected no more instructions to retire")?;
        // peek a clone of the sequence to learn the architectural next pc
        let next_pc = self
            .retire_seq
            .clone()
            .next()
            .map(|v| v.pc)
            .unwrap_or(END_PC);
        let (expected_kind, call_stack_op) = match kind {
            MockInsn::Nop4 => (
                #[hdl(sim)]
                WipDecodedInsnKind::NonBranch(),
                #[hdl(sim)]
                CallStackOp::None(),
            ),
            MockInsn::Jump { target } => (
                #[hdl(sim)]
                WipDecodedInsnKind::Branch(target),
                #[hdl(sim)]
                CallStackOp::None(),
            ),
            MockInsn::CondBranch { target } => (
                #[hdl(sim)]
                WipDecodedInsnKind::BranchCond(target),
                #[hdl(sim)]
                CallStackOp::None(),
            ),
            MockInsn::Call { target } => (
                #[hdl(sim)]
                WipDecodedInsnKind::Call(target),
                // a call pushes its return address (pc past the call insn)
                #[hdl(sim)]
                CallStackOp::Push(pc.wrapping_add(kind.byte_len())),
            ),
            MockInsn::Ret => (
                #[hdl(sim)]
                WipDecodedInsnKind::Ret(),
                #[hdl(sim)]
                CallStackOp::Pop(),
            ),
        };
        let expected_insn = #[hdl(sim)]
        WipDecodedInsn {
            fetch_block_id: &insn.fetch_block_id,
            id: &insn.id,
            pc,
            size_in_bytes: kind.byte_len().cast_to_static::<UInt<4>>(),
            kind: expected_kind,
        };
        if *expected_insn.cmp_ne(&insn) {
            return Err(format!(
                "insn doesn't match expected:\ninsn: {insn:?}\nexpected insn: {expected_insn:?}"
            ));
        }
        // a mismatched next insn means next_pc mis-predicted: start canceling
        if let Some(next_insn) = self.queue.front() {
            if next_pc != next_insn.insn.pc.as_int() {
                self.canceling = true;
                if !passive {
                    println!(
                        "MockExecuteState: starting canceling {} instruction(s): next_pc={next_pc:#x}, mis-predicted next_pc={next_insn_pc}",
                        self.queue.len(),
                        next_insn_pc = next_insn.insn.pc
                    );
                }
            }
        }
        Ok(
            #[hdl(sim)]
            RetireToNextPcInterfacePerInsn::<_> {
                id: &insn.id,
                next_pc,
                call_stack_op,
                cond_br_taken: if let Some(cond_br_taken) = cond_br_taken {
                    #[hdl(sim)]
                    HdlSome(cond_br_taken)
                } else {
                    #[hdl(sim)]
                    HdlNone()
                },
                config: self.config,
            },
        )
    }
    /// Pops and retires the oldest queued insn if it's ready; returns the
    /// retire-interface value along with the sequence-check result.
    /// Returns `None` while canceling or when nothing is ready to retire.
    #[hdl]
    fn try_retire(
        &mut self,
        passive: bool,
    ) -> Option<(
        SimValue<RetireToNextPcInterfacePerInsn<PhantomConst<CpuConfig>>>,
        Result<(), String>,
    )> {
        if self.canceling {
            return None;
        }
        if self.queue.front()?.cycles_left.as_int() != 0 {
            return None;
        }
        let entry = self.queue.pop_front()?;
        let id = entry.insn.id.clone();
        Some(match self.do_retire(entry, passive) {
            Ok(v) => (v, Ok(())),
            // on a sequence-check failure, still produce a recognizable
            // placeholder value alongside the error
            Err(e) => (
                #[hdl(sim)]
                RetireToNextPcInterfacePerInsn::<_> {
                    id,
                    next_pc: u64::from_be_bytes(*b"ErrError"),
                    call_stack_op: #[hdl(sim)]
                    CallStackOp::None(),
                    cond_br_taken: #[hdl(sim)]
                    HdlNone(),
                    config: self.config,
                },
                Err(e),
            ),
        })
    }
    /// Number of free slots in the execute queue.
    fn space_available(&self) -> usize {
        EXECUTE_RETIRE_PIPE_QUEUE_SIZE.saturating_sub(self.queue.len())
    }
    /// Accepts a newly decoded insn into the execute queue with a
    /// pseudo-random latency; panics on duplicate insn ids from next_pc.
    #[hdl]
    fn start(&mut self, insn: &SimValue<WipDecodedInsn>, delay_sequence_index: &Cell<u64>) {
        if !self.used_ids.insert(insn.id.clone()) {
            panic!("next_pc gave a duplicate insn id: {insn:?}");
        }
        self.queue.push_back(
            #[hdl(sim)]
            ExecuteRetirePipeQueueEntry {
                insn,
                cycles_left: ExecuteRetirePipeQueueEntry::get_next_delay(delay_sequence_index),
            },
        );
    }
    /// Completes a cancel: flush every in-flight insn and resume retiring.
    #[hdl]
    fn finish_cancel(&mut self) {
        println!(
            "MockExecuteState: finishing canceling {} instruction(s)",
            self.queue.len(),
        );
        self.queue.clear();
        self.used_ids.clear();
        self.canceling = false;
    }
}
/// Mock of the execute + retire pipeline stage.
///
/// Accepts decoded instructions from post-decode, holds each for a
/// deterministic pseudo-random number of cycles, then retires them in order,
/// checking them against the expected retire sequence and reporting retire
/// results (and mis-prediction cancels) back to next_pc.
#[hdl_module(extern)]
fn mock_execute_retire_pipe(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    // decoded instructions arriving from the post-decode stage
    #[hdl]
    let from_post_decode: PostDecodeOutputInterface<PhantomConst<CpuConfig>> =
        m.input(PostDecodeOutputInterface[config]);
    // retire results going back to next_pc
    #[hdl]
    let retire_output: RetireToNextPcInterface<PhantomConst<CpuConfig>> =
        m.output(RetireToNextPcInterface[config]);
    // debug-only view of the in-flight execute queue (shows up in VCD traces)
    #[hdl]
    let queue_debug: ArrayVec<
        ExecuteRetirePipeQueueEntry,
        ConstUsize<{ EXECUTE_RETIRE_PIPE_QUEUE_SIZE }>,
    > = m.output();
    m.register_clock_for_past(cd.clk);
    m.extern_module_simulation_fn(
        (cd, from_post_decode, retire_output, queue_debug),
        |(cd, from_post_decode, retire_output, queue_debug), mut sim| async move {
            // intentionally have a different sequence each time we're reset
            let delay_sequence_index = Cell::new(0);
            sim.resettable(
                cd,
                async |mut sim| {
                    // while in reset: accept nothing, output nothing
                    sim.write(from_post_decode.ready, 0usize).await;
                    sim.write(
                        retire_output.inner.data,
                        retire_output.ty().inner.data.HdlNone(),
                    )
                    .await;
                    sim.write(
                        retire_output.next_insn_ids,
                        retire_output.next_insn_ids.ty().HdlNone(),
                    )
                    .await;
                    sim.write(
                        queue_debug,
                        queue_debug
                            .ty()
                            .new_sim(ExecuteRetirePipeQueueEntry.default_sim()),
                    )
                    .await;
                },
                |sim, ()| {
                    run_fn(
                        cd,
                        from_post_decode,
                        retire_output,
                        queue_debug,
                        &delay_sequence_index,
                        sim,
                    )
                },
            )
            .await;
        },
    );
    /// Per-cycle simulation body of the mock execute/retire pipe.
    #[hdl]
    async fn run_fn(
        cd: Expr<ClockDomain>,
        from_post_decode: Expr<PostDecodeOutputInterface<PhantomConst<CpuConfig>>>,
        retire_output: Expr<RetireToNextPcInterface<PhantomConst<CpuConfig>>>,
        queue_debug: Expr<
            ArrayVec<ExecuteRetirePipeQueueEntry, ConstUsize<{ EXECUTE_RETIRE_PIPE_QUEUE_SIZE }>>,
        >,
        delay_sequence_index: &Cell<u64>,
        mut sim: ExternModuleSimulationState,
    ) {
        let config = from_post_decode.config.ty();
        let mut state = MockExecuteState::new(config);
        // placeholder used to fill unused slots of the retire vector
        let empty_retire_insn = #[hdl(sim)]
        RetireToNextPcInterfacePerInsn::<_> {
            id: 0_hdl_u12,
            next_pc: 0u64,
            call_stack_op: #[hdl(sim)]
            CallStackOp::None(),
            cond_br_taken: #[hdl(sim)]
            HdlNone(),
            config,
        };
        let retire_vec_ty = retire_output.inner.data.ty().HdlSome.insns;
        loop {
            state.on_clock_cycle();
            // mirror the queue into the debug output and collect in-flight insn ids
            let mut sim_queue = queue_debug
                .ty()
                .new_sim(ExecuteRetirePipeQueueEntry.default_sim());
            let mut next_insn_ids = retire_output.next_insn_ids.ty().HdlSome.new_sim(0_hdl_u12);
            for entry in &state.queue {
                ArrayVec::try_push_sim(&mut sim_queue, entry)
                    .ok()
                    .expect("queue is known to be small enough");
                let _ = ArrayVec::try_push_sim(&mut next_insn_ids, &entry.insn.id);
            }
            sim.write(queue_debug, sim_queue).await;
            // while canceling, don't report any next insn ids
            sim.write(
                retire_output.next_insn_ids,
                if state.canceling {
                    #[hdl(sim)]
                    (retire_output.next_insn_ids.ty()).HdlNone()
                } else {
                    #[hdl(sim)]
                    (retire_output.next_insn_ids.ty()).HdlSome(next_insn_ids)
                },
            )
            .await;
            // peek (on a cloned state) at how many insns can retire this
            // cycle, stopping before any retire-sequence mismatch error
            let mut retiring = retire_vec_ty.new_sim(&empty_retire_insn);
            let mut peek_state = state.clone();
            while let Some((peek_retire, result)) = peek_state.try_retire(true) {
                if result.is_err() && **ArrayVec::len_sim(&retiring) > 0 {
                    break;
                }
                let Ok(_) = ArrayVec::try_push_sim(&mut retiring, peek_retire) else {
                    break;
                };
            }
            sim.write(
                retire_output.inner.data,
                if **ArrayVec::len_sim(&retiring) > 0 {
                    #[hdl(sim)]
                    (retire_output.inner.data.ty()).HdlSome(
                        #[hdl(sim)]
                        RetireToNextPcInterfaceInner::<_> {
                            insns: retiring,
                            config,
                        },
                    )
                } else {
                    #[hdl(sim)]
                    (retire_output.inner.data.ty()).HdlNone()
                },
            )
            .await;
            // accept as many new insns as there's queue space for
            // (none while canceling)
            sim.write(
                from_post_decode.ready,
                if state.canceling {
                    0
                } else {
                    state.space_available().min(config.get().fetch_width.get())
                },
            )
            .await;
            sim.wait_for_clock_edge(cd.clk).await;
            println!(
                "Dump mock execute retire pipe queue: {:#?}",
                Vec::from_iter(state.queue.iter().map(|v| {
                    DebugAsDisplay(format!(
                        "fid={:#x} id={} pc={:#x}",
                        v.insn.fetch_block_id.as_int(),
                        v.insn.id,
                        v.insn.pc.as_int(),
                    ))
                }))
            );
            if state.canceling {
                state.finish_cancel();
            }
            // if next_pc accepted the retire data, commit those retires for real
            if sim.read_past_bool(retire_output.inner.ready, cd.clk).await {
                for _ in 0..**ArrayVec::len_sim(&retiring) {
                    match state.try_retire(false) {
                        Some((_, Ok(_))) => {}
                        Some((_, Err(e))) => panic!("retire error: {e}"),
                        None => unreachable!(),
                    }
                }
            }
            // accept new insns from post-decode, limited to the ready count
            // we advertised this cycle
            let mut new_insns = sim.read_past(from_post_decode.insns, cd.clk).await;
            ArrayVec::truncate_sim(
                &mut new_insns,
                *sim.read_past(from_post_decode.ready, cd.clk).await,
            );
            // NOTE(review): `dbg!` left in (commit says "WIP fixing bugs") —
            // confirm whether this debug output is meant to stay
            for insn in dbg!(ArrayVec::elements_sim_ref(&new_insns)) {
                state.start(insn, delay_sequence_index);
            }
        }
    }
}
/// Device under test: the real `next_pc` module wired between the mock fetch
/// pipe and the mock execute/retire pipe.
#[hdl_module]
fn dut(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let next_pc = instance(next_pc(config));
    #[hdl]
    let next_pc {
        cd: next_pc_cd,
        to_fetch: next_pc_to_fetch,
        from_decode: next_pc_from_decode,
        post_decode_output: next_pc_post_decode_output,
        from_retire: next_pc_from_retire,
        state_for_debug: _,
    } = next_pc;
    connect(next_pc_cd, cd);
    // next_pc <-> mock fetch pipe
    #[hdl]
    let mock_fetch_pipe = instance(mock_fetch_pipe(config));
    #[hdl]
    let mock_fetch_pipe {
        cd: mock_fetch_pipe_cd,
        from_fetch: mock_fetch_pipe_from_fetch,
        to_post_decode: mock_fetch_pipe_to_post_decode,
        queue_debug: _,
    } = mock_fetch_pipe;
    connect(mock_fetch_pipe_cd, cd);
    connect(mock_fetch_pipe_from_fetch, next_pc_to_fetch);
    connect(next_pc_from_decode, mock_fetch_pipe_to_post_decode);
    // next_pc <-> mock execute/retire pipe
    #[hdl]
    let mock_execute_retire_pipe = instance(mock_execute_retire_pipe(config));
    #[hdl]
    let mock_execute_retire_pipe {
        cd: mock_execute_retire_pipe_cd,
        from_post_decode: mock_execute_retire_pipe_from_post_decode,
        retire_output: mock_execute_retire_pipe_retire_output,
        queue_debug: _,
    } = mock_execute_retire_pipe;
    connect(mock_execute_retire_pipe_cd, cd);
    connect(next_pc_from_retire, mock_execute_retire_pipe_retire_output);
    connect(
        mock_execute_retire_pipe_from_post_decode,
        next_pc_post_decode_output,
    );
}
/// Drives `dut` for 500 clock cycles and checks that the resulting VCD trace
/// matches the golden file `tests/expected/next_pc.vcd`.
#[hdl]
#[test]
fn test_next_pc() {
    let _n = SourceLocation::normalize_files_for_tests();
    let mut config = CpuConfig::new(
        vec![
            UnitConfig::new(UnitKind::AluBranch),
            UnitConfig::new(UnitKind::AluBranch),
        ],
        // NOTE(review): assumed to be a depth/size parameter of CpuConfig::new
        // — confirm its meaning against the config module
        NonZeroUsize::new(20).unwrap(),
    );
    config.fetch_width = NonZeroUsize::new(2).unwrap();
    let m = dut(PhantomConst::new_sized(config));
    let mut sim = Simulation::new(m);
    let writer = RcWriter::default();
    sim.add_trace_writer(VcdWriterDecls::new(writer.clone()));
    // dump the VCD even if the simulation panics mid-test, so failures are
    // debuggable from the test log
    struct DumpVcdOnDrop {
        writer: Option<RcWriter>,
    }
    impl Drop for DumpVcdOnDrop {
        fn drop(&mut self) {
            if let Some(mut writer) = self.writer.take() {
                let vcd = String::from_utf8(writer.take()).unwrap();
                println!("####### VCD:\n{vcd}\n#######");
            }
        }
    }
    let mut writer = DumpVcdOnDrop {
        writer: Some(writer),
    };
    sim.write_clock(sim.io().cd.clk, false);
    sim.write_reset(sim.io().cd.rst, true);
    for _cycle in 0..500 {
        sim.advance_time(SimDuration::from_nanos(500));
        println!("clock tick");
        sim.write_clock(sim.io().cd.clk, true);
        sim.advance_time(SimDuration::from_nanos(500));
        sim.write_clock(sim.io().cd.clk, false);
        // deassert reset after the first full clock cycle
        sim.write_reset(sim.io().cd.rst, false);
    }
    // FIXME: vcd is just whatever next_pc does now, which isn't known to be correct
    // taking the writer here disarms the Drop dump above
    let vcd = String::from_utf8(writer.writer.take().unwrap().take()).unwrap();
    println!("####### VCD:\n{vcd}\n#######");
    if vcd != include_str!("expected/next_pc.vcd") {
        // not assert_eq!: the VCDs are huge and were already printed above,
        // but a bare panic!() gives no hint of what went wrong
        panic!("VCD output doesn't match tests/expected/next_pc.vcd");
    }
}