cpu/crates/cpu/src/rename_execute_retire.rs
2026-04-24 18:13:27 -07:00

1866 lines
66 KiB
Rust

// SPDX-License-Identifier: LGPL-3.0-or-later
// See Notices.txt for copyright information
use crate::{
config::{
CpuConfig, CpuConfig2PowOutRegNumWidth, CpuConfigFetchWidth, CpuConfigPRegNumWidth,
CpuConfigRobSize, CpuConfigUnitCount, PhantomConstCpuConfig, TwiceCpuConfigFetchWidth,
},
instruction::{
COMMON_MOP_SRC_LEN, L2RegNum, L2RegisterFileMOp, MOp, MOpDebug, MOpDestReg, MOpRegNum,
MOpTrait, PRegNum, ReadL2RegMOp, UnitNum, UnitOutRegNum,
},
next_pc::{CallStackOp, SimValueDefault},
register::PRegValue,
rename_execute_retire::to_unit_interfaces::ExecuteToUnitInterfaces,
unit::{UnitKind, UnitMOp},
util::array_vec::ArrayVec,
};
use fayalite::{
int::UIntInRangeInclusiveType,
prelude::*,
ty::{OpaqueSimValue, StaticType},
util::ready_valid::ReadyValid,
};
use std::{collections::VecDeque, fmt, mem, num::NonZero};
pub mod to_unit_interfaces;
/// Bit width of [`MOpId`] values.
pub const MOP_ID_WIDTH: usize = 16;
#[hdl]
/// Identifier assigned to each renamed &micro;Op instance in the reorder buffer.
pub type MOpId = UInt<{ MOP_ID_WIDTH }>;
#[hdl]
/// A &micro;Op along with the state needed for this instance of the &micro;Op.
pub struct MOpInstance<MOp> {
    /// id of the fetch block this &micro;Op was decoded from
    pub fetch_block_id: UInt<8>,
    /// per-instance id; reassigned when the &micro;Op is renamed into the ROB
    pub id: MOpId,
    pub pc: UInt<64>,
    /// initialized to 0 by decoder, overwritten by `next_pc()`
    pub predicted_next_pc: UInt<64>,
    /// size of the ISA-level instruction in bytes
    pub size_in_bytes: UInt<4>,
    /// `true` if this &micro;Op is the first &micro;Op in the ISA-level instruction.
    /// In general, a single &micro;Op can't be canceled by itself,
    /// it needs to be canceled along with all other &micro;Ops that
    /// come from the same ISA-level instruction.
    pub is_first_mop_in_insn: Bool,
    /// the &micro;Op itself (generic so it can hold unrenamed or renamed forms)
    pub mop: MOp,
}
#[hdl(no_static)]
/// TODO: merge with [`crate::next_pc::PostDecodeOutputInterface`]
/// Interface carrying decoded &micro;Ops from the decoder into rename/execute/retire.
pub struct PostDecodeOutputInterface<C: PhantomConstGet<CpuConfig>> {
    /// up to a fetch-width's worth of decoded &micro;Ops per cycle
    pub insns: ArrayVec<MOpInstance<MOp>, CpuConfigFetchWidth<C>>,
    /// how many of `insns` the consumer accepts this cycle (flows back to producer)
    #[hdl(flip)]
    pub ready: UIntInRangeInclusiveType<ConstUsize<0>, CpuConfigFetchWidth<C>>,
    /// tells the rename/execute/retire circuit to cancel all non-retired instructions
    pub cancel: ReadyValid<()>,
    pub config: C,
}
#[hdl(no_static)]
/// Branch-predictor update produced when an instruction finishes/retires.
pub struct NextPcPredictorOp<C: PhantomConstGet<CpuConfig>> {
    /// call-stack push/pop action for return-address prediction
    pub call_stack_op: CallStackOp,
    /// should be `HdlSome(taken)` for any conditional control-flow instruction
    /// with an immediate target that can be predicted as taken/not-taken (branch/call/return).
    pub cond_br_taken: HdlOption<Bool>,
    pub config: C,
}
#[hdl(no_static)]
/// TODO: merge with [`crate::next_pc::RetireToNextPcInterfaceInner`]
/// Payload sent from retire to the next-PC logic: either a cancel/redirect
/// or a batch of retired-instruction predictor updates.
pub enum RetireToNextPcInterfaceInner<C: PhantomConstGet<CpuConfig>> {
    /// cancel speculation and restart fetch at the given PC
    CancelAndStartAt(UInt<64>),
    /// predictor updates for up to a fetch-width of retired instructions
    RetiredInstructions(ArrayVec<NextPcPredictorOp<C>, CpuConfigFetchWidth<C>>),
}
#[hdl(no_static)]
/// handles updating speculative branch predictor state (e.g. branch histories)
/// when instructions retire, as well as updating state when a
/// branch instruction is mis-speculated.
pub struct RetireToNextPcInterface<C: PhantomConstGet<CpuConfig>> {
    pub inner: ReadyValid<RetireToNextPcInterfaceInner<C>>,
    /// only for debugging
    pub next_insns: HdlOption<ArrayVec<MOpInstance<MOp>, CpuConfigRobSize<C>>>,
}
#[hdl]
/// &micro;Op form after rename: source/dest registers are physical reg numbers.
pub type RenamedMOp<C: PhantomConstGet<CpuConfig>> =
    crate::instruction::RenamedMOp<PRegNum<C>, CpuConfigPRegNumWidth<C>>;
#[hdl]
/// Raw-bits representation of a renamed source register number.
pub type RenamedSrcRegUInt<C: PhantomConstGet<CpuConfig>> = UIntType<CpuConfigPRegNumWidth<C>>;
#[hdl(no_static)]
/// Payload dispatching one renamed &micro;Op to an execution unit.
pub struct UnitStart<C: PhantomConstGet<CpuConfig>> {
    /// the renamed &micro;Op to start executing
    pub mop: MOpInstance<RenamedMOp<C>>,
    /// values of the &micro;Op's source registers, already read from the reg file
    pub src_values: Array<PRegValue, { COMMON_MOP_SRC_LEN }>,
    pub config: C,
}
#[hdl(no_static)]
/// Result of a &micro;Op that completed without being canceled by its unit.
pub struct UnitFinishedSuccessfully<C: PhantomConstGet<CpuConfig>> {
    /// destination register within the finishing unit
    pub dest: UnitOutRegNum<C>,
    /// value to write into the destination register
    pub dest_value: PRegValue,
    /// branch-predictor update derived from this &micro;Op
    pub predictor_op: NextPcPredictorOp<C>,
}
#[hdl(no_static)]
/// Emitted when a unit determines a &micro;Op was mis-speculated and
/// execution must be canceled and restarted.
pub struct UnitCausedCancel<C: PhantomConstGet<CpuConfig>> {
    /// id of the &micro;Op that caused the cancel
    pub id: MOpId,
    /// PC to restart fetching from after the cancel
    pub start_at_pc: UInt<64>,
    pub config: C,
}
#[hdl(no_static)]
/// Completion report from a unit. A &micro;Op must finish successfully
/// and/or cause a cancel (both fields may be `HdlSome` together).
pub struct UnitFinished<C: PhantomConstGet<CpuConfig>> {
    pub id: MOpId,
    /// `HdlSome` iff the &micro;Op produced a result
    pub finished_successfully: HdlOption<UnitFinishedSuccessfully<C>>,
    /// `HdlSome` iff the &micro;Op triggered a cancel
    pub caused_cancel: HdlOption<UnitCausedCancel<C>>,
    pub config: C,
}
#[hdl(no_static)]
/// Notifies a unit that the identified &micro;Op is no longer speculative
/// (all older &micro;Ops are known to not cancel it).
pub struct UnitMOpIsNoLongerSpeculative<C: PhantomConstGet<CpuConfig>> {
    pub id: MOpId,
    pub config: C,
}
#[hdl(no_static)]
/// Sent by a unit to promise that the identified &micro;Op will not cause a cancel.
pub struct UnitMOpCantCauseCancel<C: PhantomConstGet<CpuConfig>> {
    pub id: MOpId,
    pub config: C,
}
#[hdl(no_static)]
/// Per-unit interface between the rename/execute/retire circuit and one
/// execution unit. Unflipped channels flow toward the unit; `#[hdl(flip)]`
/// channels flow back from the unit.
pub struct ExecuteToUnitInterface<C: PhantomConstGet<CpuConfig>> {
    /// dispatch a &micro;Op (with operand values) to the unit
    pub start: ReadyValid<UnitStart<C>>,
    /// tell the unit to drop all in-flight &micro;Ops
    pub cancel_all: ReadyValid<()>,
    /// notify the unit a &micro;Op is no longer speculative
    pub is_no_longer_speculative: ReadyValid<UnitMOpIsNoLongerSpeculative<C>>,
    /// unit promises a &micro;Op won't cause a cancel
    #[hdl(flip)]
    pub cant_cause_cancel: ReadyValid<UnitMOpCantCauseCancel<C>>,
    /// unit reports &micro;Op completion (and/or cancel)
    #[hdl(flip)]
    pub finished: ReadyValid<UnitFinished<C>>,
    pub config: C,
}
fn zeroed<T: Type>(ty: T) -> SimValue<T> {
SimValue::from_opaque(
ty,
OpaqueSimValue::from_bits(UInt::new(ty.canonical().bit_width()).zero()),
)
}
impl<C: PhantomConstCpuConfig> SimValueDefault for RenameExecuteRetireDebugState<C> {
    #[hdl]
    /// Produces the default debug state: every field zero-filled, and each
    /// per-instruction timeline slot holding an empty string.
    fn sim_value_default(self) -> SimValue<Self> {
        // destructure so adding a field is a compile error here
        let Self {
            rename_delayed,
            rename_table,
            retire_rename_table,
            rob,
            next_pc_canceling,
            unit_canceling,
            l1_reg_file,
            per_insn_timeline,
        } = self;
        let empty_string = SimOnlyValue::new(String::new());
        #[hdl(sim)]
        Self {
            rename_delayed: zeroed(rename_delayed),
            rename_table: zeroed(rename_table),
            retire_rename_table: zeroed(retire_rename_table),
            rob: zeroed(rob),
            next_pc_canceling: zeroed(next_pc_canceling),
            unit_canceling: zeroed(unit_canceling),
            l1_reg_file: zeroed(l1_reg_file),
            per_insn_timeline: SimValue::from_array_elements(
                per_insn_timeline,
                (0..per_insn_timeline.len()).map(|_| empty_string.clone()),
            ),
        }
    }
}
#[hdl(no_static)]
/// What an architectural register is currently renamed to:
/// either an L1 physical register or an L2 register-file slot.
enum RenameTableEntry<C: PhantomConstGet<CpuConfig>> {
    L1(PRegNum<C>),
    L2(L2RegNum),
}
impl<C: PhantomConstCpuConfig> RenameTableEntry<C> {
    #[hdl]
    /// The all-zero entry: `L1` with the const-zero physical register.
    fn const_zero(self) -> SimValue<Self> {
        #[hdl(sim)]
        self.L1(self.L1.const_zero())
    }
    #[hdl]
    /// Lazy `Debug` formatter for a simulated entry (used in log output).
    fn debug_sim(this: &SimValue<Self>) -> impl fmt::Debug {
        fmt::from_fn(move |f| {
            #[hdl(sim)]
            match this {
                Self::L1(v) => write!(f, "L1({:?})", PRegNum::debug_sim(v)),
                Self::L2(v) => write!(f, "L2({:?})", L2RegNum::debug_sim(v)),
            }
        })
    }
}
/// make arrays dynamically-sized to avoid putting large types on the stack
#[hdl(get(|c| 1 << MOpRegNum::WIDTH))]
type MOpRegCount<C: PhantomConstGet<CpuConfig>> = DynSize;
#[hdl(no_static)]
/// HDL debug-state mirror of [`RenameTable`]: one entry per architectural register.
struct RenameTableDebugState<C: PhantomConstGet<CpuConfig>> {
    entries: ArrayType<RenameTableEntry<C>, MOpRegCount<C>>,
    config: C,
}
#[derive(Debug)]
/// Simulation-side rename table: maps every architectural (unrenamed) register
/// number to its current [`RenameTableEntry`]. Boxed to keep the large array
/// off the stack.
struct RenameTable<C: PhantomConstCpuConfig> {
    entries: Box<[SimValue<RenameTableEntry<C>>; 1 << MOpRegNum::WIDTH]>,
    config: C,
}
impl<C: PhantomConstCpuConfig> Clone for RenameTable<C> {
    /// Deep-copies the entry array and the (cheap) config handle.
    fn clone(&self) -> Self {
        let Self { entries, config } = self;
        Self {
            entries: entries.clone(),
            config: config.clone(),
        }
    }
    /// Overridden so cloning into an existing table reuses its allocation.
    fn clone_from(&mut self, source: &Self) {
        self.entries.clone_from(&source.entries);
        self.config = source.config;
    }
}
#[derive(Debug, Clone)]
/// A single mutation to apply to a [`RenameTable`].
enum RenameTableUpdate<C: PhantomConstCpuConfig> {
    /// point architectural register `unrenamed_reg_num` at `new`
    Write {
        unrenamed_reg_num: u32,
        new: SimValue<RenameTableEntry<C>>,
    },
    /// L2 register `src` was loaded into L1 register `dest`;
    /// repoint entries naming `src` at `dest`
    UpdateForReadL2Reg {
        dest: SimValue<PRegNum<C>>,
        src: SimValue<L2RegNum>,
    },
    /// L1 register `src` was written to L2 register `dest`;
    /// repoint entries naming `src` at `dest`
    UpdateForWriteL2Reg {
        dest: SimValue<L2RegNum>,
        src: SimValue<PRegNum<C>>,
    },
}
impl<C: PhantomConstCpuConfig> RenameTable<C> {
    /// Creates a table with every entry set to the all-zero `L1` entry.
    fn new(config: C) -> Self {
        let entries: Box<[SimValue<RenameTableEntry<C>>; 1 << MOpRegNum::WIDTH]> =
            vec![RenameTableEntry[config].const_zero(); 1 << MOpRegNum::WIDTH]
                .try_into()
                .expect("size is known to match");
        Self { entries, config }
    }
    #[hdl]
    /// Snapshots the table into its HDL debug-state representation.
    fn to_debug_state(&self) -> SimValue<RenameTableDebugState<C>> {
        let Self { entries, config } = self;
        let ty = RenameTableDebugState[*config];
        #[hdl(sim)]
        RenameTableDebugState::<_> {
            entries: entries.to_sim_value_with_type(ty.entries),
            config,
        }
    }
    #[hdl]
    /// Applies a single [`RenameTableUpdate`], logging every entry it changes.
    /// `rename_table_name` is only used to label the log output.
    fn update(&mut self, update: &RenameTableUpdate<C>, rename_table_name: &str) {
        match update {
            RenameTableUpdate::Write {
                unrenamed_reg_num,
                new,
            } => {
                if *unrenamed_reg_num == MOpRegNum::CONST_ZERO_REG_NUM {
                    // writing to const zero reg does nothing
                    return;
                }
                println!(
                    "{rename_table_name}: Write: {unrenamed_reg_num:#x} <- {:?}",
                    RenameTableEntry::debug_sim(new),
                );
                self.entries[*unrenamed_reg_num as usize] = new.clone();
            }
            RenameTableUpdate::UpdateForReadL2Reg { dest, src } => {
                // every entry still naming L2 reg `src` now lives in L1 reg `dest`
                let new = #[hdl(sim)]
                (RenameTableEntry[self.config]).L1(dest);
                for (unrenamed_reg_num, entry) in self.entries.iter_mut().enumerate() {
                    #[hdl(sim)]
                    match &entry {
                        RenameTableEntry::<_>::L1(_) => {}
                        RenameTableEntry::<_>::L2(l2) => {
                            if L2RegNum::value_sim(l2) == L2RegNum::value_sim(src) {
                                println!(
                                    "{rename_table_name}: UpdateForReadL2Reg: {unrenamed_reg_num:#x} updating from {:?} to {:?}",
                                    RenameTableEntry::debug_sim(&entry),
                                    RenameTableEntry::debug_sim(&new),
                                );
                                *entry = new.clone();
                            }
                        }
                    }
                }
            }
            RenameTableUpdate::UpdateForWriteL2Reg { dest, src } => {
                // every entry still naming L1 reg `src` now lives in L2 reg `dest`
                let new = #[hdl(sim)]
                (RenameTableEntry[self.config]).L2(dest);
                for (unrenamed_reg_num, entry) in self.entries.iter_mut().enumerate() {
                    #[hdl(sim)]
                    match &entry {
                        RenameTableEntry::<_>::L1(l1) => {
                            if l1 == src {
                                println!(
                                    "{rename_table_name}: UpdateForWriteL2Reg: {unrenamed_reg_num:#x} updating from {:?} to {:?}",
                                    RenameTableEntry::debug_sim(&entry),
                                    RenameTableEntry::debug_sim(&new),
                                );
                                *entry = new.clone();
                            }
                        }
                        RenameTableEntry::<_>::L2(_) => {}
                    }
                }
            }
        }
    }
}
#[hdl(no_static)]
/// HDL debug-state mirror of [`MOpExecutionProgress`].
enum MOpExecutionProgressDebugState<C: PhantomConstGet<CpuConfig>> {
    NotStarted,
    Started,
    /// finished successfully, carrying the predictor update
    Finished(NextPcPredictorOp<C>),
    Canceled,
}
#[hdl(no_static)]
/// HDL debug-state mirror of [`RobEntry`].
struct RobEntryDebugState<C: PhantomConstGet<CpuConfig>> {
    mop: MOpInstance<RenamedMOp<C>>,
    is_speculative: Bool,
    sent_is_no_longer_speculative: Bool,
    can_cause_cancel: Bool,
    progress: MOpExecutionProgressDebugState<C>,
    caused_cancel: HdlOption<UnitCausedCancel<C>>,
}
impl<C: PhantomConstCpuConfig> SimValueDefault for RobEntryDebugState<C> {
    /// Default is the all-zero bit pattern.
    fn sim_value_default(self) -> SimValue<Self> {
        zeroed(self)
    }
}
#[derive(Clone, Debug)]
/// Simulation-side execution status of one renamed &micro;Op in the ROB.
enum MOpExecutionProgress<C: Type + PhantomConstGet<CpuConfig>> {
    NotStarted,
    Started,
    /// finished successfully, carrying the branch-predictor update
    Finished(SimValue<NextPcPredictorOp<C>>),
    Canceled,
}
impl<C: Type + PhantomConstGet<CpuConfig>> MOpExecutionProgress<C> {
    #[hdl]
    /// Converts to the HDL debug-state enum, variant for variant.
    fn debug_state(&self, config: C) -> SimValue<MOpExecutionProgressDebugState<C>> {
        let ret_ty = MOpExecutionProgressDebugState[config];
        match self {
            Self::NotStarted =>
            {
                #[hdl(sim)]
                ret_ty.NotStarted()
            }
            Self::Started =>
            {
                #[hdl(sim)]
                ret_ty.Started()
            }
            Self::Finished(v) =>
            {
                #[hdl(sim)]
                ret_ty.Finished(v)
            }
            Self::Canceled =>
            {
                #[hdl(sim)]
                ret_ty.Canceled()
            }
        }
    }
}
#[derive(Debug)]
/// One renamed &micro;Op tracked by the reorder buffer.
struct RobEntry<C: PhantomConstCpuConfig> {
    mop: SimValue<MOpInstance<RenamedMOp<C>>>,
    /// `true` until all older &micro;Ops are known to not cancel this one
    is_speculative: bool,
    /// whether the unit was already told this &micro;Op is non-speculative
    sent_is_no_longer_speculative: bool,
    /// cleared once the unit promises this &micro;Op won't cause a cancel
    can_cause_cancel: bool,
    progress: MOpExecutionProgress<C>,
    /// set when the unit reports this &micro;Op caused a cancel
    caused_cancel: Option<SimValue<UnitCausedCancel<C>>>,
}
impl<C: PhantomConstCpuConfig> RobEntry<C> {
    /// Creates an entry for a freshly-renamed &micro;Op: speculative, not yet
    /// started, assumed able to cause a cancel until its unit says otherwise.
    fn new(mop: SimValue<MOpInstance<RenamedMOp<C>>>) -> Self {
        Self {
            mop,
            is_speculative: true,
            sent_is_no_longer_speculative: false,
            can_cause_cancel: true,
            progress: MOpExecutionProgress::NotStarted,
            caused_cancel: None,
        }
    }
    /// The &micro;Op's renamed destination physical register.
    fn dest_reg(&self) -> &SimValue<PRegNum<C>> {
        MOpTrait::dest_reg_sim_ref(&self.mop.mop)
    }
    /// The unit component of the destination register.
    fn unit_num(&self) -> &SimValue<UnitNum<C>> {
        &self.dest_reg().unit_num
    }
    /// Index of the unit this &micro;Op executes on.
    // fixed: accessors already return references, so the extra `&` created a
    // `&&SimValue` that only compiled via deref coercion (clippy::needless_borrow)
    fn unit_index(&self) -> usize {
        UnitNum::index_sim(self.unit_num()).expect("known to have unit_index")
    }
    /// The per-unit output register component of the destination register.
    fn unit_out_reg(&self) -> &SimValue<UnitOutRegNum<C>> {
        &self.dest_reg().unit_out_reg
    }
    /// Numeric index of the per-unit output register.
    fn unit_out_reg_index(&self) -> usize {
        UnitOutRegNum::value_sim(self.unit_out_reg())
    }
}
#[hdl]
/// HDL debug-state mirror of [`RobEntries`].
struct RobEntriesDebugState {
    unrenamed: MOpInstance<MOp>,
    /// number of renamed &micro;Ops that this unrenamed &micro;Op corresponds to
    renamed_entries_len: UInt<8>,
}
#[derive(Debug)]
/// One unrenamed &micro;Op together with the renamed &micro;Ops and rename-table
/// updates it produced (renaming may expand one &micro;Op into several).
struct RobEntries<C: PhantomConstCpuConfig> {
    unrenamed: SimValue<MOpInstance<MOp>>,
    /// updates to replay into the retire rename table when this retires
    rename_table_updates: Vec<RenameTableUpdate<C>>,
    renamed_entries: VecDeque<RobEntry<C>>,
}
impl<C: PhantomConstCpuConfig> RobEntries<C> {
    #[hdl]
    /// Converts to the HDL debug state (unrenamed &micro;Op plus renamed count).
    fn debug_state(&self) -> SimValue<RobEntriesDebugState> {
        let Self {
            unrenamed,
            rename_table_updates: _,
            renamed_entries,
        } = self;
        #[hdl(sim)]
        RobEntriesDebugState {
            unrenamed,
            renamed_entries_len: u8::try_from(renamed_entries.len())
                .expect("renamed_entries.len() should fit in u8"),
        }
    }
}
#[hdl(no_static)]
/// Debug state for a pending cancel: where to redirect next-PC and
/// which units still need to receive the cancel.
struct NeedSendCancelDebugState<C: PhantomConstGet<CpuConfig>> {
    send_to_next_pc: HdlOption<UInt<64>>,
    send_to_units: ArrayType<Bool, CpuConfigUnitCount<C>>,
    config: C,
}
#[hdl]
/// HDL debug-state mirror of [`NextPcCancelingState`].
enum NextPcCancelingDebugState {
    NeedSendCancel(UInt<64>),
    NeedReceiveCancel,
}
#[derive(Clone, PartialEq, Eq, Debug)]
/// Progress of a cancel handshake with the next-PC logic.
enum NextPcCancelingState {
    /// still need to send the cancel-and-restart-at-PC message
    NeedSendCancel(u64),
    /// cancel sent; waiting for the acknowledgment
    NeedReceiveCancel,
}
impl NextPcCancelingState {
    #[hdl]
    /// Converts the optional state to its HDL debug representation.
    fn debug_state(this: &Option<Self>) -> SimValue<HdlOption<NextPcCancelingDebugState>> {
        match this {
            Some(Self::NeedSendCancel(pc)) =>
            {
                #[hdl(sim)]
                HdlSome(
                    #[hdl(sim)]
                    NextPcCancelingDebugState.NeedSendCancel(pc),
                )
            }
            Some(Self::NeedReceiveCancel) =>
            {
                #[hdl(sim)]
                HdlSome(
                    #[hdl(sim)]
                    NextPcCancelingDebugState.NeedReceiveCancel(),
                )
            }
            None =>
            {
                #[hdl(sim)]
                HdlNone()
            }
        }
    }
}
#[hdl(no_static)]
/// HDL debug-state mirror of [`ReorderBuffer`].
pub struct ReorderBufferDebugState<C: PhantomConstGet<CpuConfig>> {
    next_renamed_mop_id: MOpId,
    entries: ArrayVec<RobEntriesDebugState, CpuConfigRobSize<C>>,
    incomplete_back_entry: HdlOption<RobEntriesDebugState>,
    /// flattened view of every renamed &micro;Op currently in the ROB
    renamed: ArrayVec<RobEntryDebugState<C>, CpuConfigRobSize<C>>,
    config: C,
}
#[derive(Debug)]
/// Simulation-side reorder buffer: a queue of unrenamed &micro;Ops, each with
/// its renamed expansion, plus the id counter for newly-renamed &micro;Ops.
struct ReorderBuffer<C: PhantomConstCpuConfig> {
    /// next [`MOpId`] to hand out (incremented with wrapping)
    next_renamed_mop_id: SimValue<MOpId>,
    entries: VecDeque<RobEntries<C>>,
    /// back entry still being filled in while its unrenamed &micro;Op is renamed
    incomplete_back_entry: Option<RobEntries<C>>,
    config: C,
}
impl<C: PhantomConstCpuConfig> ReorderBuffer<C> {
    /// Creates an empty ROB with the id counter at zero.
    fn new(config: C) -> Self {
        Self {
            next_renamed_mop_id: MOpId.zero().into_sim_value(),
            entries: VecDeque::new(),
            incomplete_back_entry: None,
            config,
        }
    }
    #[hdl]
    /// Snapshots the whole ROB into its HDL debug-state representation.
    fn debug_state(&self) -> SimValue<ReorderBufferDebugState<C>> {
        let Self {
            next_renamed_mop_id,
            entries,
            incomplete_back_entry,
            config,
        } = self;
        let ty = ReorderBufferDebugState[*config];
        #[hdl(sim)]
        ReorderBufferDebugState::<_> {
            next_renamed_mop_id,
            entries: ty
                .entries
                .from_iter_sim(
                    zeroed(StaticType::TYPE),
                    entries.iter().map(RobEntries::debug_state),
                )
                .expect("known to fit"),
            incomplete_back_entry: if let Some(incomplete_back_entry) = incomplete_back_entry {
                #[hdl(sim)]
                HdlSome(incomplete_back_entry.debug_state())
            } else {
                #[hdl(sim)]
                HdlNone()
            },
            renamed: ty
                .renamed
                .from_iter_sim(
                    zeroed(ty.renamed.element()),
                    self.renamed().map(|entry| {
                        let RobEntry {
                            mop,
                            is_speculative,
                            sent_is_no_longer_speculative,
                            can_cause_cancel,
                            progress,
                            caused_cancel,
                        } = entry;
                        let caused_cancel_ty = HdlOption[UnitCausedCancel[self.config]];
                        #[hdl(sim)]
                        RobEntryDebugState::<_> {
                            mop,
                            is_speculative,
                            sent_is_no_longer_speculative,
                            can_cause_cancel,
                            progress: progress.debug_state(self.config),
                            caused_cancel: if let Some(caused_cancel) = caused_cancel {
                                #[hdl(sim)]
                                caused_cancel_ty.HdlSome(caused_cancel)
                            } else {
                                #[hdl(sim)]
                                caused_cancel_ty.HdlNone()
                            },
                        }
                    }),
                )
                .ok()
                .expect("known to fit"),
            config,
        }
    }
    /// Number of unrenamed &micro;Ops (not counting the incomplete back entry).
    fn unrenamed_len(&self) -> usize {
        self.entries.len()
    }
    /// Iterates the unrenamed &micro;Ops, oldest first.
    fn unrenamed(&self) -> impl DoubleEndedIterator<Item = &SimValue<MOpInstance<MOp>>> + Clone {
        self.entries.iter().map(|v| &v.unrenamed)
    }
    /// Mutable variant of [`Self::unrenamed`].
    fn unrenamed_mut(
        &mut self,
    ) -> impl DoubleEndedIterator<Item = &mut SimValue<MOpInstance<MOp>>> {
        self.entries.iter_mut().map(|v| &mut v.unrenamed)
    }
    /// Total number of renamed &micro;Ops, including the incomplete back entry.
    fn renamed_len(&self) -> usize {
        let Self {
            next_renamed_mop_id: _,
            entries,
            incomplete_back_entry,
            config: _,
        } = self;
        entries
            .iter()
            .chain(incomplete_back_entry)
            .map(|entries| entries.renamed_entries.len())
            .sum()
    }
    /// Iterates all renamed &micro;Ops (incomplete back entry included), oldest first.
    fn renamed(&self) -> impl DoubleEndedIterator<Item = &RobEntry<C>> + Clone {
        let Self {
            next_renamed_mop_id: _,
            entries,
            incomplete_back_entry,
            config: _,
        } = self;
        entries
            .iter()
            .chain(incomplete_back_entry)
            .flat_map(|entries| &entries.renamed_entries)
    }
    /// Mutable variant of [`Self::renamed`].
    fn renamed_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut RobEntry<C>> {
        let Self {
            next_renamed_mop_id: _,
            entries,
            incomplete_back_entry,
            config: _,
        } = self;
        entries
            .iter_mut()
            .chain(incomplete_back_entry)
            .flat_map(|entries| &mut entries.renamed_entries)
    }
    /// Linear search for the renamed &micro;Op with the given id.
    fn try_renamed_by_id(&self, id: &SimValue<MOpId>) -> Option<&RobEntry<C>> {
        self.renamed().find(|v| v.mop.id == *id)
    }
    /// Mutable variant of [`Self::try_renamed_by_id`].
    fn try_renamed_by_id_mut(&mut self, id: &SimValue<MOpId>) -> Option<&mut RobEntry<C>> {
        self.renamed_mut().find(|v| v.mop.id == *id)
    }
    /// Like [`Self::try_renamed_by_id`], but panics when the id isn't present.
    #[track_caller]
    fn renamed_by_id(&self, id: &SimValue<MOpId>) -> &RobEntry<C> {
        match self.try_renamed_by_id(id) {
            Some(v) => v,
            None => panic!("MOpId not found: {id:?}"),
        }
    }
    /// Mutable variant of [`Self::renamed_by_id`].
    fn renamed_by_id_mut(&mut self, id: &SimValue<MOpId>) -> &mut RobEntry<C> {
        match self.try_renamed_by_id_mut(id) {
            Some(v) => v,
            None => panic!("MOpId not found: {id:?}"),
        }
    }
    /// Assigns the next fresh [`MOpId`] to `renamed` (wrapping increment of the
    /// counter) and appends it under the incomplete back entry, creating that
    /// entry from `unrenamed` if needed.
    fn renamed_push_back_with_new_id(
        &mut self,
        unrenamed: &SimValue<MOpInstance<MOp>>,
        mut renamed: RobEntry<C>,
    ) {
        let replacement_id = self
            .next_renamed_mop_id
            .as_int()
            .wrapping_add(1)
            .into_sim_value();
        renamed.mop.id = mem::replace(&mut self.next_renamed_mop_id, replacement_id);
        self.incomplete_back_entry
            .get_or_insert_with(|| RobEntries {
                unrenamed: unrenamed.clone(),
                rename_table_updates: Vec::new(),
                renamed_entries: VecDeque::new(),
            })
            .renamed_entries
            .push_back(renamed);
    }
    /// Finalizes the incomplete back entry (creating an empty one from
    /// `unrenamed` if renaming produced no &micro;Ops) and queues it.
    fn finished_unrenamed_push_back(&mut self, unrenamed: &SimValue<MOpInstance<MOp>>) {
        let entry = self
            .incomplete_back_entry
            .take()
            .unwrap_or_else(|| RobEntries {
                unrenamed: unrenamed.clone(),
                rename_table_updates: Vec::new(),
                renamed_entries: VecDeque::new(),
            });
        self.entries.push_back(entry);
    }
    /// Drops every entry; the id counter is deliberately left running.
    fn clear(&mut self) {
        let Self {
            next_renamed_mop_id: _,
            entries,
            incomplete_back_entry,
            config: _,
        } = self;
        entries.clear();
        *incomplete_back_entry = None;
    }
    /// Records a rename-table update on the incomplete back entry, creating
    /// that entry from `unrenamed` if needed.
    fn unrenamed_back_append_rename_table_update(
        &mut self,
        unrenamed: &SimValue<MOpInstance<MOp>>,
        update: RenameTableUpdate<C>,
    ) {
        self.incomplete_back_entry
            .get_or_insert_with(|| RobEntries {
                unrenamed: unrenamed.clone(),
                rename_table_updates: Vec::new(),
                renamed_entries: VecDeque::new(),
            })
            .rename_table_updates
            .push(update);
    }
}
/// Simulation-only `String` carrier (exists only in sim, not in hardware).
type SimOnlyString = SimOnly<String>;
#[expect(non_upper_case_globals)]
const SimOnlyString: SimOnlyString = SimOnlyString::TYPE;
/// Timeline slot count: ROB size rounded up to a power of two so &micro;Op ids
/// can be masked into slots.
#[hdl(get(|c| c.rob_size.get().next_power_of_two()))]
type PerInsnTimelineLen<C: PhantomConstGet<CpuConfig>> = DynSize;
#[hdl(no_static)]
/// HDL debug-state mirror of [`RenameExecuteRetireState`], written out each
/// cycle for waveform/debug inspection.
pub struct RenameExecuteRetireDebugState<C: PhantomConstGet<CpuConfig>> {
    rename_delayed: ArrayVec<MOpInstance<MOp>, TwiceCpuConfigFetchWidth<C>>,
    rename_table: RenameTableDebugState<C>,
    retire_rename_table: RenameTableDebugState<C>,
    rob: ReorderBufferDebugState<C>,
    next_pc_canceling: HdlOption<NextPcCancelingDebugState>,
    unit_canceling: ArrayType<Bool, CpuConfigUnitCount<C>>,
    /// per-unit, per-output-reg register file contents (`HdlNone` = not yet written)
    l1_reg_file: ArrayType<
        ArrayType<HdlOption<PRegValue>, CpuConfig2PowOutRegNumWidth<C>>,
        CpuConfigUnitCount<C>,
    >,
    /// human-readable execution-status string per (masked) &micro;Op id
    per_insn_timeline: ArrayType<SimOnlyString, PerInsnTimelineLen<C>>,
}
#[derive(Debug)]
/// Full simulation-side state of the rename/execute/retire circuit.
struct RenameExecuteRetireState<C: PhantomConstCpuConfig> {
    /// decoded &micro;Ops accepted from post-decode but not yet renamed
    rename_delayed: VecDeque<SimValue<MOpInstance<MOp>>>,
    /// speculative rename table, updated as &micro;Ops are renamed
    rename_table: RenameTable<C>,
    /// rename table as of the last retired &micro;Op
    retire_rename_table: RenameTable<C>,
    rob: ReorderBuffer<C>,
    /// `Some` while a cancel handshake with next-PC is in flight
    next_pc_canceling: Option<NextPcCancelingState>,
    /// per-unit flag: still need to deliver `cancel_all` to that unit
    unit_canceling: Box<[bool]>,
    /// `l1_reg_file[unit][out_reg]`; `None` until the unit writes the value
    l1_reg_file: Box<[Box<[Option<SimValue<PRegValue>>]>]>,
    /// index of the `TransformedMove` unit used for L2 register file traffic
    l2_reg_file_unit_index: usize,
    config: C,
}
impl<C: PhantomConstCpuConfig> RenameExecuteRetireState<C> {
    /// Creates the reset state for the given CPU config.
    ///
    /// Panics if the config has no `TransformedMove` unit, since that unit is
    /// required for L2 register file accesses.
    fn new(config: C) -> Self {
        Self {
            rename_delayed: VecDeque::with_capacity(TwiceCpuConfigFetchWidth[config]),
            rename_table: RenameTable::new(config),
            retire_rename_table: RenameTable::new(config),
            rob: ReorderBuffer::new(config),
            next_pc_canceling: None,
            unit_canceling: vec![false; CpuConfigUnitCount[config]].into_boxed_slice(),
            // one slot per (unit, out reg); None until first written
            l1_reg_file: vec![
                vec![None; CpuConfig2PowOutRegNumWidth[config]].into_boxed_slice();
                CpuConfigUnitCount[config]
            ]
            .into_boxed_slice(),
            l2_reg_file_unit_index: config
                .get()
                .units
                .iter()
                .position(|unit| unit.kind == UnitKind::TransformedMove)
                .expect("Unit for L2 register file is missing"),
            config,
        }
    }
fn is_canceling(&self) -> bool {
self.next_pc_canceling.is_some() || self.unit_canceling.iter().any(|v| *v)
}
    /// Renders a short status string for every renamed &micro;Op in the ROB into
    /// a power-of-two-sized array indexed by the &micro;Op id masked to that size.
    fn per_insn_timeline(&self) -> SimValue<ArrayType<SimOnlyString, PerInsnTimelineLen<C>>> {
        let len = PerInsnTimelineLen[self.config];
        let retval_ty = ArrayType[SimOnlyString][len];
        assert!(len.is_power_of_two());
        let mask = len - 1;
        let empty_string = SimOnlyValue::new(String::new());
        let mut retval =
            SimValue::from_array_elements(retval_ty, (0..len).map(|_| empty_string.clone()));
        for rob in self.rob.renamed() {
            // ids wrap, so distinct µOps can collide in a slot; later entries win
            let masked_id = rob.mop.id.as_int() as usize & mask;
            **retval[masked_id] = fmt::from_fn(|f| {
                if rob.is_speculative {
                    f.write_str("(S)")?;
                }
                match rob.progress {
                    MOpExecutionProgress::NotStarted => f.write_str("NotStarted")?,
                    MOpExecutionProgress::Started => f.write_str("Started")?,
                    MOpExecutionProgress::Finished(_) => f.write_str("Finished")?,
                    MOpExecutionProgress::Canceled => f.write_str("Canceled")?,
                }
                if rob.caused_cancel.is_some() {
                    f.write_str("(caused cancel)")?;
                }
                // ".." marks µOps that are not the first of their instruction
                write!(
                    f,
                    ": {:#x}{}: ",
                    rob.mop.pc.as_int(),
                    if *rob.mop.is_first_mop_in_insn {
                        ""
                    } else {
                        ".."
                    },
                )?;
                MOpDebug::mop_debug(&rob.mop.mop, f)
            })
            .to_string();
            // TODO
        }
        retval
    }
    #[hdl]
    /// Writes the full debug-state snapshot of `self` to the simulation's
    /// `state_for_debug` signal (debug/waveform output only).
    async fn write_for_debug(
        &self,
        sim: &mut ExternModuleSimulationState,
        state_for_debug: Expr<RenameExecuteRetireDebugState<C>>,
    ) {
        let Self {
            ref rename_delayed,
            ref rename_table,
            ref retire_rename_table,
            ref rob,
            ref next_pc_canceling,
            ref unit_canceling,
            ref l1_reg_file,
            l2_reg_file_unit_index: _,
            config: _,
        } = *self;
        sim.write(
            state_for_debug,
            #[hdl(sim)]
            RenameExecuteRetireDebugState::<_> {
                rename_delayed: state_for_debug
                    .ty()
                    .rename_delayed
                    .from_iter_sim(zeroed(StaticType::TYPE), rename_delayed)
                    .expect("known to fit"),
                rename_table: rename_table.to_debug_state(),
                retire_rename_table: retire_rename_table.to_debug_state(),
                rob: rob.debug_state(),
                next_pc_canceling: NextPcCancelingState::debug_state(next_pc_canceling),
                unit_canceling,
                // Option<PRegValue> -> HdlOption per register slot
                l1_reg_file: SimValue::from_array_elements(
                    state_for_debug.ty().l1_reg_file,
                    l1_reg_file.iter().map(|v| {
                        SimValue::from_array_elements(
                            state_for_debug.ty().l1_reg_file.element(),
                            v.iter().map(|v| {
                                if let Some(v) = v {
                                    #[hdl(sim)]
                                    HdlSome(v)
                                } else {
                                    #[hdl(sim)]
                                    HdlNone()
                                }
                            }),
                        )
                    }),
                ),
                per_insn_timeline: self.per_insn_timeline(),
            },
        )
        .await;
    }
    #[hdl]
    /// Publishes the queue of not-yet-renamed &micro;Ops (rename-delayed plus
    /// unrenamed ROB entries) to next-PC for debugging; `HdlNone` while canceling.
    async fn write_to_next_pc_next_insns(
        &self,
        sim: &mut ExternModuleSimulationState,
        next_insns: Expr<HdlOption<ArrayVec<MOpInstance<MOp>, CpuConfigRobSize<C>>>>,
    ) {
        sim.write(
            next_insns,
            if self.is_canceling() {
                #[hdl(sim)]
                (next_insns.ty()).HdlNone()
            } else {
                #[hdl(sim)]
                (next_insns.ty()).HdlSome(
                    next_insns
                        .ty()
                        .HdlSome
                        .from_iter_sim(
                            zeroed(MOpInstance[MOp]),
                            self.rename_delayed.iter().chain(self.rob.unrenamed()),
                        )
                        .ok()
                        .expect("known to fit"),
                )
            },
        )
        .await;
    }
fn space_available_for_unit(&self, unit_index: usize) -> usize {
let mut retval = self.config.get().unit_max_in_flight(unit_index);
for renamed in self.rob.renamed() {
if renamed.unit_index() == unit_index {
let Some(v) = NonZero::new(retval.get() - 1) else {
return 0;
};
retval = v;
}
}
retval.get()
}
    #[hdl]
    /// Finds the lowest output-register index of `unit_index` that is not
    /// referenced by any in-flight &micro;Op (as dest or source) nor by either
    /// rename table. Returns `None` when all are taken.
    fn find_free_unit_out_reg(&self, unit_index: usize) -> Option<usize> {
        // TODO: replace searching through instructions and rename tables with tracking when regs are free
        let mut allocated_regs = vec![false; 1 << self.config.get().out_reg_num_width];
        for renamed in self.rob.renamed() {
            if renamed.unit_index() == unit_index {
                allocated_regs[renamed.unit_out_reg_index()] = true;
            }
            // sources are raw bits; reinterpret as PRegNum to extract the unit
            MOpTrait::for_each_src_reg_sim_ref(&renamed.mop.mop, &mut |src_reg, _index| {
                #[hdl(sim)]
                let PRegNum::<_> {
                    unit_num,
                    unit_out_reg,
                } = src_reg.cast_bits_to(PRegNum[self.config]);
                if Some(unit_index) == UnitNum::index_sim(&unit_num) {
                    allocated_regs[UnitOutRegNum::value_sim(&unit_out_reg)] = true;
                }
            });
        }
        // a reg is also busy while either rename table still maps to it
        for entry in self
            .rename_table
            .entries
            .iter()
            .chain(self.retire_rename_table.entries.iter())
        {
            #[hdl(sim)]
            match entry {
                RenameTableEntry::<_>::L1(entry) => {
                    if Some(unit_index) == UnitNum::index_sim(&entry.unit_num) {
                        allocated_regs[UnitOutRegNum::value_sim(&entry.unit_out_reg)] = true;
                    }
                }
                RenameTableEntry::<_>::L2(_) => {}
            }
        }
        allocated_regs.iter().position(|v| !v)
    }
    #[hdl]
    /// Finds the lowest L2 register index not referenced by any in-flight L2
    /// read/write &micro;Op nor by either rename table. Returns `None` when full.
    fn find_free_l2_reg(&self) -> Option<usize> {
        // TODO: replace searching through instructions and rename tables with tracking when regs are free
        let mut allocated_regs = vec![false; L2RegNum.l2_reg_count()];
        for renamed in self.rob.renamed() {
            // the L2 reg number travels in the immediate field of the move op
            #[hdl(sim)]
            if let RenamedMOp::<_>::TransformedMove(l2_register_file_op) = &renamed.mop.mop {
                let l2_reg = #[hdl(sim)]
                match l2_register_file_op {
                    L2RegisterFileMOp::<_, _>::ReadL2Reg(v) => &v.common.imm,
                    L2RegisterFileMOp::<_, _>::WriteL2Reg(v) => &v.common.imm,
                };
                allocated_regs[L2RegNum::value_sim(l2_reg)] = true;
            }
        }
        for entry in self
            .rename_table
            .entries
            .iter()
            .chain(self.retire_rename_table.entries.iter())
        {
            #[hdl(sim)]
            match entry {
                RenameTableEntry::<_>::L1(_) => {}
                RenameTableEntry::<_>::L2(entry) => {
                    allocated_regs[L2RegNum::value_sim(entry)] = true;
                }
            }
        }
        allocated_regs.iter().position(|v| !v)
    }
    /// Inserts a renamed &micro;Op into the ROB with a fresh id, first clearing
    /// its destination slot in the L1 register file (the value is not valid
    /// until the unit writes it).
    fn add_renamed_with_new_id(
        &mut self,
        unrenamed: &SimValue<MOpInstance<MOp>>,
        renamed: RobEntry<C>,
    ) {
        self.l1_reg_file[renamed.unit_index()][renamed.unit_out_reg_index()] = None;
        self.rob.renamed_push_back_with_new_id(unrenamed, renamed);
    }
    /// Applies `update` to the speculative rename table immediately, and
    /// records it on the ROB's back entry so it can be replayed into the
    /// retire rename table when the &micro;Op retires.
    fn update_rename_table(
        &mut self,
        unrenamed: &SimValue<MOpInstance<MOp>>,
        update: RenameTableUpdate<C>,
    ) {
        self.rename_table.update(&update, "rename_table");
        self.rob
            .unrenamed_back_append_rename_table_update(unrenamed, update);
    }
    #[hdl]
    /// Attempts to rename one decoded &micro;Op and insert it into the ROB.
    ///
    /// Returns `Err(insn)` (giving the &micro;Op back unchanged) when the ROB is
    /// full, the chosen unit has no space, or no destination/L2 register can
    /// currently be allocated — the caller retries later.
    fn try_rename(
        &mut self,
        insn: SimValue<MOpInstance<MOp>>,
    ) -> Result<(), SimValue<MOpInstance<MOp>>> {
        if self.rob.unrenamed_len() >= self.config.get().rob_size.get() {
            return Err(insn);
        }
        if self.rob.renamed_len() >= self.config.get().rob_size.get() {
            return Err(insn);
        }
        let unit_kind = UnitMOp::kind_sim(&insn.mop);
        // A register-to-register move is handled purely in the rename table:
        // the dest regs are pointed at the source's current entry, and no
        // renamed µOp is created.
        #[hdl(sim)]
        if let MOp::TransformedMove(move_reg_mop) = &insn.mop {
            let mut src_regs = [MOpRegNum::CONST_ZERO_REG_NUM; 1];
            MOpTrait::for_each_src_reg_sim_ref(move_reg_mop, &mut |src_reg, index| {
                src_regs[index] = src_reg.as_int() as u32;
            });
            let [src_reg] = src_regs;
            let renamed_reg = self.rename_table.entries[src_reg as usize].clone();
            println!(
                "moving from {src_reg:#x} renamed: {:?}",
                RenameTableEntry::debug_sim(&renamed_reg),
            );
            let unrenamed_dest_regs =
                MOpDestReg::regs_sim(MOpTrait::dest_reg_sim_ref(move_reg_mop));
            assert!(self.rob.incomplete_back_entry.is_none());
            for unrenamed_reg_num in unrenamed_dest_regs {
                self.update_rename_table(
                    &insn,
                    RenameTableUpdate::Write {
                        unrenamed_reg_num,
                        new: renamed_reg.clone(),
                    },
                );
            }
            self.rob.finished_unrenamed_push_back(&insn);
            return Ok(());
        }
        // Candidate unit for this µOp kind, ranked by: has a free output reg
        // first, then most space available.
        #[derive(Clone, Copy)]
        struct ChosenUnit {
            unit_index: usize,
            out_reg_num: Option<usize>,
            space_available: usize,
        }
        impl ChosenUnit {
            fn is_better_than(self, other: Self) -> bool {
                let Self {
                    unit_index: _,
                    out_reg_num,
                    space_available,
                } = self;
                if out_reg_num.is_some() != other.out_reg_num.is_some() {
                    out_reg_num.is_some()
                } else {
                    space_available > other.space_available
                }
            }
        }
        let mut chosen_unit = None;
        for (unit_index, unit_config) in self.config.get().units.iter().enumerate() {
            if unit_config.kind != unit_kind {
                continue;
            }
            let cur_unit = ChosenUnit {
                unit_index,
                out_reg_num: self.find_free_unit_out_reg(unit_index),
                space_available: self.space_available_for_unit(unit_index),
            };
            let chosen_unit = chosen_unit.get_or_insert(cur_unit);
            if cur_unit.is_better_than(*chosen_unit) {
                *chosen_unit = cur_unit;
            }
        }
        let Some(ChosenUnit {
            unit_index,
            out_reg_num,
            space_available,
        }) = chosen_unit
        else {
            panic!(
                "there are no units of kind: {unit_kind:?}:\n{:?}",
                self.config,
            );
        };
        if space_available == 0 {
            return Err(insn);
        }
        // No free output reg: spilling to the L2 register file could free one,
        // but that path is not implemented yet.
        let Some(out_reg_num) = out_reg_num else {
            return if self.space_available_for_unit(self.l2_reg_file_unit_index) > 0
                && let Some(l2_reg_index) = self.find_free_l2_reg()
            {
                todo!("maybe start a L2 register file store");
            } else {
                Err(insn)
            };
        };
        let out_reg_num_sim = UnitOutRegNum[self.config].new_sim(out_reg_num);
        #[hdl(sim)]
        let MOpInstance::<_> {
            fetch_block_id,
            id: _,
            pc,
            predicted_next_pc,
            size_in_bytes,
            is_first_mop_in_insn,
            mop,
        } = &insn;
        // set when a source is currently in the L2 reg file and must be loaded first
        let mut needed_load = None;
        let unrenamed_dest_regs = MOpDestReg::regs_sim(MOpTrait::dest_reg_sim_ref(mop));
        let renamed_dest_reg = #[hdl(sim)]
        PRegNum::<_> {
            unit_num: UnitNum[self.config].from_index_sim(unit_index),
            unit_out_reg: out_reg_num_sim,
        };
        let mop = MOpTrait::map_regs_sim(
            mop,
            &renamed_dest_reg,
            CpuConfigPRegNumWidth[self.config],
            &mut |src_reg, index| {
                let renamed = &self.rename_table.entries[src_reg.as_int() as usize];
                println!(
                    "renaming src[{index}] from {src_reg:?} to {:?}",
                    RenameTableEntry::debug_sim(renamed),
                );
                #[hdl(sim)]
                match renamed {
                    RenameTableEntry::<_>::L1(v) => v.cast_to_bits(),
                    RenameTableEntry::<_>::L2(v) => {
                        // placeholder; the real rename happens after the L2 load below
                        needed_load.get_or_insert_with(|| v.clone());
                        PRegNum[self.config]
                            .const_zero()
                            .cast_to_bits()
                            .into_sim_value()
                    }
                }
            },
        );
        if let Some(needed_load) = needed_load {
            // Insert an L2 read µOp instead of `insn` this round; `insn` stays
            // un-renamed (Err) and is retried once the load is in flight.
            return if let Some(out_reg) = self.find_free_unit_out_reg(self.l2_reg_file_unit_index)
                && self.space_available_for_unit(self.l2_reg_file_unit_index) > 0
            {
                let dest = #[hdl(sim)]
                PRegNum::<_> {
                    unit_num: UnitNum[self.config].from_index_sim(self.l2_reg_file_unit_index),
                    unit_out_reg: UnitOutRegNum[self.config].new_sim(out_reg),
                };
                self.update_rename_table(
                    &insn,
                    RenameTableUpdate::UpdateForReadL2Reg {
                        dest: dest.clone(),
                        src: needed_load.clone(),
                    },
                );
                self.add_renamed_with_new_id(
                    &insn,
                    RobEntry::new(
                        #[hdl(sim)]
                        MOpInstance::<_> {
                            fetch_block_id,
                            id: MOpId.zero(), // filled in by add_renamed_with_new_id
                            pc,
                            predicted_next_pc,
                            size_in_bytes,
                            is_first_mop_in_insn,
                            mop: ReadL2RegMOp::read_l2_reg::<RenamedMOp<C>>(
                                dest,
                                repeat(RenamedSrcRegUInt[self.config].zero(), ConstUsize),
                                needed_load,
                            ),
                        },
                    ),
                );
                Ok(())
            } else {
                Err(insn)
            };
        }
        let mop = UnitMOp::with_transformed_move_op_sim(
            mop,
            RenamedMOp[self.config].TransformedMove,
            |_move_reg| unreachable!(),
        );
        // point all dest regs at the newly-allocated L1 physical register
        let renamed_dest_reg = #[hdl(sim)]
        (RenameTableEntry[self.config]).L1(renamed_dest_reg);
        for unrenamed_reg_num in unrenamed_dest_regs {
            self.update_rename_table(
                &insn,
                RenameTableUpdate::Write {
                    unrenamed_reg_num,
                    new: renamed_dest_reg.clone(),
                },
            );
        }
        self.add_renamed_with_new_id(
            &insn,
            RobEntry::new(
                #[hdl(sim)]
                MOpInstance::<_> {
                    fetch_block_id,
                    id: MOpId.zero(), // filled in by add_renamed_with_new_id
                    pc,
                    predicted_next_pc,
                    size_in_bytes,
                    is_first_mop_in_insn,
                    mop,
                },
            ),
        );
        self.rob.finished_unrenamed_push_back(&insn);
        Ok(())
    }
    #[hdl]
    /// Picks the oldest not-yet-started &micro;Op for `unit_index` whose source
    /// values are all available in the L1 register file, and packages it as a
    /// `UnitStart`. Returns `HdlNone` while canceling or when nothing is ready.
    fn get_unit_start(&self, unit_index: usize) -> SimValue<HdlOption<UnitStart<C>>> {
        let ret_ty = HdlOption[UnitStart[self.config]];
        if self.is_canceling() {
            let retval = #[hdl(sim)]
            ret_ty.HdlNone();
            return retval; // separate variable to work around rust-analyzer parse error
        }
        let zero_reg = PRegNum[self.config].const_zero().into_sim_value();
        let zero_value = zeroed(PRegValue);
        for rob in self.rob.renamed() {
            if let MOpExecutionProgress::NotStarted = rob.progress
                && rob.unit_index() == unit_index
            {
                // None in src_values = operand not yet produced
                let mut src_values: [_; COMMON_MOP_SRC_LEN] =
                    std::array::from_fn(|_| Some(zero_value.clone()));
                MOpTrait::for_each_src_reg_sim_ref(&rob.mop.mop, &mut |src_reg, index| {
                    let src_reg = src_reg.cast_bits_to(zero_reg.ty());
                    #[hdl(sim)]
                    let PRegNum::<_> {
                        unit_num,
                        unit_out_reg,
                    } = &src_reg;
                    if let Some(src_unit_index) = UnitNum::index_sim(unit_num) {
                        src_values[index] = self.l1_reg_file[src_unit_index]
                            [UnitOutRegNum::value_sim(unit_out_reg)]
                        .clone();
                    } else {
                        // only the const-zero register has no unit index
                        assert_eq!(src_reg, zero_reg);
                        src_values[index] = Some(zeroed(PRegValue));
                    }
                });
                if src_values.iter().all(|v| v.is_some()) {
                    let src_values: [SimValue<_>; 3] = src_values.map(Option::unwrap);
                    let retval = #[hdl(sim)]
                    ret_ty.HdlSome(
                        #[hdl(sim)]
                        UnitStart::<_> {
                            mop: &rob.mop,
                            src_values,
                            config: self.config,
                        },
                    );
                    return retval;
                }
            }
        }
        #[hdl(sim)]
        ret_ty.HdlNone()
    }
    #[hdl]
    /// Picks the oldest &micro;Op on `unit_index` that is non-speculative but
    /// hasn't been told so yet. Returns `HdlNone` while canceling or when
    /// there is nothing to notify.
    fn get_unit_mop_is_no_longer_speculative(
        &self,
        unit_index: usize,
    ) -> SimValue<HdlOption<UnitMOpIsNoLongerSpeculative<C>>> {
        let ret_ty = HdlOption[UnitMOpIsNoLongerSpeculative[self.config]];
        if self.is_canceling() {
            let retval = #[hdl(sim)]
            ret_ty.HdlNone();
            return retval; // separate variable to work around rust-analyzer parse error
        }
        for rob in self.rob.renamed() {
            if rob.unit_index() == unit_index
                && !rob.is_speculative
                && !rob.sent_is_no_longer_speculative
            {
                let retval = #[hdl(sim)]
                ret_ty.HdlSome(
                    #[hdl(sim)]
                    UnitMOpIsNoLongerSpeculative::<_> {
                        id: &rob.mop.id,
                        config: self.config,
                    },
                );
                return retval; // separate variable to work around rust-analyzer parse error
            }
        }
        #[hdl(sim)]
        ret_ty.HdlNone()
    }
    #[hdl]
    /// Handles a `finished` message from an execution unit.
    ///
    /// The ROB entry for `finished.id` must currently be `Started` and must
    /// not already have a recorded cancel cause. On success the destination
    /// value is written into this unit's L1 register file slot (asserted to
    /// still be empty) and progress becomes `Finished(predictor_op)`;
    /// otherwise progress becomes `Canceled`. A unit may only report a
    /// cancel if the entry still has `can_cause_cancel` set, and an op that
    /// didn't finish successfully must have caused a cancel.
    fn unit_finished(&mut self, finished: SimValue<UnitFinished<C>>) {
        #[hdl(sim)]
        let UnitFinished::<_> {
            id,
            finished_successfully,
            caused_cancel: unit_caused_cancel,
            config: _,
        } = finished;
        assert!(!self.is_canceling());
        let rob = self.rob.renamed_by_id_mut(&id);
        let unit_index = rob.unit_index();
        let out_reg_index = rob.unit_out_reg_index();
        let RobEntry {
            mop: _,
            is_speculative: _,
            sent_is_no_longer_speculative: _,
            can_cause_cancel,
            progress,
            caused_cancel,
        } = rob;
        assert!(matches!(progress, MOpExecutionProgress::Started));
        assert!(caused_cancel.is_none());
        #[hdl(sim)]
        if let HdlSome(finished_successfully) = finished_successfully {
            #[hdl(sim)]
            let UnitFinishedSuccessfully::<_> {
                dest: _,
                dest_value,
                predictor_op,
            } = finished_successfully;
            // publish the result so dependent µOps can read it as a source
            // (see get_unit_start)
            let l1_reg = &mut self.l1_reg_file[unit_index][out_reg_index];
            assert!(l1_reg.is_none());
            *l1_reg = Some(dest_value);
            *progress = MOpExecutionProgress::Finished(predictor_op);
        } else {
            *progress = MOpExecutionProgress::Canceled;
        }
        #[hdl(sim)]
        if let HdlSome(unit_caused_cancel) = unit_caused_cancel {
            assert!(
                *can_cause_cancel,
                "MOp {id:?} said it won't cause a cancel, then caused a cancel: {unit_caused_cancel:#?}"
            );
            *caused_cancel = Some(unit_caused_cancel);
        } else {
            assert!(
                matches!(progress, MOpExecutionProgress::Finished(_)),
                "MOp {id:?} must finish successfully and/or cause a cancel: progress={progress:?}",
            );
        }
    }
fn get_from_post_decode_ready(&self) -> usize {
if self.is_canceling() {
0
} else {
TwiceCpuConfigFetchWidth[self.config]
.saturating_sub(self.rename_delayed.len())
.min(CpuConfigFetchWidth[self.config])
.min(
CpuConfigRobSize[self.config]
.saturating_sub(self.rename_delayed.len())
.saturating_sub(self.rob.unrenamed_len()),
)
}
}
fn handle_from_post_decode(&mut self, insns: &[SimValue<MOpInstance<MOp>>]) {
if insns.is_empty() {
return;
}
assert!(!self.is_canceling());
for insn in insns {
self.rename_delayed.push_back(insn.clone());
}
for _ in 0..CpuConfigFetchWidth[self.config] {
let Some(insn) = self.rename_delayed.pop_front() else {
break;
};
match self.try_rename(insn) {
Ok(()) => {}
Err(insn) => {
self.rename_delayed.push_front(insn);
break;
}
}
}
}
    #[hdl]
    /// Completes a cancel once post-decode has acknowledged it: drops all
    /// queued (not yet renamed) &micro;Ops, rolls the speculative rename
    /// table back to the retirement rename table, empties the ROB, and
    /// leaves the cancel state machine.
    fn finish_receive_cancel_from_post_decode(&mut self) {
        let Self {
            rename_delayed,
            rename_table,
            retire_rename_table,
            rob,
            next_pc_canceling,
            unit_canceling: _,
            // NOTE(review): `l1_reg_file` is bound here but never cleared,
            // even though `unit_finished` asserts a slot is empty before
            // writing — confirm stale results of canceled µOps are reclaimed
            // elsewhere.
            l1_reg_file,
            l2_reg_file_unit_index: _,
            config: _,
        } = self;
        assert_eq!(
            *next_pc_canceling,
            Some(NextPcCancelingState::NeedReceiveCancel)
        );
        rename_delayed.clear();
        // undo every speculative rename since the last retired instruction
        rename_table.clone_from(retire_rename_table);
        rob.clear();
        *next_pc_canceling = None;
    }
    #[hdl]
    /// Advances the cancel handshake after `next_pc` accepted our
    /// `CancelAndStartAt` message: now wait for post-decode's cancel
    /// acknowledgment (handled by `finish_receive_cancel_from_post_decode`).
    fn finish_send_cancel_to_next_pc(&mut self) {
        assert!(matches!(
            self.next_pc_canceling,
            Some(NextPcCancelingState::NeedSendCancel(_))
        ));
        self.next_pc_canceling = Some(NextPcCancelingState::NeedReceiveCancel);
    }
#[hdl]
fn peek_retiring_insns(&self) -> Vec<SimValue<NextPcPredictorOp<C>>> {
let mut retval = Vec::new();
let mut prev_caused_cancel = false;
for RobEntries {
unrenamed,
rename_table_updates: _,
renamed_entries,
} in &self.rob.entries
{
if retval.len() >= self.config.get().fetch_width.get() || prev_caused_cancel {
return retval;
}
let mut unrenamed_op = #[hdl(sim)]
NextPcPredictorOp::<_> {
call_stack_op: #[hdl(sim)]
CallStackOp.None(),
cond_br_taken: #[hdl(sim)]
HdlNone(),
config: self.config,
};
for renamed in renamed_entries {
if prev_caused_cancel {
return retval;
}
if let RobEntry {
mop,
is_speculative,
sent_is_no_longer_speculative,
can_cause_cancel,
progress: MOpExecutionProgress::Finished(renamed_op),
caused_cancel,
} = renamed
{
prev_caused_cancel = caused_cancel.is_some();
#[hdl(sim)]
let NextPcPredictorOp::<_> {
call_stack_op,
cond_br_taken,
config: _,
} = renamed_op;
#[hdl(sim)]
if let CallStackOp::None = &unrenamed_op.call_stack_op {
unrenamed_op.call_stack_op = call_stack_op.clone();
}
#[hdl(sim)]
if let HdlNone = &unrenamed_op.cond_br_taken {
unrenamed_op.cond_br_taken = cond_br_taken.clone();
}
} else {
return retval;
}
}
retval.push(unrenamed_op);
}
retval
}
    #[hdl]
    /// Computes what should be offered to `next_pc` this cycle, without
    /// mutating any state:
    /// - `CancelAndStartAt` while a cancel is waiting to be sent,
    /// - `HdlNone` during the rest of a cancel or when nothing can retire,
    /// - otherwise the batch of retiring instructions from
    ///   `peek_retiring_insns`.
    fn retire_peek(&self) -> SimValue<HdlOption<RetireToNextPcInterfaceInner<C>>> {
        let ty = RetireToNextPcInterfaceInner[self.config];
        let ret_ty = HdlOption[ty];
        let next_pc_predictor_op = NextPcPredictorOp[self.config];
        if let Some(NextPcCancelingState::NeedSendCancel(v)) = &self.next_pc_canceling {
            #[hdl(sim)]
            ret_ty.HdlSome(
                #[hdl(sim)]
                ty.CancelAndStartAt(v),
            )
        } else if self.is_canceling() {
            #[hdl(sim)]
            ret_ty.HdlNone()
        } else {
            let retiring_insns = self.peek_retiring_insns();
            if retiring_insns.is_empty() {
                #[hdl(sim)]
                ret_ty.HdlNone()
            } else {
                #[hdl(sim)]
                ret_ty.HdlSome(
                    #[hdl(sim)]
                    ty.RetiredInstructions(
                        // peek_retiring_insns caps itself at fetch_width,
                        // which is this ArrayVec's capacity
                        ty.RetiredInstructions
                            .from_iter_sim(zeroed(next_pc_predictor_op), retiring_insns)
                            .expect("known to fit"),
                    ),
                )
            }
        }
    }
    #[hdl]
    /// Retires the oldest ROB entry group: applies its rename-table updates
    /// to the retirement rename table and, if one of its &micro;Ops recorded
    /// a cancel cause, starts the cancel. Callers only pass ops produced by
    /// `peek_retiring_insns`, so every renamed &micro;Op in the group is
    /// already `Finished`.
    fn retire_one(&mut self, retire: &SimValue<NextPcPredictorOp<C>>) {
        assert!(!self.is_canceling());
        // `retire` carries no information needed here — all fields ignored
        #[hdl(sim)]
        let NextPcPredictorOp::<_> {
            call_stack_op: _,
            cond_br_taken: _,
            config: _,
        } = retire;
        let Some(RobEntries {
            unrenamed: _,
            rename_table_updates,
            renamed_entries,
        }) = self.rob.entries.pop_front()
        else {
            unreachable!();
        };
        // make the group's renames architecturally visible
        rename_table_updates
            .iter()
            .for_each(|v| self.retire_rename_table.update(v, "retire_rename_table"));
        for RobEntry {
            mop: _,
            is_speculative: _,
            sent_is_no_longer_speculative: _,
            can_cause_cancel: _,
            progress,
            caused_cancel,
        } in renamed_entries
        {
            assert!(matches!(progress, MOpExecutionProgress::Finished(_)));
            if let Some(caused_cancel) = caused_cancel {
                self.start_cancel(caused_cancel);
                return;
            }
        }
    }
    #[hdl]
    /// Begins a pipeline cancel: records the PC to restart fetch at (to be
    /// sent to `next_pc` as `CancelAndStartAt`) and flags every execution
    /// unit so it gets a `cancel_all` message.
    fn start_cancel(&mut self, caused_cancel: SimValue<UnitCausedCancel<C>>) {
        assert!(!self.is_canceling());
        #[hdl(sim)]
        let UnitCausedCancel::<_> {
            id: _,
            start_at_pc,
            config: _,
        } = caused_cancel;
        self.next_pc_canceling = Some(NextPcCancelingState::NeedSendCancel(start_at_pc.as_int()));
        self.unit_canceling.fill(true);
    }
    #[hdl]
    /// Per-cycle state maintenance (skipped entirely while canceling).
    ///
    /// Walks the renamed ROB entries oldest-first, marking each one
    /// non-speculative and re-deriving whether it can still cause a cancel
    /// from its progress; the walk stops at the first entry that hasn't
    /// started yet or that can still cancel, since everything younger stays
    /// speculative. Then, if the oldest renamed entry was canceled and
    /// recorded a cancel cause, start that cancel now (such an entry can
    /// never retire).
    fn step(&mut self) {
        if self.is_canceling() {
            return;
        }
        for renamed in self.rob.renamed_mut() {
            let can_cause_cancel = match renamed.progress {
                MOpExecutionProgress::NotStarted => break,
                MOpExecutionProgress::Started => renamed.can_cause_cancel,
                MOpExecutionProgress::Finished(_) => renamed.caused_cancel.is_some(),
                MOpExecutionProgress::Canceled => true,
            };
            renamed.can_cause_cancel = can_cause_cancel;
            renamed.is_speculative = false;
            if can_cause_cancel {
                break;
            }
        }
        let first_renamed = self.rob.renamed().next();
        if let Some(RobEntry {
            mop: _,
            is_speculative: _,
            sent_is_no_longer_speculative: _,
            can_cause_cancel: _,
            progress: MOpExecutionProgress::Canceled,
            caused_cancel: Some(caused_cancel),
        }) = first_renamed
        {
            let caused_cancel = caused_cancel.clone();
            self.start_cancel(caused_cancel);
            return;
        }
    }
}
#[hdl]
/// Simulation body for the `rename_execute_retire` extern module.
///
/// Each loop iteration models one clock cycle: first drive this cycle's
/// outputs from the current state, then wait for the clock edge and apply
/// the inputs and handshakes sampled at that edge (via `read_past`) to the
/// state.
async fn rename_execute_retire_run(
    mut sim: ExternModuleSimulationState,
    cd: Expr<ClockDomain>,
    from_post_decode: Expr<PostDecodeOutputInterface<PhantomConst<CpuConfig>>>,
    to_next_pc: Expr<RetireToNextPcInterface<PhantomConst<CpuConfig>>>,
    to_units: Expr<ExecuteToUnitInterfaces<PhantomConst<CpuConfig>>>,
    state_for_debug: Expr<RenameExecuteRetireDebugState<PhantomConst<CpuConfig>>>,
    config: PhantomConst<CpuConfig>,
) {
    let mut state = RenameExecuteRetireState::new(config);
    loop {
        // --- phase 1: drive this cycle's outputs from the current state ---
        state
            .write_to_next_pc_next_insns(&mut sim, to_next_pc.next_insns)
            .await;
        state.write_for_debug(&mut sim, state_for_debug).await;
        let from_post_decode_ready = state.get_from_post_decode_ready();
        assert!(from_post_decode_ready <= from_post_decode.ty().ready.end());
        sim.write(from_post_decode.ready, from_post_decode_ready)
            .await;
        // only accept post-decode's cancel message while waiting for it
        sim.write(
            from_post_decode.cancel.ready,
            state.next_pc_canceling == Some(NextPcCancelingState::NeedReceiveCancel),
        )
        .await;
        let retire_peek = state.retire_peek();
        sim.write(to_next_pc.inner.data, &retire_peek).await;
        let is_canceling = state.is_canceling();
        for (unit_index, to_unit) in ExecuteToUnitInterfaces::unit_fields(to_units)
            .into_iter()
            .enumerate()
        {
            #[hdl]
            let ExecuteToUnitInterface::<_> {
                start,
                cancel_all,
                is_no_longer_speculative,
                cant_cause_cancel,
                finished,
                config: _,
            } = to_unit;
            sim.write(start.data, state.get_unit_start(unit_index))
                .await;
            sim.write(
                cancel_all.data,
                if state.unit_canceling[unit_index] {
                    #[hdl(sim)]
                    HdlSome(())
                } else {
                    #[hdl(sim)]
                    HdlNone()
                },
            )
            .await;
            sim.write(
                is_no_longer_speculative.data,
                state.get_unit_mop_is_no_longer_speculative(unit_index),
            )
            .await;
            sim.write(cant_cause_cancel.ready, !is_canceling).await;
            sim.write(finished.ready, !is_canceling).await;
        }
        // --- phase 2: end of cycle; consume values sampled at this edge ---
        sim.wait_for_clock_edge(cd.clk).await;
        // accept newly decoded µOps, limited to the ready count we advertised
        let from_post_decode_insns = sim.read_past(from_post_decode.insns, cd.clk).await;
        let from_post_decode_insns = ArrayVec::elements_sim_ref(&from_post_decode_insns);
        state.handle_from_post_decode(
            from_post_decode_insns
                .get(..from_post_decode_ready)
                .unwrap_or(from_post_decode_insns),
        );
        // apply the unit-side handshakes that completed at this edge
        for (unit_index, to_unit) in ExecuteToUnitInterfaces::unit_fields(to_units)
            .into_iter()
            .enumerate()
        {
            #[hdl]
            let ExecuteToUnitInterface::<_> {
                start,
                cancel_all,
                is_no_longer_speculative,
                cant_cause_cancel,
                finished,
                config: _,
            } = to_unit;
            if sim.read_past_bool(start.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(start) = sim.read_past(start.data, cd.clk).await {
                    // the unit accepted the µOp we offered — mark it Started
                    assert!(!state.is_canceling());
                    let RobEntry {
                        mop: _,
                        is_speculative: _,
                        sent_is_no_longer_speculative: _,
                        can_cause_cancel: _,
                        progress,
                        caused_cancel,
                    } = state.rob.renamed_by_id_mut(&start.mop.id);
                    assert!(caused_cancel.is_none());
                    assert!(matches!(progress, MOpExecutionProgress::NotStarted));
                    *progress = MOpExecutionProgress::Started;
                }
            }
            if sim.read_past_bool(cancel_all.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(v) = sim.read_past(cancel_all.data, cd.clk).await {
                    // the unit acknowledged the cancel
                    let () = *v;
                    assert!(state.unit_canceling[unit_index]);
                    state.unit_canceling[unit_index] = false;
                }
            }
            if sim
                .read_past_bool(is_no_longer_speculative.ready, cd.clk)
                .await
            {
                #[hdl(sim)]
                if let HdlSome(is_no_longer_speculative) =
                    sim.read_past(is_no_longer_speculative.data, cd.clk).await
                {
                    assert!(!state.is_canceling());
                    // record that the unit received the non-speculative notice
                    if let Some(RobEntry {
                        mop: _,
                        is_speculative,
                        sent_is_no_longer_speculative,
                        can_cause_cancel: _,
                        progress: _,
                        caused_cancel: _,
                    }) = state
                        .rob
                        .try_renamed_by_id_mut(&is_no_longer_speculative.id)
                    {
                        assert!(!*is_speculative);
                        assert!(!*sent_is_no_longer_speculative);
                        *sent_is_no_longer_speculative = true;
                    }
                }
            }
            if sim.read_past_bool(cant_cause_cancel.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(cant_cause_cancel) =
                    sim.read_past(cant_cause_cancel.data, cd.clk).await
                {
                    // the unit promises this µOp will never cause a cancel
                    #[hdl(sim)]
                    let UnitMOpCantCauseCancel::<_> { id, config: _ } = cant_cause_cancel;
                    assert!(!state.is_canceling());
                    let RobEntry {
                        mop: _,
                        is_speculative: _,
                        sent_is_no_longer_speculative: _,
                        can_cause_cancel,
                        progress,
                        caused_cancel,
                    } = state.rob.renamed_by_id_mut(&id);
                    assert!(caused_cancel.is_none());
                    assert!(!matches!(progress, MOpExecutionProgress::Canceled));
                    *can_cause_cancel = false;
                }
            }
            if sim.read_past_bool(finished.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(finished) = sim.read_past(finished.data, cd.clk).await {
                    state.unit_finished(finished);
                }
            }
        }
        // progress the cancel handshakes, or retire what we offered this cycle
        match &mut state.next_pc_canceling {
            Some(NextPcCancelingState::NeedReceiveCancel) => {
                #[hdl(sim)]
                if let HdlSome(_) = sim.read_past(from_post_decode.cancel.data, cd.clk).await {
                    state.finish_receive_cancel_from_post_decode();
                }
            }
            Some(NextPcCancelingState::NeedSendCancel(_)) => {
                if sim.read_past_bool(to_next_pc.inner.ready, cd.clk).await {
                    state.finish_send_cancel_to_next_pc();
                }
            }
            None => {
                if sim.read_past_bool(to_next_pc.inner.ready, cd.clk).await {
                    // next_pc accepted the batch — actually retire it now
                    #[hdl(sim)]
                    if let HdlSome(v) = retire_peek {
                        let ops =
                            #[hdl(sim)]
                            if let RetireToNextPcInterfaceInner::<_>::RetiredInstructions(ops) = v {
                                ops
                            } else {
                                unreachable!()
                            };
                        for op in ArrayVec::elements_sim_ref(&ops) {
                            state.retire_one(op);
                        }
                    }
                }
            }
        }
        state.step();
    }
}
#[hdl_module(extern)]
/// Extern (simulation-implemented) module for the rename / execute-dispatch /
/// retire stage. Ports: decoded µOps in from post-decode, retire/cancel
/// messages out to `next_pc`, per-unit dispatch interfaces, and a debug view
/// of the internal state. Behavior is defined by `rename_execute_retire_run`.
pub fn rename_execute_retire(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let from_post_decode: PostDecodeOutputInterface<PhantomConst<CpuConfig>> =
        m.input(PostDecodeOutputInterface[config]);
    #[hdl]
    let to_next_pc: RetireToNextPcInterface<PhantomConst<CpuConfig>> =
        m.output(RetireToNextPcInterface[config]);
    #[hdl]
    let to_units: ExecuteToUnitInterfaces<PhantomConst<CpuConfig>> =
        m.output(ExecuteToUnitInterfaces[config]);
    #[hdl]
    let state_for_debug: RenameExecuteRetireDebugState<PhantomConst<CpuConfig>> =
        m.output(RenameExecuteRetireDebugState[config]);
    // needed so the simulation body can use read_past on this clock
    m.register_clock_for_past(cd.clk);
    m.extern_module_simulation_fn(
        (
            cd,
            from_post_decode,
            to_next_pc,
            to_units,
            state_for_debug,
            config,
        ),
        async |args, mut sim| {
            let (cd, from_post_decode, to_next_pc, to_units, state_for_debug, config) = args;
            sim.write(state_for_debug, state_for_debug.ty().sim_value_default())
                .await;
            sim.resettable(
                cd,
                // while in reset: drive every output to its idle value
                async |mut sim: ExternModuleSimulationState| {
                    sim.write(from_post_decode.ready, 0usize).await;
                    sim.write(from_post_decode.cancel.ready, false).await;
                    sim.write(to_next_pc.inner.data, to_next_pc.ty().inner.data.HdlNone())
                        .await;
                    sim.write(to_next_pc.next_insns, to_next_pc.ty().next_insns.HdlNone())
                        .await;
                    for unit_field in ExecuteToUnitInterfaces::unit_fields(to_units) {
                        #[hdl]
                        let ExecuteToUnitInterface::<_> {
                            start,
                            cancel_all,
                            is_no_longer_speculative,
                            cant_cause_cancel,
                            finished,
                            config: _,
                        } = unit_field;
                        sim.write(
                            start.data,
                            #[hdl(sim)]
                            (start.ty().data).HdlNone(),
                        )
                        .await;
                        sim.write(
                            cancel_all.data,
                            #[hdl(sim)]
                            HdlNone(),
                        )
                        .await;
                        sim.write(
                            is_no_longer_speculative.data,
                            #[hdl(sim)]
                            (is_no_longer_speculative.ty().data).HdlNone(),
                        )
                        .await;
                        sim.write(cant_cause_cancel.ready, false).await;
                        sim.write(finished.ready, false).await;
                    }
                },
                // out of reset: run the main cycle loop
                |sim, ()| {
                    rename_execute_retire_run(
                        sim,
                        cd,
                        from_post_decode,
                        to_next_pc,
                        to_units,
                        state_for_debug,
                        config,
                    )
                },
            )
            .await;
        },
    );
}