2209 lines
78 KiB
Rust
2209 lines
78 KiB
Rust
// SPDX-License-Identifier: LGPL-3.0-or-later
|
|
// See Notices.txt for copyright information
|
|
|
|
//! Rename/Execute/Retire Control System
|
|
//! [#8](https://git.libre-chip.org/libre-chip/grant-tracking/issues/8)
|
|
|
|
use crate::{
|
|
config::{
|
|
CpuConfig, CpuConfig2PowOutRegNumWidth, CpuConfigFetchWidth, CpuConfigPRegNumWidth,
|
|
CpuConfigRobSize, CpuConfigUnitCount, PhantomConstCpuConfig, TwiceCpuConfigFetchWidth,
|
|
},
|
|
instruction::{
|
|
COMMON_MOP_SRC_LEN, L2RegNum, L2RegisterFileMOp, MOp, MOpDestReg, MOpRegNum, MOpTrait,
|
|
PRegNum, ReadL2RegMOp, UnitNum, UnitOutRegNum,
|
|
},
|
|
next_pc::{CallStackOp, SimValueDefault},
|
|
register::PRegValue,
|
|
rename_execute_retire::to_unit_interfaces::ExecuteToUnitInterfaces,
|
|
unit::{UnitKind, UnitMOp},
|
|
util::array_vec::ArrayVec,
|
|
};
|
|
use fayalite::{
|
|
int::UIntInRangeInclusiveType,
|
|
prelude::*,
|
|
ty::{OpaqueSimValue, SimValueDebug, StaticType},
|
|
util::ready_valid::ReadyValid,
|
|
};
|
|
use std::{collections::VecDeque, fmt, mem, num::NonZero};
|
|
|
|
pub mod to_unit_interfaces;
|
|
|
|
/// bit width of [`MOpId`]
pub const MOP_ID_WIDTH: usize = 16;
#[hdl]
/// identifier for one in-flight µOp instance.
/// Allocated from a wrapping counter at rename time
/// (see `ReorderBuffer::renamed_push_back_with_new_id`),
/// so ids repeat modulo `2**MOP_ID_WIDTH`.
pub type MOpId = UInt<{ MOP_ID_WIDTH }>;
|
|
|
|
#[hdl(custom_debug(sim))]
/// A µOp along with the state needed for this instance of the µOp.
pub struct MOpInstance<MOp> {
    // NOTE(review): presumably identifies which fetch block this µOp was decoded from -- confirm
    pub fetch_block_id: UInt<8>,
    /// unique (modulo wrap-around) id for this in-flight µOp instance
    pub id: MOpId,
    /// program counter of the ISA-level instruction this µOp came from
    pub pc: UInt<64>,
    /// initialized to 0 by decoder, overwritten by `next_pc()`
    pub predicted_next_pc: UInt<64>,
    // NOTE(review): presumably the ISA-level instruction's encoded size in bytes -- confirm
    pub size_in_bytes: UInt<4>,
    /// `true` if this µOp is the first µOp in the ISA-level instruction.
    /// In general, a single µOp can't be canceled by itself,
    /// it needs to be canceled along with all other µOps that
    /// come from the same ISA-level instruction.
    pub is_first_mop_in_insn: Bool,
    /// `true` if this µOp is the last µOp in the ISA-level instruction.
    /// In general, a single µOp can't be canceled by itself,
    /// it needs to be canceled along with all other µOps that
    /// come from the same ISA-level instruction.
    pub is_last_mop_in_insn: Bool,
    /// the µOp itself (generic so it can hold either unrenamed or renamed µOps)
    pub mop: MOp,
}
|
|
|
|
impl<MOp: Type> SimValueDebug for MOpInstance<MOp> {
    #[hdl]
    /// compact single-line debug format for simulator traces;
    /// shows every field followed by the wrapped µOp's own debug output
    fn sim_value_debug(
        value: &<Self as Type>::SimValue,
        f: &mut fmt::Formatter<'_>,
    ) -> fmt::Result {
        #[hdl(sim)]
        let Self {
            fetch_block_id,
            id,
            pc,
            predicted_next_pc,
            size_in_bytes,
            is_first_mop_in_insn,
            is_last_mop_in_insn,
            mop,
        } = value;
        write!(
            f,
            "fid={fetch_block_id:?} id={id:?} pc={pc:?} pn_pc={predicted_next_pc:?} sz={size_in_bytes:?} first={is_first_mop_in_insn} last={is_last_mop_in_insn}: {mop:?}"
        )
    }
}
|
|
|
|
#[hdl(no_static)]
/// TODO: merge with [`crate::next_pc::PostDecodeOutputInterface`]
pub struct PostDecodeOutputInterface<C: PhantomConstGet<CpuConfig>> {
    /// decoded µOps offered to the rename/execute/retire circuit, in program order
    pub insns: ArrayVec<MOpInstance<MOp>, CpuConfigFetchWidth<C>>,
    // NOTE(review): presumably the count of leading `insns` accepted this cycle -- confirm against decoder
    #[hdl(flip)]
    pub ready: UIntInRangeInclusiveType<ConstUsize<0>, CpuConfigFetchWidth<C>>,
    /// tells the rename/execute/retire circuit to cancel all non-retired instructions
    pub cancel: ReadyValid<()>,
    pub config: C,
}

#[hdl(no_static)]
/// branch-predictor update information produced when a µOp finishes
pub struct NextPcPredictorOp<C: PhantomConstGet<CpuConfig>> {
    /// push/pop operation to apply to the return-address call stack
    pub call_stack_op: CallStackOp,
    /// should be `HdlSome(taken)` for any conditional control-flow instruction
    /// with an immediate target that can be predicted as taken/not-taken (branch/call/return).
    pub cond_br_taken: HdlOption<Bool>,
    pub config: C,
}

#[hdl(no_static)]
/// TODO: merge with [`crate::next_pc::RetireToNextPcInterfaceInner`]
pub enum RetireToNextPcInterfaceInner<C: PhantomConstGet<CpuConfig>> {
    /// cancel speculation and restart fetching at the given pc
    CancelAndStartAt(UInt<64>),
    /// predictor updates for the instructions retired this cycle, in program order
    RetiredInstructions(ArrayVec<NextPcPredictorOp<C>, CpuConfigFetchWidth<C>>),
}

#[hdl(no_static)]
/// handles updating speculative branch predictor state (e.g. branch histories)
/// when instructions retire, as well as updating state when a
/// branch instruction is mis-speculated.
pub struct RetireToNextPcInterface<C: PhantomConstGet<CpuConfig>> {
    pub inner: ReadyValid<RetireToNextPcInterfaceInner<C>>,
    /// only for debugging
    pub next_insns: HdlOption<ArrayVec<MOpInstance<MOp>, CpuConfigRobSize<C>>>,
}
|
|
|
|
#[hdl]
/// a µOp whose register operands have been renamed to physical register numbers
pub type RenamedMOp<C: PhantomConstGet<CpuConfig>> =
    crate::instruction::RenamedMOp<PRegNum<C>, CpuConfigPRegNumWidth<C>>;

#[hdl]
/// integer wide enough to hold a renamed source physical register number
pub type RenamedSrcRegUInt<C: PhantomConstGet<CpuConfig>> = UIntType<CpuConfigPRegNumWidth<C>>;
|
|
|
|
/// Enqueues happen in program order, they are not re-ordered by out-of-order execution.
/// the whole `MOpInstance` is sent again in [`UnitInputsReady`] so Units can just ignore all
/// [`UnitEnqueue`] messages if they don't need to keep track of program order -- so, pure computation
/// instructions.
/// Loads/Stores need to keep track of program order to ensure they properly handle memory dependencies.
#[hdl(no_static)]
pub struct UnitEnqueue<C: PhantomConstGet<CpuConfig>> {
    /// the renamed µOp being enqueued into the unit
    pub mop: MOpInstance<RenamedMOp<C>>,
    pub config: C,
}

#[hdl(no_static)]
/// sent to a unit along with the µOp's source register values
pub struct UnitInputsReady<C: PhantomConstGet<CpuConfig>> {
    /// the whole `MOpInstance` is sent again so Units can just ignore all [`UnitEnqueue`] messages if desired.
    pub mop: MOpInstance<RenamedMOp<C>>,
    /// values of the µOp's source registers
    pub src_values: Array<PRegValue, { COMMON_MOP_SRC_LEN }>,
    pub config: C,
}

#[hdl(no_static)]
/// reported by a unit when a µOp's result value is available
pub struct UnitOutputReady<C: PhantomConstGet<CpuConfig>> {
    /// id of the µOp whose output is ready
    pub id: MOpId,
    /// the computed destination register value
    pub dest_value: PRegValue,
    /// branch-predictor update information (see [`NextPcPredictorOp`])
    pub predictor_op: NextPcPredictorOp<C>,
}

#[hdl(no_static)]
/// describes a cancel requested by a unit (e.g. branch misprediction or memory-order violation)
pub struct UnitCausedCancel<C: PhantomConstGet<CpuConfig>> {
    /// pc to restart fetching from after the cancel
    pub start_at_pc: UInt<64>,
    /// `true` if this instruction should be retired and then cause a cancel
    /// (e.g. a branch ran successfully but the next pc was mispredicted so the following instructions
    /// needs to be canceled).
    /// `false` if this instruction should be canceled without retiring it
    /// (e.g. a load ran before a store it should have run after so it needs to retry after the memory
    /// is in the right state).
    pub cancel_after_retire: Bool,
    pub config: C,
}

#[hdl(no_static)]
/// sent by a unit when a µOp finishes and/or causes a cancel
/// (see the state diagram on [`ExecuteToUnitInterface`])
pub struct UnitFinishCauseCancel<C: PhantomConstGet<CpuConfig>> {
    /// id of the finishing µOp
    pub id: MOpId,
    /// `HdlSome` iff this µOp caused a cancel
    pub caused_cancel: HdlOption<UnitCausedCancel<C>>,
    pub config: C,
}

#[hdl(no_static)]
/// tells a unit that the identified µOp is no longer speculative
pub struct UnitMOpIsNoLongerSpeculative<C: PhantomConstGet<CpuConfig>> {
    pub id: MOpId,
    pub config: C,
}

#[hdl(no_static)]
/// sent by a unit when the identified µOp can no longer cause a cancel
pub struct UnitMOpCantCauseCancel<C: PhantomConstGet<CpuConfig>> {
    pub id: MOpId,
    pub config: C,
}
|
|
|
|
/// Interface from the Rename/Execute/Retire control logic to a single Unit.
///
/// ## State diagram for a single µOp in a Unit
/// Notes:
/// * The diagram ignores `cancel_all`.
/// * Multiple state transitions can happen in a single clock cycle.
/// * Any state marked "Can cause cancel", can immediately finish with [`Self::finish_cause_cancel`] where:
///   * [`UnitCausedCancel::cancel_after_retire`] must be `false` unless there's a "Finish" edge from this state.
///   * [`UnitFinishCauseCancel::caused_cancel`] must be `HdlSome` unless there's a "Finish" edge from this state.
#[doc = simple_mermaid::mermaid!("rename_execute_retire/unit.mermaid")]
#[hdl(no_static)]
pub struct ExecuteToUnitInterface<C: PhantomConstGet<CpuConfig>> {
    /// Enqueues happen in program order, they are not re-ordered by out-of-order execution.
    pub enqueue: ReadyValid<UnitEnqueue<C>>,
    /// the µOp (with its source values) whose inputs became ready this cycle, if any
    pub inputs_ready: HdlOption<UnitInputsReady<C>>,
    /// the µOp that became non-speculative this cycle, if any
    pub is_no_longer_speculative: HdlOption<UnitMOpIsNoLongerSpeculative<C>>,
    /// this uses [`Self::unit_outputs_ready`] as a shared ready flag
    #[hdl(flip)]
    pub cant_cause_cancel: HdlOption<UnitMOpCantCauseCancel<C>>,
    /// this uses [`Self::unit_outputs_ready`] as a shared ready flag
    #[hdl(flip)]
    pub output_ready: HdlOption<UnitOutputReady<C>>,
    /// this uses [`Self::unit_outputs_ready`] as a shared ready flag
    #[hdl(flip)]
    pub finish_cause_cancel: HdlOption<UnitFinishCauseCancel<C>>,
    /// ready flag for [`Self::cant_cause_cancel`], [`Self::output_ready`], and [`Self::finish_cause_cancel`]
    pub unit_outputs_ready: Bool,
    /// tells the unit to drop all of its in-flight µOps
    pub cancel_all: ReadyValid<()>,
    pub config: C,
}
|
|
|
|
fn zeroed<T: Type>(ty: T) -> SimValue<T> {
|
|
SimValue::from_opaque(
|
|
ty,
|
|
OpaqueSimValue::from_bits(UInt::new(ty.canonical().bit_width()).zero()),
|
|
)
|
|
}
|
|
|
|
impl<C: PhantomConstCpuConfig> SimValueDefault for RenameExecuteRetireDebugState<C> {
    #[hdl]
    /// all-zero / empty default used as the debug-state reset value
    fn sim_value_default(self) -> SimValue<Self> {
        // destructure so adding a field is a compile error until handled here
        let Self {
            rename_delayed,
            rename_table,
            retire_rename_table,
            rob,
            next_pc_canceling,
            unit_canceling,
            l1_reg_file,
            per_insn_timeline,
        } = self;
        let empty_string = SimOnlyValue::new(String::new());
        #[hdl(sim)]
        Self {
            rename_delayed: zeroed(rename_delayed),
            rename_table: zeroed(rename_table),
            retire_rename_table: zeroed(retire_rename_table),
            rob: rob.sim_value_default(),
            next_pc_canceling: zeroed(next_pc_canceling),
            unit_canceling: zeroed(unit_canceling),
            l1_reg_file: zeroed(l1_reg_file),
            // the timeline is sim-only text, so it defaults to empty strings
            per_insn_timeline: SimValue::from_array_elements(
                per_insn_timeline,
                (0..per_insn_timeline.len()).map(|_| empty_string.clone()),
            ),
        }
    }
}
|
|
|
|
#[hdl(no_static)]
/// where an architectural register's current value lives:
/// either an L1 physical register or an L2 register
enum RenameTableEntry<C: PhantomConstGet<CpuConfig>> {
    L1(PRegNum<C>),
    L2(L2RegNum),
}

impl<C: PhantomConstCpuConfig> RenameTableEntry<C> {
    #[hdl]
    /// reset value: an `L1` entry holding the all-zero physical register number
    fn const_zero(self) -> SimValue<Self> {
        #[hdl(sim)]
        self.L1(self.L1.const_zero())
    }
}

/// make arrays dynamically-sized to avoid putting large types on the stack
#[hdl(get(|c| 1 << MOpRegNum::WIDTH))]
type MOpRegCount<C: PhantomConstGet<CpuConfig>> = DynSize;
|
|
|
|
#[hdl(no_static)]
/// HDL mirror of [`RenameTable`] used for debug-state output
struct RenameTableDebugState<C: PhantomConstGet<CpuConfig>> {
    entries: ArrayType<RenameTableEntry<C>, MOpRegCount<C>>,
    config: C,
}

#[derive(Debug)]
/// simulation-side register rename table: maps every unrenamed
/// register number to the location of its current value
struct RenameTable<C: PhantomConstCpuConfig> {
    // boxed so the (1 << MOpRegNum::WIDTH)-entry array lives on the heap
    entries: Box<[SimValue<RenameTableEntry<C>>; 1 << MOpRegNum::WIDTH]>,
    config: C,
}
|
|
|
|
impl<C: PhantomConstCpuConfig> Clone for RenameTable<C> {
    /// deep-copies the (heap-allocated) entry array; the config is a cheap copy
    fn clone(&self) -> Self {
        let Self { entries, config } = self;
        Self {
            entries: entries.clone(),
            config: *config,
        }
    }
    /// reuses `self`'s existing allocation instead of building a fresh table
    fn clone_from(&mut self, source: &Self) {
        self.entries.clone_from(&source.entries);
        self.config = source.config;
    }
}
|
|
|
|
#[derive(Debug, Clone)]
/// a single mutation to apply to a [`RenameTable`] (see [`RenameTable::update`])
enum RenameTableUpdate<C: PhantomConstCpuConfig> {
    /// point `unrenamed_reg_num` at a new location; ignored for the const-zero register
    Write {
        unrenamed_reg_num: u32,
        new: SimValue<RenameTableEntry<C>>,
    },
    /// an L2 register was read into an L1 register: every entry that
    /// referenced the L2 `src` is redirected to the L1 `dest`
    UpdateForReadL2Reg {
        dest: SimValue<PRegNum<C>>,
        src: SimValue<L2RegNum>,
    },
    /// an L1 register was written to an L2 register: every entry that
    /// referenced the L1 `src` is redirected to the L2 `dest`
    UpdateForWriteL2Reg {
        dest: SimValue<L2RegNum>,
        src: SimValue<PRegNum<C>>,
    },
}
|
|
|
|
impl<C: PhantomConstCpuConfig> RenameTable<C> {
    /// builds a table with every entry set to the const-zero `L1` entry
    fn new(config: C) -> Self {
        let entries: Box<[SimValue<RenameTableEntry<C>>; 1 << MOpRegNum::WIDTH]> =
            vec![RenameTableEntry[config].const_zero(); 1 << MOpRegNum::WIDTH]
                .try_into()
                .expect("size is known to match");
        Self { entries, config }
    }
    #[hdl]
    /// snapshots the table into its HDL debug-state mirror
    fn to_debug_state(&self) -> SimValue<RenameTableDebugState<C>> {
        let Self { entries, config } = self;
        let ty = RenameTableDebugState[*config];
        #[hdl(sim)]
        RenameTableDebugState::<_> {
            entries: entries.to_sim_value_with_type(ty.entries),
            config,
        }
    }
    #[hdl]
    /// applies one [`RenameTableUpdate`]; `rename_table_name` is only used
    /// to label the trace output printed for each change
    fn update(&mut self, update: &RenameTableUpdate<C>, rename_table_name: &str) {
        match update {
            RenameTableUpdate::Write {
                unrenamed_reg_num,
                new,
            } => {
                if *unrenamed_reg_num == MOpRegNum::CONST_ZERO_REG_NUM {
                    // writing to const zero reg does nothing
                    return;
                }
                println!("{rename_table_name}: Write: {unrenamed_reg_num:#x} <- {new:?}");
                self.entries[*unrenamed_reg_num as usize] = new.clone();
            }
            RenameTableUpdate::UpdateForReadL2Reg { dest, src } => {
                // redirect every entry that referenced the L2 `src` to the L1 `dest`
                let new = #[hdl(sim)]
                (RenameTableEntry[self.config]).L1(dest);
                for (unrenamed_reg_num, entry) in self.entries.iter_mut().enumerate() {
                    #[hdl(sim)]
                    match &entry {
                        RenameTableEntry::<_>::L1(_) => {}
                        RenameTableEntry::<_>::L2(l2) => {
                            if L2RegNum::value_sim(l2) == L2RegNum::value_sim(src) {
                                println!(
                                    "{rename_table_name}: UpdateForReadL2Reg: {unrenamed_reg_num:#x} \
                                    updating from {entry:?} to {new:?}",
                                );
                                *entry = new.clone();
                            }
                        }
                    }
                }
            }
            RenameTableUpdate::UpdateForWriteL2Reg { dest, src } => {
                // redirect every entry that referenced the L1 `src` to the L2 `dest`
                let new = #[hdl(sim)]
                (RenameTableEntry[self.config]).L2(dest);
                for (unrenamed_reg_num, entry) in self.entries.iter_mut().enumerate() {
                    #[hdl(sim)]
                    match &entry {
                        RenameTableEntry::<_>::L1(l1) => {
                            // NOTE(review): this arm compares SimValues with `==` while the
                            // L2 arm above compares via `value_sim` -- presumably equivalent; confirm
                            if l1 == src {
                                println!(
                                    "{rename_table_name}: UpdateForWriteL2Reg: {unrenamed_reg_num:#x} \
                                    updating from {entry:?} to {new:?}",
                                );
                                *entry = new.clone();
                            }
                        }
                        RenameTableEntry::<_>::L2(_) => {}
                    }
                }
            }
        }
    }
}
|
|
|
|
#[derive(Copy, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, Default)]
/// control-side model of where a µOp is in its unit's lifecycle
/// (see the state diagram on [`ExecuteToUnitInterface`])
enum MOpInUnitState {
    /// initial state: not yet sent to the unit via `enqueue`
    #[default]
    NotYetEnqueued,
    /// enqueued, waiting for source operands; still speculative
    InputsNotReadySpeculative {
        can_cause_cancel: bool,
    },
    /// source operands delivered to the unit
    InputsReady {
        speculative: bool,
        can_cause_cancel: bool,
    },
    /// the unit reported the µOp's result value
    OutputReady {
        speculative: bool,
        can_cause_cancel: bool,
    },
    /// terminal state: finished and/or caused a cancel
    FinishedAndOrCausedCancel,
}
|
|
|
|
impl MOpInUnitState {
|
|
fn debug_str(self) -> &'static str {
|
|
match self {
|
|
Self::NotYetEnqueued => "NotYetEnqueued",
|
|
Self::InputsNotReadySpeculative { can_cause_cancel } => {
|
|
if can_cause_cancel {
|
|
"INR_S_C"
|
|
} else {
|
|
"INR_S"
|
|
}
|
|
}
|
|
Self::InputsReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => {
|
|
if speculative {
|
|
if can_cause_cancel { "IR_S_C" } else { "IR_S" }
|
|
} else {
|
|
if can_cause_cancel { "IR_C" } else { "IR" }
|
|
}
|
|
}
|
|
Self::OutputReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => {
|
|
if speculative {
|
|
if can_cause_cancel { "OR_S_C" } else { "OR_S" }
|
|
} else {
|
|
if can_cause_cancel { "OR_C" } else { "OR" }
|
|
}
|
|
}
|
|
Self::FinishedAndOrCausedCancel => "F_C",
|
|
}
|
|
}
|
|
#[must_use]
|
|
fn after_enqueue(self) -> Option<Self> {
|
|
match self {
|
|
Self::NotYetEnqueued => Some(Self::InputsNotReadySpeculative {
|
|
can_cause_cancel: true,
|
|
}),
|
|
_ => None,
|
|
}
|
|
}
|
|
#[must_use]
|
|
fn after_output_ready(self) -> Option<Self> {
|
|
match self {
|
|
Self::InputsReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => Some(Self::OutputReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
}),
|
|
_ => None,
|
|
}
|
|
}
|
|
#[must_use]
|
|
fn after_finish_cause_cancel(
|
|
self,
|
|
cancel_after_retire: bool,
|
|
cause_cancel: bool,
|
|
) -> Option<Self> {
|
|
if cause_cancel && !cancel_after_retire {
|
|
match self {
|
|
Self::NotYetEnqueued => None,
|
|
Self::InputsNotReadySpeculative { can_cause_cancel }
|
|
| Self::InputsReady {
|
|
speculative: _,
|
|
can_cause_cancel,
|
|
}
|
|
| Self::OutputReady {
|
|
speculative: _,
|
|
can_cause_cancel,
|
|
} => can_cause_cancel.then_some(Self::FinishedAndOrCausedCancel),
|
|
Self::FinishedAndOrCausedCancel => todo!(),
|
|
}
|
|
} else {
|
|
assert!(cause_cancel == cancel_after_retire);
|
|
// see if we can eventually retire MOp
|
|
match self {
|
|
Self::OutputReady {
|
|
speculative: _,
|
|
can_cause_cancel,
|
|
} => {
|
|
if cause_cancel && !can_cause_cancel {
|
|
None
|
|
} else {
|
|
Some(Self::FinishedAndOrCausedCancel)
|
|
}
|
|
}
|
|
Self::NotYetEnqueued
|
|
| Self::InputsNotReadySpeculative { .. }
|
|
| Self::InputsReady { .. }
|
|
| Self::FinishedAndOrCausedCancel => None,
|
|
}
|
|
}
|
|
}
|
|
#[must_use]
|
|
fn with_inputs_ready(self) -> Option<Self> {
|
|
match self {
|
|
Self::InputsNotReadySpeculative { can_cause_cancel } => Some(Self::InputsReady {
|
|
speculative: true,
|
|
can_cause_cancel,
|
|
}),
|
|
_ => None,
|
|
}
|
|
}
|
|
#[must_use]
|
|
fn without_speculative(self) -> Option<Self> {
|
|
match self {
|
|
Self::NotYetEnqueued => None,
|
|
Self::InputsNotReadySpeculative { .. } => None,
|
|
Self::InputsReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => speculative.then_some(Self::InputsReady {
|
|
speculative: false,
|
|
can_cause_cancel,
|
|
}),
|
|
Self::OutputReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => speculative.then_some(Self::OutputReady {
|
|
speculative: false,
|
|
can_cause_cancel,
|
|
}),
|
|
Self::FinishedAndOrCausedCancel => None,
|
|
}
|
|
}
|
|
#[must_use]
|
|
fn with_cant_cause_cancel(self) -> Option<Self> {
|
|
match self {
|
|
Self::NotYetEnqueued => None,
|
|
Self::InputsNotReadySpeculative { can_cause_cancel } => {
|
|
can_cause_cancel.then_some(Self::InputsNotReadySpeculative {
|
|
can_cause_cancel: false,
|
|
})
|
|
}
|
|
Self::InputsReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => can_cause_cancel.then_some(Self::InputsReady {
|
|
speculative,
|
|
can_cause_cancel: false,
|
|
}),
|
|
Self::OutputReady {
|
|
speculative,
|
|
can_cause_cancel,
|
|
} => can_cause_cancel.then_some(Self::OutputReady {
|
|
speculative,
|
|
can_cause_cancel: false,
|
|
}),
|
|
Self::FinishedAndOrCausedCancel => None,
|
|
}
|
|
}
|
|
}
|
|
|
|
impl fmt::Debug for MOpInUnitState {
|
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
f.write_str(self.debug_str())
|
|
}
|
|
}
|
|
|
|
#[hdl]
/// sim-only wrapper so [`MOpInUnitState`] can be carried in HDL debug-state types
type SimOnlyMOpInUnitState = SimOnly<MOpInUnitState>;
|
|
|
|
#[hdl(no_static)]
/// HDL mirror of [`RobEntry`] used for debug-state output
struct RobEntryDebugState<C: PhantomConstGet<CpuConfig>> {
    mop: MOpInstance<RenamedMOp<C>>,
    mop_in_unit_state: SimOnlyMOpInUnitState,
    is_speculative: Bool,
    /// `HdlSome` once the µOp has finished, carrying its predictor update
    finished: HdlOption<NextPcPredictorOp<C>>,
    /// `HdlSome` iff this µOp caused a cancel
    caused_cancel: HdlOption<UnitCausedCancel<C>>,
}
|
|
|
|
impl<C: PhantomConstCpuConfig> SimValueDefault for RobEntryDebugState<C> {
    #[hdl]
    /// empty/zeroed default used to pad debug-state arrays
    fn sim_value_default(self) -> SimValue<Self> {
        let Self {
            mop,
            mop_in_unit_state: _,
            is_speculative: _,
            finished,
            caused_cancel,
        } = self;
        #[hdl(sim)]
        Self {
            mop: zeroed(mop),
            mop_in_unit_state: SimOnlyValue::default(),
            is_speculative: false,
            finished: #[hdl(sim)]
            finished.HdlNone(),
            caused_cancel: #[hdl(sim)]
            caused_cancel.HdlNone(),
        }
    }
}
|
|
|
|
#[derive(Debug)]
/// one renamed µOp tracked by the reorder buffer
struct RobEntry<C: PhantomConstCpuConfig> {
    mop: SimValue<MOpInstance<RenamedMOp<C>>>,
    /// where the µOp is in its unit's lifecycle
    mop_in_unit_state: MOpInUnitState,
    is_speculative: bool,
    /// `Some` once the µOp has finished, carrying its predictor update
    finished: Option<SimValue<NextPcPredictorOp<C>>>,
    /// `Some` iff this µOp caused a cancel
    caused_cancel: Option<SimValue<UnitCausedCancel<C>>>,
}
|
|
|
|
impl<C: PhantomConstCpuConfig> RobEntry<C> {
|
|
fn new(mop: SimValue<MOpInstance<RenamedMOp<C>>>) -> Self {
|
|
Self {
|
|
mop,
|
|
mop_in_unit_state: MOpInUnitState::NotYetEnqueued,
|
|
is_speculative: true,
|
|
finished: None,
|
|
caused_cancel: None,
|
|
}
|
|
}
|
|
fn dest_reg(&self) -> &SimValue<PRegNum<C>> {
|
|
MOpTrait::dest_reg_sim_ref(&self.mop.mop)
|
|
}
|
|
fn unit_num(&self) -> &SimValue<UnitNum<C>> {
|
|
&self.dest_reg().unit_num
|
|
}
|
|
fn unit_index(&self) -> usize {
|
|
UnitNum::index_sim(&self.unit_num()).expect("known to have unit_index")
|
|
}
|
|
fn unit_out_reg(&self) -> &SimValue<UnitOutRegNum<C>> {
|
|
&self.dest_reg().unit_out_reg
|
|
}
|
|
fn unit_out_reg_index(&self) -> usize {
|
|
UnitOutRegNum::value_sim(&self.unit_out_reg())
|
|
}
|
|
#[hdl]
|
|
fn debug_state(&self, config: C) -> SimValue<RobEntryDebugState<C>> {
|
|
let Self {
|
|
mop,
|
|
mop_in_unit_state,
|
|
is_speculative,
|
|
finished,
|
|
caused_cancel,
|
|
} = self;
|
|
let ret_ty = RobEntryDebugState[config];
|
|
#[hdl(sim)]
|
|
RobEntryDebugState::<C> {
|
|
mop,
|
|
mop_in_unit_state: SimOnlyValue::new(*mop_in_unit_state),
|
|
is_speculative,
|
|
finished: finished.into_sim_value_with_type(ret_ty.finished),
|
|
caused_cancel: caused_cancel.into_sim_value_with_type(ret_ty.caused_cancel),
|
|
}
|
|
}
|
|
}
|
|
|
|
#[hdl]
/// HDL mirror of [`RobEntries`] used for debug-state output
struct RobEntriesDebugState {
    unrenamed: MOpInstance<MOp>,
    /// number of renamed µOps that this unrenamed µOp corresponds to
    renamed_entries_len: UInt<8>,
}

impl SimValueDefault for RobEntriesDebugState {
    #[hdl]
    /// zeroed default used to pad debug-state arrays
    fn sim_value_default(self) -> SimValue<Self> {
        let Self {
            unrenamed,
            renamed_entries_len,
        } = self;
        #[hdl(sim)]
        Self {
            unrenamed: zeroed(unrenamed),
            renamed_entries_len: renamed_entries_len.sim_value_default(),
        }
    }
}
|
|
|
|
#[derive(Debug)]
/// one unrenamed µOp plus everything rename produced for it:
/// its renamed µOps and the rename-table updates it caused
struct RobEntries<C: PhantomConstCpuConfig> {
    unrenamed: SimValue<MOpInstance<MOp>>,
    /// rename-table updates to replay on the retire rename table when this µOp retires
    rename_table_updates: Vec<RenameTableUpdate<C>>,
    renamed_entries: VecDeque<RobEntry<C>>,
}

impl<C: PhantomConstCpuConfig> RobEntries<C> {
    #[hdl]
    /// snapshots this group into its HDL debug-state mirror
    /// (only the unrenamed µOp and the renamed count are exposed)
    fn debug_state(&self) -> SimValue<RobEntriesDebugState> {
        let Self {
            unrenamed,
            rename_table_updates: _,
            renamed_entries,
        } = self;
        #[hdl(sim)]
        RobEntriesDebugState {
            unrenamed,
            renamed_entries_len: u8::try_from(renamed_entries.len())
                .expect("renamed_entries.len() should fit in u8"),
        }
    }
}
|
|
|
|
#[hdl(no_static)]
/// debug view of a pending cancel that still needs to be sent
struct NeedSendCancelDebugState<C: PhantomConstGet<CpuConfig>> {
    /// restart pc still to be sent to the next-pc circuit, if any
    send_to_next_pc: HdlOption<UInt<64>>,
    /// per-unit flags for units that still need to receive the cancel
    send_to_units: ArrayType<Bool, CpuConfigUnitCount<C>>,
    config: C,
}

#[hdl]
/// HDL mirror of [`NextPcCancelingState`] used for debug-state output
enum NextPcCancelingDebugState {
    NeedSendCancel(UInt<64>),
    NeedReceiveCancel,
}

#[derive(Clone, PartialEq, Eq, Debug)]
/// progress of a cancel handshake with the next-pc circuit
enum NextPcCancelingState {
    /// a cancel with this restart pc still needs to be sent
    NeedSendCancel(u64),
    /// the cancel was sent; waiting for it to be accepted
    NeedReceiveCancel,
}
|
|
|
|
impl NextPcCancelingState {
    #[hdl]
    /// converts the optional sim-side state into its HDL debug-state mirror
    fn debug_state(this: &Option<Self>) -> SimValue<HdlOption<NextPcCancelingDebugState>> {
        match this {
            Some(Self::NeedSendCancel(pc)) =>
            {
                #[hdl(sim)]
                HdlSome(
                    #[hdl(sim)]
                    NextPcCancelingDebugState.NeedSendCancel(pc),
                )
            }
            Some(Self::NeedReceiveCancel) =>
            {
                #[hdl(sim)]
                HdlSome(
                    #[hdl(sim)]
                    NextPcCancelingDebugState.NeedReceiveCancel(),
                )
            }
            None =>
            {
                #[hdl(sim)]
                HdlNone()
            }
        }
    }
}
|
|
|
|
#[hdl(no_static)]
/// HDL mirror of [`ReorderBuffer`] used for debug-state output
pub struct ReorderBufferDebugState<C: PhantomConstGet<CpuConfig>> {
    next_renamed_mop_id: MOpId,
    entries: ArrayVec<RobEntriesDebugState, CpuConfigRobSize<C>>,
    incomplete_back_entry: HdlOption<RobEntriesDebugState>,
    /// all renamed entries flattened into one list, in program order
    renamed: ArrayVec<RobEntryDebugState<C>, CpuConfigRobSize<C>>,
    config: C,
}

impl<C: PhantomConstCpuConfig> SimValueDefault for ReorderBufferDebugState<C> {
    #[hdl]
    /// empty default used as the debug-state reset value
    fn sim_value_default(self) -> SimValue<Self> {
        let Self {
            next_renamed_mop_id,
            entries,
            incomplete_back_entry,
            renamed,
            config,
        } = self;
        #[hdl(sim)]
        Self {
            next_renamed_mop_id: next_renamed_mop_id.sim_value_default(),
            entries: entries.sim_value_default(),
            incomplete_back_entry: incomplete_back_entry.sim_value_default(),
            renamed: renamed.sim_value_default(),
            config,
        }
    }
}
|
|
|
|
#[derive(Debug)]
/// simulation-side reorder buffer: tracks all in-flight µOps, grouped
/// by unrenamed µOp, in program order
struct ReorderBuffer<C: PhantomConstCpuConfig> {
    /// id to assign to the next renamed µOp (wraps around)
    next_renamed_mop_id: SimValue<MOpId>,
    /// completed groups, front = oldest
    entries: VecDeque<RobEntries<C>>,
    /// group for the unrenamed µOp whose renaming hasn't finished yet, if any
    incomplete_back_entry: Option<RobEntries<C>>,
    config: C,
}
|
|
|
|
impl<C: PhantomConstCpuConfig> ReorderBuffer<C> {
|
|
fn new(config: C) -> Self {
|
|
Self {
|
|
next_renamed_mop_id: MOpId.zero().into_sim_value(),
|
|
entries: VecDeque::new(),
|
|
incomplete_back_entry: None,
|
|
config,
|
|
}
|
|
}
|
|
#[hdl]
|
|
fn debug_state(&self) -> SimValue<ReorderBufferDebugState<C>> {
|
|
let Self {
|
|
next_renamed_mop_id,
|
|
entries,
|
|
incomplete_back_entry,
|
|
config,
|
|
} = self;
|
|
let ty = ReorderBufferDebugState[*config];
|
|
#[hdl(sim)]
|
|
ReorderBufferDebugState::<_> {
|
|
next_renamed_mop_id,
|
|
entries: ty
|
|
.entries
|
|
.from_iter_sim(
|
|
zeroed(StaticType::TYPE),
|
|
entries.iter().map(RobEntries::debug_state),
|
|
)
|
|
.expect("known to fit"),
|
|
incomplete_back_entry: incomplete_back_entry.as_ref().map(|v| v.debug_state()),
|
|
renamed: ty
|
|
.renamed
|
|
.from_iter_sim(
|
|
ty.renamed.element().sim_value_default(),
|
|
self.renamed().map(|v| v.debug_state(*config)),
|
|
)
|
|
.ok()
|
|
.expect("known to fit"),
|
|
config,
|
|
}
|
|
}
|
|
fn unrenamed_len(&self) -> usize {
|
|
self.entries.len()
|
|
}
|
|
fn unrenamed(&self) -> impl DoubleEndedIterator<Item = &SimValue<MOpInstance<MOp>>> + Clone {
|
|
self.entries.iter().map(|v| &v.unrenamed)
|
|
}
|
|
fn unrenamed_mut(
|
|
&mut self,
|
|
) -> impl DoubleEndedIterator<Item = &mut SimValue<MOpInstance<MOp>>> {
|
|
self.entries.iter_mut().map(|v| &mut v.unrenamed)
|
|
}
|
|
fn retire_groups_unrenamed_ranges(
|
|
&self,
|
|
) -> impl Clone + Iterator<Item = std::ops::Range<usize>> {
|
|
let mut next_group_start = 0;
|
|
self.entries
|
|
.iter()
|
|
.enumerate()
|
|
.filter_map(move |(index, entry)| {
|
|
if *entry.unrenamed.is_last_mop_in_insn {
|
|
let group_start = next_group_start;
|
|
next_group_start = index + 1;
|
|
Some(group_start..next_group_start)
|
|
} else {
|
|
None
|
|
}
|
|
})
|
|
}
|
|
fn retire_groups(
|
|
&self,
|
|
) -> impl Clone + Iterator<Item: DoubleEndedIterator<Item = &RobEntries<C>> + Clone> {
|
|
self.retire_groups_unrenamed_ranges()
|
|
.map(|range| self.entries.range(range))
|
|
}
|
|
fn renamed_len(&self) -> usize {
|
|
let Self {
|
|
next_renamed_mop_id: _,
|
|
entries,
|
|
incomplete_back_entry,
|
|
config: _,
|
|
} = self;
|
|
entries
|
|
.iter()
|
|
.chain(incomplete_back_entry)
|
|
.map(|entries| entries.renamed_entries.len())
|
|
.sum()
|
|
}
|
|
fn renamed(&self) -> impl DoubleEndedIterator<Item = &RobEntry<C>> + Clone {
|
|
let Self {
|
|
next_renamed_mop_id: _,
|
|
entries,
|
|
incomplete_back_entry,
|
|
config: _,
|
|
} = self;
|
|
entries
|
|
.iter()
|
|
.chain(incomplete_back_entry)
|
|
.flat_map(|entries| &entries.renamed_entries)
|
|
}
|
|
fn renamed_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut RobEntry<C>> {
|
|
let Self {
|
|
next_renamed_mop_id: _,
|
|
entries,
|
|
incomplete_back_entry,
|
|
config: _,
|
|
} = self;
|
|
entries
|
|
.iter_mut()
|
|
.chain(incomplete_back_entry)
|
|
.flat_map(|entries| &mut entries.renamed_entries)
|
|
}
|
|
fn try_renamed_by_id(&self, id: &SimValue<MOpId>) -> Option<&RobEntry<C>> {
|
|
self.renamed().find(|v| v.mop.id == *id)
|
|
}
|
|
fn try_renamed_by_id_mut(&mut self, id: &SimValue<MOpId>) -> Option<&mut RobEntry<C>> {
|
|
self.renamed_mut().find(|v| v.mop.id == *id)
|
|
}
|
|
#[track_caller]
|
|
fn renamed_by_id(&self, id: &SimValue<MOpId>) -> &RobEntry<C> {
|
|
match self.try_renamed_by_id(id) {
|
|
Some(v) => v,
|
|
None => panic!("MOpId not found: {id:?}"),
|
|
}
|
|
}
|
|
fn renamed_by_id_mut(&mut self, id: &SimValue<MOpId>) -> &mut RobEntry<C> {
|
|
match self.try_renamed_by_id_mut(id) {
|
|
Some(v) => v,
|
|
None => panic!("MOpId not found: {id:?}"),
|
|
}
|
|
}
|
|
fn renamed_push_back_with_new_id(
|
|
&mut self,
|
|
unrenamed: &SimValue<MOpInstance<MOp>>,
|
|
mut renamed: RobEntry<C>,
|
|
) {
|
|
let replacement_id = self
|
|
.next_renamed_mop_id
|
|
.as_int()
|
|
.wrapping_add(1)
|
|
.into_sim_value();
|
|
renamed.mop.id = mem::replace(&mut self.next_renamed_mop_id, replacement_id);
|
|
self.incomplete_back_entry
|
|
.get_or_insert_with(|| RobEntries {
|
|
unrenamed: unrenamed.clone(),
|
|
rename_table_updates: Vec::new(),
|
|
renamed_entries: VecDeque::new(),
|
|
})
|
|
.renamed_entries
|
|
.push_back(renamed);
|
|
}
|
|
fn finished_unrenamed_push_back(&mut self, unrenamed: &SimValue<MOpInstance<MOp>>) {
|
|
let entry = self
|
|
.incomplete_back_entry
|
|
.take()
|
|
.unwrap_or_else(|| RobEntries {
|
|
unrenamed: unrenamed.clone(),
|
|
rename_table_updates: Vec::new(),
|
|
renamed_entries: VecDeque::new(),
|
|
});
|
|
self.entries.push_back(entry);
|
|
}
|
|
fn clear(&mut self) {
|
|
let Self {
|
|
next_renamed_mop_id: _,
|
|
entries,
|
|
incomplete_back_entry,
|
|
config: _,
|
|
} = self;
|
|
entries.clear();
|
|
*incomplete_back_entry = None;
|
|
}
|
|
fn unrenamed_back_append_rename_table_update(
|
|
&mut self,
|
|
unrenamed: &SimValue<MOpInstance<MOp>>,
|
|
update: RenameTableUpdate<C>,
|
|
) {
|
|
self.incomplete_back_entry
|
|
.get_or_insert_with(|| RobEntries {
|
|
unrenamed: unrenamed.clone(),
|
|
rename_table_updates: Vec::new(),
|
|
renamed_entries: VecDeque::new(),
|
|
})
|
|
.rename_table_updates
|
|
.push(update);
|
|
}
|
|
}
|
|
|
|
/// sim-only string type used for human-readable per-µOp timeline text
type SimOnlyString = SimOnly<String>;
#[expect(non_upper_case_globals)]
// value-level alias so `SimOnlyString` can be used like the other HDL type names
const SimOnlyString: SimOnlyString = SimOnlyString::TYPE;

/// timeline length: ROB size rounded up to a power of two, so µOp ids can
/// be masked instead of taken modulo
#[hdl(get(|c| c.rob_size.get().next_power_of_two()))]
type PerInsnTimelineLen<C: PhantomConstGet<CpuConfig>> = DynSize;
|
|
|
|
#[hdl(no_static)]
/// HDL mirror of [`RenameExecuteRetireState`] used for debug-state output
pub struct RenameExecuteRetireDebugState<C: PhantomConstGet<CpuConfig>> {
    /// µOps accepted but whose renaming hasn't been processed yet
    rename_delayed: ArrayVec<MOpInstance<MOp>, TwiceCpuConfigFetchWidth<C>>,
    rename_table: RenameTableDebugState<C>,
    retire_rename_table: RenameTableDebugState<C>,
    rob: ReorderBufferDebugState<C>,
    next_pc_canceling: HdlOption<NextPcCancelingDebugState>,
    /// per-unit flags: `true` while a cancel is outstanding toward that unit
    unit_canceling: ArrayType<Bool, CpuConfigUnitCount<C>>,
    /// per-unit, per-out-reg register file contents (`HdlNone` = not yet written)
    l1_reg_file: ArrayType<
        ArrayType<HdlOption<PRegValue>, CpuConfig2PowOutRegNumWidth<C>>,
        CpuConfigUnitCount<C>,
    >,
    /// per-µOp human-readable status lines, indexed by masked µOp id
    per_insn_timeline: ArrayType<SimOnlyString, PerInsnTimelineLen<C>>,
}

#[derive(Debug)]
/// full simulation-side state of the rename/execute/retire control logic
struct RenameExecuteRetireState<C: PhantomConstCpuConfig> {
    /// µOps accepted but whose renaming hasn't been processed yet
    rename_delayed: VecDeque<SimValue<MOpInstance<MOp>>>,
    /// speculative rename table, updated as µOps are renamed
    rename_table: RenameTable<C>,
    /// rename table reflecting only retired µOps; used to rebuild
    /// `rename_table` after a cancel
    retire_rename_table: RenameTable<C>,
    rob: ReorderBuffer<C>,
    /// in-progress cancel handshake with the next-pc circuit, if any
    next_pc_canceling: Option<NextPcCancelingState>,
    /// per-unit flags: `true` while a cancel is outstanding toward that unit
    unit_canceling: Box<[bool]>,
    /// per-unit, per-out-reg register file contents (`None` = not yet written)
    l1_reg_file: Box<[Box<[Option<SimValue<PRegValue>>]>]>,
    /// index of the unit that implements the L2 register file
    /// (the `TransformedMove` unit)
    l2_reg_file_unit_index: usize,
    config: C,
}
|
|
|
|
impl<C: PhantomConstCpuConfig> RenameExecuteRetireState<C> {
|
|
fn new(config: C) -> Self {
|
|
Self {
|
|
rename_delayed: VecDeque::with_capacity(TwiceCpuConfigFetchWidth[config]),
|
|
rename_table: RenameTable::new(config),
|
|
retire_rename_table: RenameTable::new(config),
|
|
rob: ReorderBuffer::new(config),
|
|
next_pc_canceling: None,
|
|
unit_canceling: vec![false; CpuConfigUnitCount[config]].into_boxed_slice(),
|
|
l1_reg_file: vec![
|
|
vec![None; CpuConfig2PowOutRegNumWidth[config]].into_boxed_slice();
|
|
CpuConfigUnitCount[config]
|
|
]
|
|
.into_boxed_slice(),
|
|
l2_reg_file_unit_index: config
|
|
.get()
|
|
.units
|
|
.iter()
|
|
.position(|unit| unit.kind == UnitKind::TransformedMove)
|
|
.expect("Unit for L2 register file is missing"),
|
|
config,
|
|
}
|
|
}
|
|
fn is_canceling(&self) -> bool {
|
|
self.next_pc_canceling.is_some() || self.unit_canceling.iter().any(|v| *v)
|
|
}
|
|
fn per_insn_timeline(&self) -> SimValue<ArrayType<SimOnlyString, PerInsnTimelineLen<C>>> {
|
|
let len = PerInsnTimelineLen[self.config];
|
|
let retval_ty = ArrayType[SimOnlyString][len];
|
|
assert!(len.is_power_of_two());
|
|
let mask = len - 1;
|
|
let empty_string = SimOnlyValue::new(String::new());
|
|
let mut retval =
|
|
SimValue::from_array_elements(retval_ty, (0..len).map(|_| empty_string.clone()));
|
|
for rob in self.rob.renamed() {
|
|
let masked_id = rob.mop.id.as_int() as usize & mask;
|
|
**retval[masked_id] = fmt::from_fn(|f| {
|
|
f.write_str(rob.mop_in_unit_state.debug_str())?;
|
|
if rob.finished.is_some() {
|
|
f.write_str("(finished)")?;
|
|
}
|
|
if rob.caused_cancel.is_some() {
|
|
f.write_str("(caused cancel)")?;
|
|
}
|
|
write!(
|
|
f,
|
|
": {}{:#x}{}: {:?}",
|
|
if *rob.mop.is_first_mop_in_insn {
|
|
""
|
|
} else {
|
|
".."
|
|
},
|
|
rob.mop.pc.as_int(),
|
|
if *rob.mop.is_last_mop_in_insn {
|
|
""
|
|
} else {
|
|
".."
|
|
},
|
|
rob.mop.mop,
|
|
)
|
|
})
|
|
.to_string();
|
|
// TODO
|
|
}
|
|
retval
|
|
}
|
|
    /// Publishes a snapshot of the entire internal state onto the
    /// `state_for_debug` output so it is visible in simulation waveforms.
    #[hdl]
    async fn write_for_debug(
        &self,
        sim: &mut ExternModuleSimulationState,
        state_for_debug: Expr<RenameExecuteRetireDebugState<C>>,
    ) {
        // destructure exhaustively so adding a field forces updating this snapshot
        let Self {
            ref rename_delayed,
            ref rename_table,
            ref retire_rename_table,
            ref rob,
            ref next_pc_canceling,
            ref unit_canceling,
            ref l1_reg_file,
            l2_reg_file_unit_index: _,
            config: _,
        } = *self;
        sim.write(
            state_for_debug,
            #[hdl(sim)]
            RenameExecuteRetireDebugState::<_> {
                rename_delayed: state_for_debug
                    .ty()
                    .rename_delayed
                    .from_iter_sim(zeroed(StaticType::TYPE), rename_delayed)
                    .expect("known to fit"),
                rename_table: rename_table.to_debug_state(),
                retire_rename_table: retire_rename_table.to_debug_state(),
                rob: rob.debug_state(),
                next_pc_canceling: NextPcCancelingState::debug_state(next_pc_canceling),
                unit_canceling,
                l1_reg_file: SimValue::from_array_elements(
                    state_for_debug.ty().l1_reg_file,
                    l1_reg_file.iter().map(|v| {
                        SimValue::from_array_elements(state_for_debug.ty().l1_reg_file.element(), v)
                    }),
                ),
                per_insn_timeline: self.per_insn_timeline(),
            },
        )
        .await;
    }
|
|
    /// Tells `next_pc` which µOps are still in flight (delayed plus the
    /// unrenamed part of the ROB); sends `HdlNone` while a cancel is pending.
    #[hdl]
    async fn write_to_next_pc_next_insns(
        &self,
        sim: &mut ExternModuleSimulationState,
        next_insns: Expr<HdlOption<ArrayVec<MOpInstance<MOp>, CpuConfigRobSize<C>>>>,
    ) {
        sim.write(
            next_insns,
            if self.is_canceling() {
                #[hdl(sim)]
                (next_insns.ty()).HdlNone()
            } else {
                #[hdl(sim)]
                (next_insns.ty()).HdlSome(
                    next_insns
                        .ty()
                        .HdlSome
                        .from_iter_sim(
                            zeroed(MOpInstance[MOp]),
                            self.rename_delayed.iter().chain(self.rob.unrenamed()),
                        )
                        .ok()
                        .expect("known to fit"),
                )
            },
        )
        .await;
    }
|
|
fn space_available_for_unit(&self, unit_index: usize) -> usize {
|
|
let mut retval = self.config.get().unit_max_in_flight(unit_index);
|
|
for renamed in self.rob.renamed() {
|
|
if renamed.unit_index() == unit_index {
|
|
let Some(v) = NonZero::new(retval.get() - 1) else {
|
|
return 0;
|
|
};
|
|
retval = v;
|
|
}
|
|
}
|
|
retval.get()
|
|
}
|
|
    /// Finds an output register of unit `unit_index` that is referenced
    /// neither by any in-flight µOp (as destination or source) nor by either
    /// rename table. Returns `None` when all of them are in use.
    #[hdl]
    fn find_free_unit_out_reg(&self, unit_index: usize) -> Option<usize> {
        // TODO: replace searching through instructions and rename tables with tracking when regs are free
        let mut allocated_regs = vec![false; 1 << self.config.get().out_reg_num_width];
        for renamed in self.rob.renamed() {
            if renamed.unit_index() == unit_index {
                allocated_regs[renamed.unit_out_reg_index()] = true;
            }
            // source registers of still-in-flight µOps must stay readable
            MOpTrait::for_each_src_reg_sim_ref(&renamed.mop.mop, &mut |src_reg, _index| {
                #[hdl(sim)]
                let PRegNum::<_> {
                    unit_num,
                    unit_out_reg,
                } = src_reg.cast_bits_to(PRegNum[self.config]);
                if Some(unit_index) == UnitNum::index_sim(&unit_num) {
                    allocated_regs[UnitOutRegNum::value_sim(&unit_out_reg)] = true;
                }
            });
        }
        // both speculative and architectural rename tables pin registers
        for entry in self
            .rename_table
            .entries
            .iter()
            .chain(self.retire_rename_table.entries.iter())
        {
            #[hdl(sim)]
            match entry {
                RenameTableEntry::<_>::L1(entry) => {
                    if Some(unit_index) == UnitNum::index_sim(&entry.unit_num) {
                        allocated_regs[UnitOutRegNum::value_sim(&entry.unit_out_reg)] = true;
                    }
                }
                RenameTableEntry::<_>::L2(_) => {}
            }
        }
        allocated_regs.iter().position(|v| !v)
    }
|
|
    /// Finds an L2 register that is neither targeted by any in-flight
    /// L2-register-file µOp nor referenced by either rename table.
    #[hdl]
    fn find_free_l2_reg(&self) -> Option<usize> {
        // TODO: replace searching through instructions and rename tables with tracking when regs are free
        let mut allocated_regs = vec![false; L2RegNum.l2_reg_count()];
        for renamed in self.rob.renamed() {
            #[hdl(sim)]
            if let RenamedMOp::<_>::TransformedMove(l2_register_file_op) = &renamed.mop.mop {
                // the L2 register number travels in the op's immediate field
                let l2_reg = #[hdl(sim)]
                match l2_register_file_op {
                    L2RegisterFileMOp::<_, _>::ReadL2Reg(v) => &v.common.imm,
                    L2RegisterFileMOp::<_, _>::WriteL2Reg(v) => &v.common.imm,
                };
                allocated_regs[L2RegNum::value_sim(l2_reg)] = true;
            }
        }
        // both speculative and architectural rename tables pin L2 registers
        for entry in self
            .rename_table
            .entries
            .iter()
            .chain(self.retire_rename_table.entries.iter())
        {
            #[hdl(sim)]
            match entry {
                RenameTableEntry::<_>::L1(_) => {}
                RenameTableEntry::<_>::L2(entry) => {
                    allocated_regs[L2RegNum::value_sim(entry)] = true;
                }
            }
        }
        allocated_regs.iter().position(|v| !v)
    }
|
|
fn add_renamed_with_new_id(
|
|
&mut self,
|
|
unrenamed: &SimValue<MOpInstance<MOp>>,
|
|
renamed: RobEntry<C>,
|
|
) {
|
|
self.l1_reg_file[renamed.unit_index()][renamed.unit_out_reg_index()] = None;
|
|
self.rob.renamed_push_back_with_new_id(unrenamed, renamed);
|
|
}
|
|
    /// Applies `update` to the speculative rename table and records it on the
    /// ROB entry for `unrenamed`, so retirement can later replay it into
    /// `retire_rename_table` (or drop it on cancel).
    fn update_rename_table(
        &mut self,
        unrenamed: &SimValue<MOpInstance<MOp>>,
        update: RenameTableUpdate<C>,
    ) {
        self.rename_table.update(&update, "rename_table");
        self.rob
            .unrenamed_back_append_rename_table_update(unrenamed, update);
    }
|
|
    /// Attempts to rename `insn` and insert it into the ROB. Returns
    /// `Err(insn)` (caller retries later) when the ROB is full, the chosen
    /// unit has no space, or no free destination register is available.
    #[hdl]
    fn try_rename(
        &mut self,
        insn: SimValue<MOpInstance<MOp>>,
    ) -> Result<(), SimValue<MOpInstance<MOp>>> {
        if self.rob.unrenamed_len() >= self.config.get().rob_size.get() {
            return Err(insn);
        }
        if self.rob.renamed_len() >= self.config.get().rob_size.get() {
            return Err(insn);
        }
        let unit_kind = UnitMOp::kind_sim(&insn.mop);
        // register-to-register moves are handled entirely at rename time:
        // the dest regs are simply aliased to the source's rename entry,
        // no unit ever executes them.
        #[hdl(sim)]
        if let MOp::TransformedMove(move_reg_mop) = &insn.mop {
            let mut src_regs = [MOpRegNum::CONST_ZERO_REG_NUM; 1];
            MOpTrait::for_each_src_reg_sim_ref(move_reg_mop, &mut |src_reg, index| {
                src_regs[index] = src_reg.as_int() as u32;
            });
            let [src_reg] = src_regs;
            let renamed_reg = self.rename_table.entries[src_reg as usize].clone();
            println!("moving from {src_reg:#x} renamed: {renamed_reg:?}");
            let unrenamed_dest_regs =
                MOpDestReg::regs_sim(MOpTrait::dest_reg_sim_ref(move_reg_mop));
            assert!(self.rob.incomplete_back_entry.is_none());
            for unrenamed_reg_num in unrenamed_dest_regs {
                self.update_rename_table(
                    &insn,
                    RenameTableUpdate::Write {
                        unrenamed_reg_num,
                        new: renamed_reg.clone(),
                    },
                );
            }
            self.rob.finished_unrenamed_push_back(&insn);
            return Ok(());
        }
        // candidate unit during unit selection below
        #[derive(Clone, Copy)]
        struct ChosenUnit {
            unit_index: usize,
            out_reg_num: Option<usize>,
            space_available: usize,
        }
        impl ChosenUnit {
            // prefer a unit with a free output register, then the one
            // with the most space available
            fn is_better_than(self, other: Self) -> bool {
                let Self {
                    unit_index: _,
                    out_reg_num,
                    space_available,
                } = self;
                if out_reg_num.is_some() != other.out_reg_num.is_some() {
                    out_reg_num.is_some()
                } else {
                    space_available > other.space_available
                }
            }
        }
        let mut chosen_unit = None;
        for (unit_index, unit_config) in self.config.get().units.iter().enumerate() {
            if unit_config.kind != unit_kind {
                continue;
            }
            let cur_unit = ChosenUnit {
                unit_index,
                out_reg_num: self.find_free_unit_out_reg(unit_index),
                space_available: self.space_available_for_unit(unit_index),
            };
            let chosen_unit = chosen_unit.get_or_insert(cur_unit);
            if cur_unit.is_better_than(*chosen_unit) {
                *chosen_unit = cur_unit;
            }
        }
        let Some(ChosenUnit {
            unit_index,
            out_reg_num,
            space_available,
        }) = chosen_unit
        else {
            panic!(
                "there are no units of kind: {unit_kind:?}:\n{:?}",
                self.config,
            );
        };
        if space_available == 0 {
            return Err(insn);
        }
        let Some(out_reg_num) = out_reg_num else {
            // no free L1 output register: spilling one to the L2 register
            // file would free it, but that path isn't implemented yet
            return if self.space_available_for_unit(self.l2_reg_file_unit_index) > 0
                && let Some(l2_reg_index) = self.find_free_l2_reg()
            {
                todo!("maybe start a L2 register file store");
            } else {
                Err(insn)
            };
        };
        let out_reg_num_sim = UnitOutRegNum[self.config].new_sim(out_reg_num);
        #[hdl(sim)]
        let MOpInstance::<_> {
            fetch_block_id,
            id: _,
            pc,
            predicted_next_pc,
            size_in_bytes,
            is_first_mop_in_insn,
            is_last_mop_in_insn,
            mop,
        } = &insn;
        let mut needed_load = None;
        let unrenamed_dest_regs = MOpDestReg::regs_sim(MOpTrait::dest_reg_sim_ref(mop));
        let renamed_dest_reg = #[hdl(sim)]
        PRegNum::<_> {
            unit_num: UnitNum[self.config].from_index_sim(unit_index),
            unit_out_reg: out_reg_num_sim,
        };
        // rewrite dest + sources to physical registers; sources still in the
        // L2 register file are noted in `needed_load` and temporarily mapped
        // to the zero register.
        let mop = MOpTrait::map_regs_sim(
            mop,
            &renamed_dest_reg,
            CpuConfigPRegNumWidth[self.config],
            &mut |src_reg, index| {
                let renamed = &self.rename_table.entries[src_reg.as_int() as usize];
                println!("renaming src[{index}] from {src_reg:?} to {renamed:?}");
                #[hdl(sim)]
                match renamed {
                    RenameTableEntry::<_>::L1(v) => v.cast_to_bits(),
                    RenameTableEntry::<_>::L2(v) => {
                        needed_load.get_or_insert_with(|| v.clone());
                        PRegNum[self.config]
                            .const_zero()
                            .cast_to_bits()
                            .into_sim_value()
                    }
                }
            },
        );
        if let Some(needed_load) = needed_load {
            // a source is in the L2 register file: issue a ReadL2Reg µOp
            // instead of `insn` this time; `insn` itself is retried later.
            return if let Some(out_reg) = self.find_free_unit_out_reg(self.l2_reg_file_unit_index)
                && self.space_available_for_unit(self.l2_reg_file_unit_index) > 0
            {
                let dest = #[hdl(sim)]
                PRegNum::<_> {
                    unit_num: UnitNum[self.config].from_index_sim(self.l2_reg_file_unit_index),
                    unit_out_reg: UnitOutRegNum[self.config].new_sim(out_reg),
                };
                self.update_rename_table(
                    &insn,
                    RenameTableUpdate::UpdateForReadL2Reg {
                        dest: dest.clone(),
                        src: needed_load.clone(),
                    },
                );
                self.add_renamed_with_new_id(
                    &insn,
                    RobEntry::new(
                        #[hdl(sim)]
                        MOpInstance::<_> {
                            fetch_block_id,
                            id: MOpId.zero(), // filled in by add_renamed_with_new_id
                            pc,
                            predicted_next_pc,
                            size_in_bytes,
                            is_first_mop_in_insn,
                            is_last_mop_in_insn,
                            mop: ReadL2RegMOp::read_l2_reg::<RenamedMOp<C>>(
                                dest,
                                repeat(RenamedSrcRegUInt[self.config].zero(), ConstUsize),
                                needed_load,
                            ),
                        },
                    ),
                );
                Ok(())
            } else {
                Err(insn)
            };
        }
        let mop = UnitMOp::with_transformed_move_op_sim(
            mop,
            RenamedMOp[self.config].TransformedMove,
            // TransformedMove was already handled above, so this can't occur
            |_move_reg| unreachable!(),
        );
        let renamed_dest_reg = #[hdl(sim)]
        (RenameTableEntry[self.config]).L1(renamed_dest_reg);
        // point all architectural dest regs at the new physical register
        for unrenamed_reg_num in unrenamed_dest_regs {
            self.update_rename_table(
                &insn,
                RenameTableUpdate::Write {
                    unrenamed_reg_num,
                    new: renamed_dest_reg.clone(),
                },
            );
        }
        self.add_renamed_with_new_id(
            &insn,
            RobEntry::new(
                #[hdl(sim)]
                MOpInstance::<_> {
                    fetch_block_id,
                    id: MOpId.zero(), // filled in by add_renamed_with_new_id
                    pc,
                    predicted_next_pc,
                    size_in_bytes,
                    is_first_mop_in_insn,
                    is_last_mop_in_insn,
                    mop,
                },
            ),
        );
        self.rob.finished_unrenamed_push_back(&insn);
        Ok(())
    }
|
|
    /// Returns the µOp (if any) that should be enqueued into unit
    /// `unit_index` this cycle; `HdlNone` while canceling or when no renamed
    /// ROB entry for that unit is awaiting enqueue.
    #[hdl]
    fn get_unit_enqueue(&self, unit_index: usize) -> SimValue<HdlOption<UnitEnqueue<C>>> {
        let ret_ty = HdlOption[UnitEnqueue[self.config]];
        if self.is_canceling() {
            let retval = #[hdl(sim)]
            ret_ty.HdlNone();
            return retval; // separate variable to work around rust-analyzer parse error
        }
        for rob in self.rob.renamed() {
            // after_enqueue() returning Some means the entry is still waiting
            // to be enqueued into its unit
            if rob.unit_index() == unit_index
                && let Some(_) = rob.mop_in_unit_state.after_enqueue()
            {
                let retval = #[hdl(sim)]
                ret_ty.HdlSome(
                    #[hdl(sim)]
                    UnitEnqueue::<_> {
                        mop: &rob.mop,
                        config: self.config,
                    },
                );
                return retval;
            }
        }
        #[hdl(sim)]
        ret_ty.HdlNone()
    }
|
|
    /// Returns a µOp of unit `unit_index` whose source operands are all
    /// available in the L1 register file (the zero register reads as zero),
    /// together with those operand values; `HdlNone` otherwise.
    #[hdl]
    fn get_unit_inputs_ready(&self, unit_index: usize) -> SimValue<HdlOption<UnitInputsReady<C>>> {
        let ret_ty = HdlOption[UnitInputsReady[self.config]];
        if self.is_canceling() {
            let retval = #[hdl(sim)]
            ret_ty.HdlNone();
            return retval; // separate variable to work around rust-analyzer parse error
        }
        let zero_reg = PRegNum[self.config].const_zero().into_sim_value();
        let zero_value = zeroed(PRegValue);
        for rob in self.rob.renamed() {
            if rob.unit_index() == unit_index
                && let Some(_) = rob.mop_in_unit_state.with_inputs_ready()
            {
                // start with all slots "ready with zero"; the closure below
                // overwrites the slots actually used by the µOp
                let mut src_values: [_; COMMON_MOP_SRC_LEN] =
                    std::array::from_fn(|_| Some(zero_value.clone()));
                MOpTrait::for_each_src_reg_sim_ref(&rob.mop.mop, &mut |src_reg, index| {
                    let src_reg = src_reg.cast_bits_to(zero_reg.ty());
                    #[hdl(sim)]
                    let PRegNum::<_> {
                        unit_num,
                        unit_out_reg,
                    } = &src_reg;
                    if let Some(src_unit_index) = UnitNum::index_sim(unit_num) {
                        // None here means the producing µOp hasn't written yet
                        src_values[index] = self.l1_reg_file[src_unit_index]
                            [UnitOutRegNum::value_sim(unit_out_reg)]
                        .clone();
                    } else {
                        assert_eq!(src_reg, zero_reg);
                        src_values[index] = Some(zeroed(PRegValue));
                    }
                });
                if src_values.iter().all(|v| v.is_some()) {
                    let src_values: [SimValue<_>; 3] = src_values.map(Option::unwrap);
                    let retval = #[hdl(sim)]
                    ret_ty.HdlSome(
                        #[hdl(sim)]
                        UnitInputsReady::<_> {
                            mop: &rob.mop,
                            src_values,
                            config: self.config,
                        },
                    );
                    return retval;
                }
            }
        }
        #[hdl(sim)]
        ret_ty.HdlNone()
    }
|
|
    /// Reports a µOp of unit `unit_index` that this stage no longer considers
    /// speculative but whose unit-side state still does; `HdlNone` otherwise.
    #[hdl]
    fn get_unit_mop_is_no_longer_speculative(
        &self,
        unit_index: usize,
    ) -> SimValue<HdlOption<UnitMOpIsNoLongerSpeculative<C>>> {
        let ret_ty = HdlOption[UnitMOpIsNoLongerSpeculative[self.config]];
        if self.is_canceling() {
            let retval = #[hdl(sim)]
            ret_ty.HdlNone();
            return retval; // separate variable to work around rust-analyzer parse error
        }
        for rob in self.rob.renamed() {
            if rob.unit_index() == unit_index
                && !rob.is_speculative
                && let Some(_) = rob.mop_in_unit_state.without_speculative()
            {
                let retval = #[hdl(sim)]
                ret_ty.HdlSome(
                    #[hdl(sim)]
                    UnitMOpIsNoLongerSpeculative::<_> {
                        id: &rob.mop.id,
                        config: self.config,
                    },
                );
                return retval; // separate variable to work around rust-analyzer parse error
            }
        }
        #[hdl(sim)]
        ret_ty.HdlNone()
    }
|
|
    /// Handles a unit reporting that a µOp's output value is ready: stores
    /// the value into the L1 register file, records the predictor op, and
    /// advances the entry's unit state.
    #[hdl]
    fn unit_output_ready(&mut self, output_ready: SimValue<UnitOutputReady<C>>) {
        #[hdl(sim)]
        let UnitOutputReady::<_> {
            id,
            dest_value,
            predictor_op,
        } = output_ready;
        assert!(!self.is_canceling());
        let rob = self.rob.renamed_by_id_mut(&id);
        let unit_index = rob.unit_index();
        let out_reg_index = rob.unit_out_reg_index();
        let RobEntry {
            mop: _,
            mop_in_unit_state,
            is_speculative: _,
            finished,
            caused_cancel,
        } = rob;
        // a µOp's output may become ready at most once
        assert!(finished.is_none());
        assert!(caused_cancel.is_none());
        let l1_reg = &mut self.l1_reg_file[unit_index][out_reg_index];
        assert!(l1_reg.is_none());
        *l1_reg = Some(dest_value);
        *finished = Some(predictor_op);
        *mop_in_unit_state = mop_in_unit_state
            .after_output_ready()
            .expect("should be valid state for output to become ready");
    }
|
|
    /// Handles a unit reporting that a µOp finished and/or caused a cancel:
    /// records the cancel request (if any) on the ROB entry and advances its
    /// unit state, panicking if the transition is invalid.
    #[hdl]
    fn unit_finish_cause_cancel(
        &mut self,
        finish_cause_cancel: SimValue<UnitFinishCauseCancel<C>>,
    ) {
        #[hdl(sim)]
        let UnitFinishCauseCancel::<_> {
            id,
            caused_cancel: unit_caused_cancel,
            config: _,
        } = finish_cause_cancel;
        assert!(!self.is_canceling());
        let RobEntry {
            mop: _,
            mop_in_unit_state,
            is_speculative: _,
            finished,
            caused_cancel,
        } = self.rob.renamed_by_id_mut(&id);
        // a µOp may cause a cancel at most once
        assert!(caused_cancel.is_none());
        let cancel_after_retire;
        #[hdl(sim)]
        if let HdlSome(unit_caused_cancel) = unit_caused_cancel {
            cancel_after_retire = *unit_caused_cancel.cancel_after_retire;
            *caused_cancel = Some(unit_caused_cancel);
        } else {
            cancel_after_retire = false;
        }
        if let Some(v) = mop_in_unit_state
            .after_finish_cause_cancel(cancel_after_retire, caused_cancel.is_some())
        {
            *mop_in_unit_state = v
        } else {
            panic!(
                "MOp {id:?} made an invalid attempt to finish/cause a cancel:\n\
                mop_in_unit_state={mop_in_unit_state:?}\n\
                finished={finished:?}\n\
                caused_cancel={caused_cancel:?}"
            );
        }
    }
|
|
fn get_from_post_decode_ready(&self) -> usize {
|
|
if self.is_canceling() {
|
|
0
|
|
} else {
|
|
TwiceCpuConfigFetchWidth[self.config]
|
|
.saturating_sub(self.rename_delayed.len())
|
|
.min(CpuConfigFetchWidth[self.config])
|
|
.min(
|
|
CpuConfigRobSize[self.config]
|
|
.saturating_sub(self.rename_delayed.len())
|
|
.saturating_sub(self.rob.unrenamed_len()),
|
|
)
|
|
}
|
|
}
|
|
fn handle_from_post_decode(&mut self, insns: &[SimValue<MOpInstance<MOp>>]) {
|
|
if insns.is_empty() {
|
|
return;
|
|
}
|
|
assert!(!self.is_canceling());
|
|
for insn in insns {
|
|
self.rename_delayed.push_back(insn.clone());
|
|
}
|
|
for _ in 0..CpuConfigFetchWidth[self.config] {
|
|
let Some(insn) = self.rename_delayed.pop_front() else {
|
|
break;
|
|
};
|
|
match self.try_rename(insn) {
|
|
Ok(()) => {}
|
|
Err(insn) => {
|
|
self.rename_delayed.push_front(insn);
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
    /// Completes a cancel: drops all speculative state (delayed µOps, ROB
    /// contents) and rolls the speculative rename table back to the retired
    /// one. Requires the handshake to be in the `NeedReceiveCancel` phase.
    #[hdl]
    fn finish_receive_cancel_from_post_decode(&mut self) {
        // destructure exhaustively so new fields must be considered here
        let Self {
            rename_delayed,
            rename_table,
            retire_rename_table,
            rob,
            next_pc_canceling,
            unit_canceling: _,
            l1_reg_file: _,
            l2_reg_file_unit_index: _,
            config: _,
        } = self;
        assert_eq!(
            *next_pc_canceling,
            Some(NextPcCancelingState::NeedReceiveCancel)
        );
        rename_delayed.clear();
        // roll the speculative rename table back to architectural state
        rename_table.clone_from(retire_rename_table);
        rob.clear();
        *next_pc_canceling = None;
    }
|
|
    /// Advances the cancel handshake with `next_pc` from "need to send a
    /// cancel" to "waiting for post-decode to acknowledge it".
    #[hdl]
    fn finish_send_cancel_to_next_pc(&mut self) {
        assert!(matches!(
            self.next_pc_canceling,
            Some(NextPcCancelingState::NeedSendCancel(_))
        ));
        self.next_pc_canceling = Some(NextPcCancelingState::NeedReceiveCancel);
    }
|
|
    /// Computes (without mutating state) the predictor ops for the retire
    /// groups that are fully finished, up to `fetch_width` ops, stopping at
    /// the first group containing a cancel.
    #[hdl]
    fn peek_retiring_insns(&self) -> Vec<SimValue<NextPcPredictorOp<C>>> {
        let mut retval = Vec::new();
        for retire_group in self.rob.retire_groups() {
            // first pass: verify every µOp up to any cancel is finished
            for renamed_entry in retire_group.clone().flat_map(|v| &v.renamed_entries) {
                if let RobEntry {
                    mop: _,
                    mop_in_unit_state: MOpInUnitState::FinishedAndOrCausedCancel,
                    is_speculative: _,
                    finished,
                    caused_cancel,
                } = renamed_entry
                {
                    if caused_cancel.is_some() {
                        // only the part before the cancel needs to be ready
                        break;
                    }
                    assert!(finished.is_some());
                } else {
                    // group isn't ready
                    return retval;
                }
            }
            // second pass: merge each entry group's predictor ops into one
            for RobEntries {
                unrenamed: _,
                rename_table_updates: _,
                renamed_entries,
            } in retire_group
            {
                let caused_cancel = renamed_entries.iter().any(|v| v.caused_cancel.is_some());
                // "cancel after retire" is only usable when the cancel is on
                // the last entry and is the only cancel in the group
                let caused_cancel_after_retire =
                    renamed_entries
                        .iter()
                        .rev()
                        .enumerate()
                        .all(|(rev_index, v)| {
                            if rev_index == 0 {
                                v.caused_cancel
                                    .as_ref()
                                    .is_some_and(|v| *v.cancel_after_retire)
                            } else {
                                v.caused_cancel.is_none()
                            }
                        });
                if !caused_cancel || caused_cancel_after_retire {
                    let mut unrenamed_op = #[hdl(sim)]
                    NextPcPredictorOp::<_> {
                        call_stack_op: #[hdl(sim)]
                        CallStackOp.None(),
                        cond_br_taken: #[hdl(sim)]
                        HdlNone(),
                        config: self.config,
                    };
                    // take the first non-trivial call_stack_op / cond_br_taken
                    // reported by any µOp in the group
                    for renamed in renamed_entries {
                        let Some(finished) = &renamed.finished else {
                            unreachable!();
                        };
                        #[hdl(sim)]
                        let NextPcPredictorOp::<_> {
                            call_stack_op,
                            cond_br_taken,
                            config: _,
                        } = finished;
                        #[hdl(sim)]
                        if let CallStackOp::None = &unrenamed_op.call_stack_op {
                            unrenamed_op.call_stack_op = call_stack_op.clone();
                        }
                        #[hdl(sim)]
                        if let HdlNone = &unrenamed_op.cond_br_taken {
                            unrenamed_op.cond_br_taken = cond_br_taken.clone();
                        }
                    }
                    retval.push(unrenamed_op);
                    if retval.len() >= self.config.get().fetch_width.get() {
                        return retval;
                    }
                }
                if caused_cancel {
                    // nothing after a cancel may retire this cycle
                    return retval;
                }
            }
        }
        retval
    }
|
|
    /// Builds the value to present to `next_pc`: a pending cancel takes
    /// priority, then (when not canceling) any instructions ready to retire.
    #[hdl]
    fn retire_peek(&self) -> SimValue<HdlOption<RetireToNextPcInterfaceInner<C>>> {
        let ty = RetireToNextPcInterfaceInner[self.config];
        let ret_ty = HdlOption[ty];
        let next_pc_predictor_op = NextPcPredictorOp[self.config];
        if let Some(NextPcCancelingState::NeedSendCancel(v)) = &self.next_pc_canceling {
            // tell next_pc to cancel and resume fetching at pc `v`
            #[hdl(sim)]
            ret_ty.HdlSome(
                #[hdl(sim)]
                ty.CancelAndStartAt(v),
            )
        } else if self.is_canceling() {
            #[hdl(sim)]
            ret_ty.HdlNone()
        } else {
            let retiring_insns = self.peek_retiring_insns();
            if retiring_insns.is_empty() {
                #[hdl(sim)]
                ret_ty.HdlNone()
            } else {
                #[hdl(sim)]
                ret_ty.HdlSome(
                    #[hdl(sim)]
                    ty.RetiredInstructions(
                        ty.RetiredInstructions
                            .from_iter_sim(zeroed(next_pc_predictor_op), retiring_insns)
                            .expect("known to fit"),
                    ),
                )
            }
        }
    }
|
|
    /// Retires the front ROB entry group: replays its rename-table updates
    /// into the architectural (`retire_rename_table`) table, and starts a
    /// cancel if one of its µOps requested a cancel-after-retire.
    #[hdl]
    fn retire_one(&mut self, retire: &SimValue<NextPcPredictorOp<C>>) {
        assert!(!self.is_canceling());
        // the op's fields were already consumed by next_pc; only destructure
        // to keep this in sync with the type
        #[hdl(sim)]
        let NextPcPredictorOp::<_> {
            call_stack_op: _,
            cond_br_taken: _,
            config: _,
        } = retire;
        let Some(RobEntries {
            unrenamed: _,
            rename_table_updates,
            renamed_entries,
        }) = self.rob.entries.pop_front()
        else {
            unreachable!();
        };
        rename_table_updates
            .iter()
            .for_each(|v| self.retire_rename_table.update(v, "retire_rename_table"));
        for RobEntry {
            mop: _,
            mop_in_unit_state,
            is_speculative: _,
            finished: _,
            caused_cancel,
        } in renamed_entries
        {
            // retire_peek only offers groups that are fully finished
            assert_eq!(mop_in_unit_state, MOpInUnitState::FinishedAndOrCausedCancel);
            if let Some(caused_cancel) = caused_cancel {
                assert!(*caused_cancel.cancel_after_retire);
                self.start_cancel(caused_cancel);
                return;
            }
        }
    }
|
|
    /// Begins a cancel: arms the cancel handshake towards `next_pc` with the
    /// restart pc, and marks every unit as needing a cancel-all.
    #[hdl]
    fn start_cancel(&mut self, caused_cancel: SimValue<UnitCausedCancel<C>>) {
        assert!(!self.is_canceling());
        #[hdl(sim)]
        let UnitCausedCancel::<_> {
            start_at_pc,
            cancel_after_retire: _,
            config: _,
        } = caused_cancel;
        self.next_pc_canceling = Some(NextPcCancelingState::NeedSendCancel(start_at_pc.as_int()));
        self.unit_canceling.fill(true);
    }
|
|
    /// End-of-cycle bookkeeping: clears the speculative flag on the prefix
    /// of renamed µOps up to (and including) the first one that could still
    /// cause a cancel, then starts a cancel if the front entry caused one
    /// that is not deferred until retirement.
    #[hdl]
    fn step(&mut self) {
        if self.is_canceling() {
            return;
        }
        for renamed in self.rob.renamed_mut() {
            let can_cause_cancel = match renamed.mop_in_unit_state {
                MOpInUnitState::NotYetEnqueued => true,
                MOpInUnitState::InputsNotReadySpeculative { can_cause_cancel } => can_cause_cancel,
                MOpInUnitState::InputsReady {
                    speculative: _,
                    can_cause_cancel,
                } => can_cause_cancel,
                MOpInUnitState::OutputReady {
                    speculative: _,
                    can_cause_cancel,
                } => can_cause_cancel,
                MOpInUnitState::FinishedAndOrCausedCancel => renamed.caused_cancel.is_some(),
            };
            renamed.is_speculative = false;
            // µOps after one that may still cancel remain speculative
            if can_cause_cancel {
                break;
            }
        }
        let first_renamed = self.rob.renamed().next();
        if let Some(RobEntry {
            mop: _,
            mop_in_unit_state: MOpInUnitState::FinishedAndOrCausedCancel,
            is_speculative: _,
            finished: _,
            caused_cancel: Some(caused_cancel),
        }) = first_renamed
            && !*caused_cancel.cancel_after_retire
        {
            // immediate (non-after-retire) cancel at the head of the ROB
            let caused_cancel = caused_cancel.clone();
            self.start_cancel(caused_cancel);
            return;
        }
    }
|
|
}
|
|
|
|
/// Main simulation loop of the rename/execute/retire stage: each iteration
/// drives this cycle's combinational outputs, waits for a clock edge, then
/// samples the handshake signals (via `read_past`) and updates `state`.
#[hdl]
async fn rename_execute_retire_run(
    mut sim: ExternModuleSimulationState,
    cd: Expr<ClockDomain>,
    from_post_decode: Expr<PostDecodeOutputInterface<PhantomConst<CpuConfig>>>,
    to_next_pc: Expr<RetireToNextPcInterface<PhantomConst<CpuConfig>>>,
    to_units: Expr<ExecuteToUnitInterfaces<PhantomConst<CpuConfig>>>,
    state_for_debug: Expr<RenameExecuteRetireDebugState<PhantomConst<CpuConfig>>>,
    config: PhantomConst<CpuConfig>,
) {
    let mut state = RenameExecuteRetireState::new(config);
    loop {
        // --- drive outputs for the current cycle ---
        state
            .write_to_next_pc_next_insns(&mut sim, to_next_pc.next_insns)
            .await;
        state.write_for_debug(&mut sim, state_for_debug).await;
        let from_post_decode_ready = state.get_from_post_decode_ready();
        assert!(from_post_decode_ready <= from_post_decode.ty().ready.end());
        sim.write(from_post_decode.ready, from_post_decode_ready)
            .await;
        // only accept a cancel from post-decode while in the receive phase
        sim.write(
            from_post_decode.cancel.ready,
            state.next_pc_canceling == Some(NextPcCancelingState::NeedReceiveCancel),
        )
        .await;
        let retire_peek = state.retire_peek();
        sim.write(to_next_pc.inner.data, &retire_peek).await;
        let is_canceling = state.is_canceling();
        for (unit_index, to_unit) in ExecuteToUnitInterfaces::unit_fields(to_units)
            .into_iter()
            .enumerate()
        {
            #[hdl]
            let ExecuteToUnitInterface::<_> {
                enqueue,
                inputs_ready,
                is_no_longer_speculative,
                cant_cause_cancel: _,
                output_ready: _,
                finish_cause_cancel: _,
                unit_outputs_ready,
                cancel_all,
                config: _,
            } = to_unit;
            sim.write(enqueue.data, state.get_unit_enqueue(unit_index))
                .await;
            sim.write(inputs_ready, state.get_unit_inputs_ready(unit_index))
                .await;
            sim.write(
                cancel_all.data,
                if state.unit_canceling[unit_index] {
                    #[hdl(sim)]
                    HdlSome(())
                } else {
                    #[hdl(sim)]
                    HdlNone()
                },
            )
            .await;
            sim.write(
                is_no_longer_speculative,
                state.get_unit_mop_is_no_longer_speculative(unit_index),
            )
            .await;
            sim.write(unit_outputs_ready, !is_canceling).await;
        }
        // --- sample inputs at the clock edge and update state ---
        sim.wait_for_clock_edge(cd.clk).await;
        let from_post_decode_insns = sim.read_past(from_post_decode.insns, cd.clk).await;
        let from_post_decode_insns = ArrayVec::elements_sim_ref(&from_post_decode_insns);
        // accept only as many µOps as we advertised as ready
        state.handle_from_post_decode(
            from_post_decode_insns
                .get(..from_post_decode_ready)
                .unwrap_or(from_post_decode_insns),
        );
        for (unit_index, to_unit) in ExecuteToUnitInterfaces::unit_fields(to_units)
            .into_iter()
            .enumerate()
        {
            #[hdl]
            let ExecuteToUnitInterface::<_> {
                enqueue,
                inputs_ready,
                is_no_longer_speculative,
                cant_cause_cancel,
                output_ready,
                finish_cause_cancel,
                unit_outputs_ready,
                cancel_all,
                config: _,
            } = to_unit;
            if sim.read_past_bool(enqueue.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(enqueue) = sim.read_past(enqueue.data, cd.clk).await {
                    assert!(!state.is_canceling());
                    let RobEntry {
                        mop: _,
                        mop_in_unit_state,
                        is_speculative: _,
                        finished,
                        caused_cancel,
                    } = state.rob.renamed_by_id_mut(&enqueue.mop.id);
                    assert!(finished.is_none());
                    assert!(caused_cancel.is_none());
                    *mop_in_unit_state = mop_in_unit_state
                        .after_enqueue()
                        .expect("UnitEnqueue is known to be valid");
                }
            }
            #[hdl(sim)]
            if let HdlSome(inputs_ready) = sim.read_past(inputs_ready, cd.clk).await {
                assert!(!state.is_canceling());
                let RobEntry {
                    mop: _,
                    mop_in_unit_state,
                    is_speculative: _,
                    finished,
                    caused_cancel,
                } = state.rob.renamed_by_id_mut(&inputs_ready.mop.id);
                assert!(finished.is_none());
                assert!(caused_cancel.is_none());
                *mop_in_unit_state = mop_in_unit_state
                    .with_inputs_ready()
                    .expect("UnitInputsReady is known to be valid");
            }
            #[hdl(sim)]
            if let HdlSome(is_no_longer_speculative) =
                sim.read_past(is_no_longer_speculative, cd.clk).await
            {
                assert!(!state.is_canceling());
                let RobEntry {
                    mop: _,
                    mop_in_unit_state,
                    is_speculative: _,
                    finished,
                    caused_cancel,
                } = state.rob.renamed_by_id_mut(&is_no_longer_speculative.id);
                assert!(finished.is_none());
                assert!(caused_cancel.is_none());
                *mop_in_unit_state = mop_in_unit_state
                    .without_speculative()
                    .expect("UnitMOpIsNoLongerSpeculative is known to be valid");
            }
            // unit-to-execute messages are only sampled when we told the
            // unit its outputs would be accepted this cycle
            if sim.read_past_bool(unit_outputs_ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(cant_cause_cancel) = sim.read_past(cant_cause_cancel, cd.clk).await {
                    #[hdl(sim)]
                    let UnitMOpCantCauseCancel::<_> { id, config: _ } = cant_cause_cancel;
                    assert!(!state.is_canceling());
                    let RobEntry {
                        mop: _,
                        mop_in_unit_state,
                        is_speculative: _,
                        finished,
                        caused_cancel,
                    } = state.rob.renamed_by_id_mut(&id);
                    assert!(finished.is_none());
                    assert!(caused_cancel.is_none());
                    *mop_in_unit_state = mop_in_unit_state
                        .with_cant_cause_cancel()
                        .expect("UnitMOpCantCauseCancel should be valid");
                }
                #[hdl(sim)]
                if let HdlSome(output_ready) = sim.read_past(output_ready, cd.clk).await {
                    state.unit_output_ready(output_ready);
                }
                #[hdl(sim)]
                if let HdlSome(finish_cause_cancel) =
                    sim.read_past(finish_cause_cancel, cd.clk).await
                {
                    state.unit_finish_cause_cancel(finish_cause_cancel);
                }
            }
            if sim.read_past_bool(cancel_all.ready, cd.clk).await {
                #[hdl(sim)]
                if let HdlSome(v) = sim.read_past(cancel_all.data, cd.clk).await {
                    let () = *v;
                    // the unit acknowledged the cancel-all request
                    assert!(state.unit_canceling[unit_index]);
                    state.unit_canceling[unit_index] = false;
                }
            }
        }
        match &mut state.next_pc_canceling {
            Some(NextPcCancelingState::NeedReceiveCancel) => {
                #[hdl(sim)]
                if let HdlSome(_) = sim.read_past(from_post_decode.cancel.data, cd.clk).await {
                    state.finish_receive_cancel_from_post_decode();
                }
            }
            Some(NextPcCancelingState::NeedSendCancel(_)) => {
                if sim.read_past_bool(to_next_pc.inner.ready, cd.clk).await {
                    state.finish_send_cancel_to_next_pc();
                }
            }
            None => {
                // next_pc accepted the retirement data we offered this cycle
                if sim.read_past_bool(to_next_pc.inner.ready, cd.clk).await {
                    #[hdl(sim)]
                    if let HdlSome(v) = retire_peek {
                        let ops =
                            #[hdl(sim)]
                            if let RetireToNextPcInterfaceInner::<_>::RetiredInstructions(ops) = v {
                                ops
                            } else {
                                unreachable!()
                            };
                        for op in ArrayVec::elements_sim_ref(&ops) {
                            state.retire_one(op);
                        }
                    }
                }
            }
        }
        state.step();
    }
}
|
|
|
|
/// Extern module wrapping the rename/execute/retire stage: declares the
/// interface ports, drives idle values during reset, and runs
/// [`rename_execute_retire_run`] once reset deasserts.
#[hdl_module(extern)]
pub fn rename_execute_retire(config: PhantomConst<CpuConfig>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let from_post_decode: PostDecodeOutputInterface<PhantomConst<CpuConfig>> =
        m.input(PostDecodeOutputInterface[config]);
    #[hdl]
    let to_next_pc: RetireToNextPcInterface<PhantomConst<CpuConfig>> =
        m.output(RetireToNextPcInterface[config]);
    #[hdl]
    let to_units: ExecuteToUnitInterfaces<PhantomConst<CpuConfig>> =
        m.output(ExecuteToUnitInterfaces[config]);
    #[hdl]
    let state_for_debug: RenameExecuteRetireDebugState<PhantomConst<CpuConfig>> =
        m.output(RenameExecuteRetireDebugState[config]);
    // needed so the simulation fn can use read_past on this clock
    m.register_clock_for_past(cd.clk);
    m.extern_module_simulation_fn(
        (
            cd,
            from_post_decode,
            to_next_pc,
            to_units,
            state_for_debug,
            config,
        ),
        async |args, mut sim| {
            let (cd, from_post_decode, to_next_pc, to_units, state_for_debug, config) = args;
            sim.write(state_for_debug, state_for_debug.ty().sim_value_default())
                .await;
            sim.resettable(
                cd,
                // while in reset: drive all outputs to their idle values
                async |mut sim: ExternModuleSimulationState| {
                    sim.write(from_post_decode.ready, 0usize).await;
                    sim.write(from_post_decode.cancel.ready, false).await;
                    sim.write(to_next_pc.inner.data, to_next_pc.ty().inner.data.HdlNone())
                        .await;
                    sim.write(to_next_pc.next_insns, to_next_pc.ty().next_insns.HdlNone())
                        .await;
                    for to_unit in ExecuteToUnitInterfaces::unit_fields(to_units) {
                        #[hdl]
                        let ExecuteToUnitInterface::<_> {
                            enqueue,
                            inputs_ready,
                            is_no_longer_speculative,
                            cant_cause_cancel: _,
                            output_ready: _,
                            finish_cause_cancel: _,
                            unit_outputs_ready,
                            cancel_all,
                            config: _,
                        } = to_unit;
                        sim.write(
                            enqueue.data,
                            #[hdl(sim)]
                            (enqueue.ty().data).HdlNone(),
                        )
                        .await;
                        sim.write(
                            inputs_ready,
                            #[hdl(sim)]
                            (inputs_ready.ty()).HdlNone(),
                        )
                        .await;
                        sim.write(
                            cancel_all.data,
                            #[hdl(sim)]
                            HdlNone(),
                        )
                        .await;
                        sim.write(
                            is_no_longer_speculative,
                            #[hdl(sim)]
                            (is_no_longer_speculative.ty()).HdlNone(),
                        )
                        .await;
                        sim.write(unit_outputs_ready, false).await;
                    }
                },
                // after reset deasserts: run the main loop
                |sim, ()| {
                    rename_execute_retire_run(
                        sim,
                        cd,
                        from_post_decode,
                        to_next_pc,
                        to_units,
                        state_for_debug,
                        config,
                    )
                },
            )
            .await;
        },
    );
}
|