From e7e831cf00c17a6a840c5aab5c54b1d62c5a4ee1 Mon Sep 17 00:00:00 2001
From: Jacob Lifshay
Date: Tue, 26 Aug 2025 19:17:21 -0700
Subject: [PATCH 1/3] split out simulator compiler into a separate module

---
 crates/fayalite/src/sim.rs          | 5075 +-------------------------
 crates/fayalite/src/sim/compiler.rs | 5087 +++++++++++++++++++++++++++
 2 files changed, 5102 insertions(+), 5060 deletions(-)
 create mode 100644 crates/fayalite/src/sim/compiler.rs

diff --git a/crates/fayalite/src/sim.rs b/crates/fayalite/src/sim.rs
index d0daf34..596e323 100644
--- a/crates/fayalite/src/sim.rs
+++ b/crates/fayalite/src/sim.rs
@@ -5,64 +5,41 @@ use crate::{
     bundle::{BundleField, BundleType},
-    enum_::{EnumType, EnumVariant},
     expr::{
-        ExprEnum, Flow, ToLiteralBits, ops,
+        Flow, ToLiteralBits,
         target::{
-            GetTarget, Target, TargetBase, TargetPathArrayElement, TargetPathBundleField,
-            TargetPathElement,
+            GetTarget, Target, TargetPathArrayElement, TargetPathBundleField, TargetPathElement,
         },
     },
     int::{BoolOrIntType, UIntValue},
-    intern::{
-        Intern, Interned, InternedCompare, Memoize, PtrEqWithTypeId, SupportsPtrEqWithTypeId,
-    },
-    memory::PortKind,
-    module::{
-        AnnotatedModuleIO, Block, ExternModuleBody, Id, InstantiatedModule, ModuleBody, NameId,
-        NormalModuleBody, ScopedNameId, Stmt, StmtConnect, StmtDeclaration, StmtFormal, StmtIf,
-        StmtInstance, StmtMatch, StmtReg, StmtWire, TargetInInstantiatedModule,
-        transform::deduce_resets::deduce_resets,
-    },
+    intern::{Intern, Interned, InternedCompare, PtrEqWithTypeId, SupportsPtrEqWithTypeId},
     prelude::*,
-    reset::{ResetType, ResetTypeDispatch},
+    reset::ResetType,
     sim::{
+        compiler::{
+            CompiledBundleField, CompiledExternModule, CompiledTypeLayoutBody, CompiledValue,
+        },
         interpreter::{
-            BreakAction, BreakpointsSet, Insn, InsnField, InsnFieldKind, InsnFieldType,
-            InsnOrLabel, Insns, InsnsBuilding, InsnsBuildingDone, InsnsBuildingKind, Label,
-            MemoryData, RunResult, SlotDebugData, SmallUInt, State, StatePartArrayIndex,
-            StatePartArrayIndexed, StatePartIndex, StatePartIndexRange, StatePartKind,
-            StatePartKindBigSlots, StatePartKindMemories, StatePartKindSmallSlots, StatePartLayout,
-            StatePartLen, StatePartsValue, TypeArrayIndex, TypeArrayIndexes, TypeIndex,
-            TypeIndexRange, TypeLayout, TypeLen, TypeParts,
+            BreakAction, BreakpointsSet, RunResult, SmallUInt, State, StatePartIndex,
+            StatePartKindBigSlots, StatePartKindMemories, StatePartKindSmallSlots, TypeIndexRange,
+            TypeLen,
         },
         time::{SimDuration, SimInstant},
         value::SimValue,
     },
-    ty::StaticType,
     util::{BitSliceWriteWithBase, DebugAsDisplay, HashMap, HashSet},
 };
 use bitvec::{bits, order::Lsb0, slice::BitSlice, vec::BitVec, view::BitView};
 use num_bigint::BigInt;
 use num_traits::{Signed, Zero};
-use petgraph::{
-    data::FromElements,
-    visit::{
-        EdgeRef, GraphBase, IntoEdgeReferences, IntoNeighbors, IntoNeighborsDirected,
-        IntoNodeIdentifiers, IntoNodeReferences, NodeRef, VisitMap, Visitable,
-    },
-};
 use std::{
     any::Any,
     borrow::Cow,
     cell::RefCell,
-    collections::BTreeSet,
     fmt,
     future::{Future, IntoFuture},
     hash::Hash,
-    marker::PhantomData,
     mem,
-    ops::IndexMut,
     pin::Pin,
     ptr,
     rc::Rc,
@@ -70,5035 +47,13 @@ use std::{
     task::Poll,
 };
 
+mod compiler;
 mod interpreter;
 pub mod time;
 pub mod value;
 pub mod vcd;
 
-#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
-enum CondBody {
-    IfTrue {
-        cond: CompiledValue,
-    },
-    IfFalse {
-        cond: CompiledValue,
-    },
-    MatchArm {
-        discriminant: StatePartIndex,
-        variant_index: usize,
-    },
-}
-
-#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
-struct Cond {
-
body: CondBody, - source_location: SourceLocation, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct CompiledBundleField { - offset: TypeIndex, - ty: CompiledTypeLayout, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -enum CompiledTypeLayoutBody { - Scalar, - Array { - /// debug names are ignored, use parent's layout instead - element: Interned>, - }, - Bundle { - /// debug names are ignored, use parent's layout instead - fields: Interned<[CompiledBundleField]>, - }, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct CompiledTypeLayout { - ty: T, - layout: TypeLayout, - body: CompiledTypeLayoutBody, -} - -impl CompiledTypeLayout { - fn with_prefixed_debug_names(self, prefix: &str) -> Self { - let Self { ty, layout, body } = self; - Self { - ty, - layout: layout.with_prefixed_debug_names(prefix), - body, - } - } - fn with_anonymized_debug_info(self) -> Self { - let Self { ty, layout, body } = self; - Self { - ty, - layout: layout.with_anonymized_debug_info(), - body, - } - } - fn get(ty: T) -> Self { - #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] - struct MyMemoize; - impl Memoize for MyMemoize { - type Input = CanonicalType; - type InputOwned = CanonicalType; - type Output = CompiledTypeLayout; - - fn inner(self, input: &Self::Input) -> Self::Output { - match input { - CanonicalType::UInt(_) - | CanonicalType::SInt(_) - | CanonicalType::Bool(_) - | CanonicalType::Enum(_) - | CanonicalType::AsyncReset(_) - | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => { - let mut layout = TypeLayout::empty(); - let debug_data = SlotDebugData { - name: Interned::default(), - ty: *input, - }; - layout.big_slots = StatePartLayout::scalar(debug_data, ()); - CompiledTypeLayout { - ty: *input, - layout: layout.into(), - body: CompiledTypeLayoutBody::Scalar, - } - } - CanonicalType::Array(array) => { - let mut layout = TypeLayout::empty(); - let element = CompiledTypeLayout::get(array.element()).intern_sized(); - for index in 0..array.len() { - layout.allocate( - &element - .layout - .with_prefixed_debug_names(&format!("[{index}]")), - ); - } - CompiledTypeLayout { - ty: *input, - layout: layout.into(), - body: CompiledTypeLayoutBody::Array { element }, - } - } - CanonicalType::PhantomConst(_) => { - let unit_layout = CompiledTypeLayout::get(()); - CompiledTypeLayout { - ty: *input, - layout: unit_layout.layout, - body: unit_layout.body, - } - } - CanonicalType::Bundle(bundle) => { - let mut layout = TypeLayout::empty(); - let fields = bundle - .fields() - .iter() - .map( - |BundleField { - name, - flipped: _, - ty, - }| { - let ty = CompiledTypeLayout::get(*ty); - let offset = layout - .allocate( - &ty.layout - .with_prefixed_debug_names(&format!(".{name}")), - ) - .start(); - CompiledBundleField { offset, ty } - }, - ) - .collect(); - CompiledTypeLayout { - ty: *input, - layout: layout.into(), - body: CompiledTypeLayoutBody::Bundle { fields }, - } - } - } - } - } - let CompiledTypeLayout { - ty: _, - layout, - body, - } = MyMemoize.get_owned(ty.canonical()); - Self { ty, layout, body } - } -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct CompiledValue { - layout: CompiledTypeLayout, - range: TypeIndexRange, - write: Option<(CompiledTypeLayout, TypeIndexRange)>, -} - -impl CompiledValue { - fn write(self) -> (CompiledTypeLayout, TypeIndexRange) { - self.write.unwrap_or((self.layout, self.range)) - } - fn write_value(self) -> Self { - let (layout, range) = self.write(); - Self { - layout, - range, - write: None, 
- } - } - fn map( - self, - mut f: impl FnMut( - CompiledTypeLayout, - TypeIndexRange, - ) -> (CompiledTypeLayout, TypeIndexRange), - ) -> CompiledValue { - let (layout, range) = f(self.layout, self.range); - CompiledValue { - layout, - range, - write: self.write.map(|(layout, range)| f(layout, range)), - } - } - fn map_ty(self, mut f: impl FnMut(T) -> U) -> CompiledValue { - self.map(|CompiledTypeLayout { ty, layout, body }, range| { - ( - CompiledTypeLayout { - ty: f(ty), - layout, - body, - }, - range, - ) - }) - } -} - -impl CompiledValue { - fn field_by_index(self, field_index: usize) -> CompiledValue { - self.map(|layout, range| { - let CompiledTypeLayout { - ty: _, - layout: _, - body: CompiledTypeLayoutBody::Bundle { fields }, - } = layout - else { - unreachable!(); - }; - ( - fields[field_index].ty, - range.slice(TypeIndexRange::new( - fields[field_index].offset, - fields[field_index].ty.layout.len(), - )), - ) - }) - } - fn field_by_name(self, name: Interned) -> CompiledValue { - self.field_by_index(self.layout.ty.name_indexes()[&name]) - } -} - -impl CompiledValue { - fn element(self, index: usize) -> CompiledValue { - self.map(|layout, range| { - let CompiledTypeLayoutBody::Array { element } = layout.body else { - unreachable!(); - }; - (*element, range.index_array(element.layout.len(), index)) - }) - } - fn element_dyn( - self, - index_slot: StatePartIndex, - ) -> CompiledExpr { - CompiledExpr::from(self).element_dyn(index_slot) - } -} - -#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] -struct CompiledExpr { - static_part: CompiledValue, - indexes: TypeArrayIndexes, -} - -impl From> for CompiledExpr { - fn from(static_part: CompiledValue) -> Self { - Self { - static_part, - indexes: TypeArrayIndexes::default(), - } - } -} - -impl CompiledExpr { - fn map_ty(self, f: impl FnMut(T) -> U) -> CompiledExpr { - let Self { - static_part, - indexes, - } = self; - CompiledExpr { - static_part: static_part.map_ty(f), - indexes, - } - } - fn add_target_without_indexes_to_set(self, inputs: &mut SlotSet) { - let Self { - static_part, - indexes, - } = self; - indexes.as_ref().for_each_offset(|offset| { - inputs.extend([static_part.range.offset(offset)]); - }); - } - fn add_target_and_indexes_to_set(self, inputs: &mut SlotSet) { - let Self { - static_part: _, - indexes, - } = self; - self.add_target_without_indexes_to_set(inputs); - inputs.extend(indexes.as_ref().iter()); - } -} - -impl CompiledExpr { - fn field_by_index(self, field_index: usize) -> CompiledExpr { - CompiledExpr { - static_part: self.static_part.field_by_index(field_index), - indexes: self.indexes, - } - } - fn field_by_name(self, name: Interned) -> CompiledExpr { - CompiledExpr { - static_part: self.static_part.field_by_name(name), - indexes: self.indexes, - } - } -} - -impl CompiledExpr { - fn element(self, index: usize) -> CompiledExpr { - CompiledExpr { - static_part: self.static_part.element(index), - indexes: self.indexes, - } - } - fn element_dyn( - self, - index_slot: StatePartIndex, - ) -> CompiledExpr { - let CompiledTypeLayoutBody::Array { element } = self.static_part.layout.body else { - unreachable!(); - }; - let stride = element.layout.len(); - let indexes = self.indexes.join(TypeArrayIndex::from_parts( - index_slot, - self.static_part.layout.ty.len(), - stride, - )); - CompiledExpr { - static_part: self.static_part.map(|layout, range| { - let CompiledTypeLayoutBody::Array { element } = layout.body else { - unreachable!(); - }; - (*element, range.index_array(stride, 0)) - }), - indexes, - } - } -} - 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AssignmentOrSlotIndex { - AssignmentIndex(usize), - SmallSlot(StatePartIndex), - BigSlot(StatePartIndex), -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AssignmentIO { - BigInput { - assignment_index: usize, - slot: StatePartIndex, - }, - SmallInput { - assignment_index: usize, - slot: StatePartIndex, - }, - BigOutput { - assignment_index: usize, - slot: StatePartIndex, - }, - SmallOutput { - assignment_index: usize, - slot: StatePartIndex, - }, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AssignmentsEdge { - IO(AssignmentIO), - AssignmentImmediatePredecessor { - predecessor_assignment_index: usize, - assignment_index: usize, - }, -} - -#[derive(Debug)] -enum Assignments { - Accumulating { - assignments: Vec, - }, - Finalized { - assignments: Box<[Assignment]>, - slots_layout: TypeLayout, - slot_readers: SlotToAssignmentIndexFullMap, - slot_writers: SlotToAssignmentIndexFullMap, - assignment_immediate_predecessors: Box<[Box<[usize]>]>, - assignment_immediate_successors: Box<[Box<[usize]>]>, - }, -} - -impl Default for Assignments { - fn default() -> Self { - Self::Accumulating { - assignments: Vec::new(), - } - } -} - -impl Assignments { - fn finalize(&mut self, slots_layout: TypeLayout) { - let Self::Accumulating { assignments } = self else { - unreachable!("already finalized"); - }; - let assignments = mem::take(assignments).into_boxed_slice(); - let mut slot_readers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); - let mut slot_writers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); - let mut assignment_immediate_predecessors = vec![BTreeSet::new(); assignments.len()]; - let mut assignment_immediate_successors = vec![BTreeSet::new(); assignments.len()]; - for (assignment_index, assignment) in assignments.iter().enumerate() { - slot_readers - .keys_for_assignment(assignment_index) - .extend([&assignment.inputs]); - slot_readers - .keys_for_assignment(assignment_index) - .extend(&assignment.conditions); - let SlotSet(TypeParts { - small_slots, - big_slots, - }) = &assignment.outputs; - for &slot in small_slots { - if let Some(&pred) = slot_writers[slot].last() { - assignment_immediate_predecessors[assignment_index].insert(pred); - assignment_immediate_successors[pred].insert(assignment_index); - } - slot_writers[slot].push(assignment_index); - } - for &slot in big_slots { - if let Some(&pred) = slot_writers[slot].last() { - assignment_immediate_predecessors[assignment_index].insert(pred); - assignment_immediate_successors[pred].insert(assignment_index); - } - slot_writers[slot].push(assignment_index); - } - } - *self = Self::Finalized { - assignments, - slots_layout, - slot_readers, - slot_writers, - assignment_immediate_predecessors: assignment_immediate_predecessors - .into_iter() - .map(Box::from_iter) - .collect(), - assignment_immediate_successors: assignment_immediate_successors - .into_iter() - .map(Box::from_iter) - .collect(), - }; - } - fn push(&mut self, v: Assignment) { - let Self::Accumulating { assignments } = self else { - unreachable!("already finalized"); - }; - assignments.push(v); - } - fn assignments(&self) -> &[Assignment] { - let Self::Finalized { assignments, .. } = self else { - unreachable!("Assignments::finalize should have been called"); - }; - assignments - } - fn slots_layout(&self) -> TypeLayout { - let Self::Finalized { slots_layout, .. 
} = self else { - unreachable!("Assignments::finalize should have been called"); - }; - *slots_layout - } - fn slot_readers(&self) -> &SlotToAssignmentIndexFullMap { - let Self::Finalized { slot_readers, .. } = self else { - unreachable!("Assignments::finalize should have been called"); - }; - slot_readers - } - fn slot_writers(&self) -> &SlotToAssignmentIndexFullMap { - let Self::Finalized { slot_writers, .. } = self else { - unreachable!("Assignments::finalize should have been called"); - }; - slot_writers - } - fn assignment_immediate_predecessors(&self) -> &[Box<[usize]>] { - let Self::Finalized { - assignment_immediate_predecessors, - .. - } = self - else { - unreachable!("Assignments::finalize should have been called"); - }; - assignment_immediate_predecessors - } - fn assignment_immediate_successors(&self) -> &[Box<[usize]>] { - let Self::Finalized { - assignment_immediate_successors, - .. - } = self - else { - unreachable!("Assignments::finalize should have been called"); - }; - assignment_immediate_successors - } - fn elements(&self) -> AssignmentsElements<'_> { - let SlotToAssignmentIndexFullMap(TypeParts { - small_slots, - big_slots, - }) = self.slot_readers(); - AssignmentsElements { - node_indexes: HashMap::with_capacity_and_hasher( - self.assignments().len() + small_slots.len() + big_slots.len(), - Default::default(), - ), - nodes: self.node_references(), - edges: self.edge_references(), - } - } -} - -impl GraphBase for Assignments { - type EdgeId = AssignmentsEdge; - type NodeId = AssignmentOrSlotIndex; -} - -#[derive(Debug, Clone, Copy)] -enum AssignmentsNodeRef<'a> { - Assignment { - index: usize, - assignment: &'a Assignment, - }, - SmallSlot(StatePartIndex, SlotDebugData), - BigSlot(StatePartIndex, SlotDebugData), -} - -impl<'a> NodeRef for AssignmentsNodeRef<'a> { - type NodeId = AssignmentOrSlotIndex; - type Weight = AssignmentsNodeRef<'a>; - - fn id(&self) -> Self::NodeId { - match *self { - AssignmentsNodeRef::Assignment { - index, - assignment: _, - } => AssignmentOrSlotIndex::AssignmentIndex(index), - AssignmentsNodeRef::SmallSlot(slot, _) => AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentsNodeRef::BigSlot(slot, _) => AssignmentOrSlotIndex::BigSlot(slot), - } - } - - fn weight(&self) -> &Self::Weight { - self - } -} - -impl<'a> petgraph::visit::Data for &'a Assignments { - type NodeWeight = AssignmentsNodeRef<'a>; - type EdgeWeight = AssignmentsEdge; -} - -struct AssignmentsElements<'a> { - node_indexes: HashMap, - nodes: AssignmentsNodes<'a>, - edges: AssignmentsEdges<'a>, -} - -impl<'a> Iterator for AssignmentsElements<'a> { - type Item = petgraph::data::Element< - <&'a Assignments as petgraph::visit::Data>::NodeWeight, - <&'a Assignments as petgraph::visit::Data>::EdgeWeight, - >; - - fn next(&mut self) -> Option { - let Self { - node_indexes, - nodes, - edges, - } = self; - if let Some(node) = nodes.next() { - node_indexes.insert(node.id(), node_indexes.len()); - return Some(petgraph::data::Element::Node { weight: node }); - } - let edge = edges.next()?; - Some(petgraph::data::Element::Edge { - source: node_indexes[&edge.source()], - target: node_indexes[&edge.target()], - weight: *edge.weight(), - }) - } -} - -#[derive(Clone)] -struct AssignmentsNodeIdentifiers { - assignment_indexes: std::ops::Range, - small_slots: std::ops::Range, - big_slots: std::ops::Range, -} - -impl AssignmentsNodeIdentifiers { - fn internal_iter<'a>(&'a mut self) -> impl Iterator + 'a { - let Self { - assignment_indexes, - small_slots, - big_slots, - } = self; - assignment_indexes 
- .map(AssignmentOrSlotIndex::AssignmentIndex) - .chain(small_slots.map(|value| { - AssignmentOrSlotIndex::SmallSlot(StatePartIndex { - value, - _phantom: PhantomData, - }) - })) - .chain(big_slots.map(|value| { - AssignmentOrSlotIndex::BigSlot(StatePartIndex { - value, - _phantom: PhantomData, - }) - })) - } -} - -impl Iterator for AssignmentsNodeIdentifiers { - type Item = AssignmentOrSlotIndex; - fn next(&mut self) -> Option { - self.internal_iter().next() - } - - fn nth(&mut self, n: usize) -> Option { - self.internal_iter().nth(n) - } -} - -impl<'a> IntoNodeIdentifiers for &'a Assignments { - type NodeIdentifiers = AssignmentsNodeIdentifiers; - - fn node_identifiers(self) -> Self::NodeIdentifiers { - let TypeLen { - small_slots, - big_slots, - } = self.slot_readers().len(); - AssignmentsNodeIdentifiers { - assignment_indexes: 0..self.assignments().len(), - small_slots: 0..small_slots.value, - big_slots: 0..big_slots.value, - } - } -} - -struct AssignmentsNodes<'a> { - assignments: &'a Assignments, - nodes: AssignmentsNodeIdentifiers, -} - -impl<'a> Iterator for AssignmentsNodes<'a> { - type Item = AssignmentsNodeRef<'a>; - - fn next(&mut self) -> Option { - self.nodes.next().map(|node| match node { - AssignmentOrSlotIndex::AssignmentIndex(index) => AssignmentsNodeRef::Assignment { - index, - assignment: &self.assignments.assignments()[index], - }, - AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNodeRef::SmallSlot( - slot, - *self.assignments.slots_layout().small_slots.debug_data(slot), - ), - AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNodeRef::BigSlot( - slot, - *self.assignments.slots_layout().big_slots.debug_data(slot), - ), - }) - } -} - -impl<'a> IntoNodeReferences for &'a Assignments { - type NodeRef = AssignmentsNodeRef<'a>; - type NodeReferences = AssignmentsNodes<'a>; - - fn node_references(self) -> Self::NodeReferences { - AssignmentsNodes { - assignments: self, - nodes: self.node_identifiers(), - } - } -} - -struct AssignmentsNeighborsDirected<'a> { - assignment_indexes: std::slice::Iter<'a, usize>, - small_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, - big_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, -} - -impl Iterator for AssignmentsNeighborsDirected<'_> { - type Item = AssignmentOrSlotIndex; - fn next(&mut self) -> Option { - let Self { - assignment_indexes, - small_slots, - big_slots, - } = self; - if let retval @ Some(_) = assignment_indexes - .next() - .copied() - .map(AssignmentOrSlotIndex::AssignmentIndex) - { - retval - } else if let retval @ Some(_) = small_slots - .next() - .copied() - .map(AssignmentOrSlotIndex::SmallSlot) - { - retval - } else if let retval @ Some(_) = big_slots - .next() - .copied() - .map(AssignmentOrSlotIndex::BigSlot) - { - retval - } else { - None - } - } -} - -impl<'a> IntoNeighbors for &'a Assignments { - type Neighbors = AssignmentsNeighborsDirected<'a>; - - fn neighbors(self, n: Self::NodeId) -> Self::Neighbors { - self.neighbors_directed(n, petgraph::Direction::Outgoing) - } -} - -impl<'a> IntoNeighborsDirected for &'a Assignments { - type NeighborsDirected = AssignmentsNeighborsDirected<'a>; - - fn neighbors_directed( - self, - n: Self::NodeId, - d: petgraph::Direction, - ) -> Self::NeighborsDirected { - use petgraph::Direction::*; - let slot_map = match d { - Outgoing => self.slot_readers(), - Incoming => self.slot_writers(), - }; - match n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - let assignment = &self.assignments()[assignment_index]; - let ( - 
assignment_indexes, - SlotSet(TypeParts { - small_slots, - big_slots, - }), - ) = match d { - Outgoing => ( - &self.assignment_immediate_successors()[assignment_index], - &assignment.outputs, - ), - Incoming => ( - &self.assignment_immediate_predecessors()[assignment_index], - &assignment.inputs, - ), - }; - AssignmentsNeighborsDirected { - assignment_indexes: assignment_indexes.iter(), - small_slots: small_slots.iter(), - big_slots: big_slots.iter(), - } - } - AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNeighborsDirected { - assignment_indexes: slot_map[slot].iter(), - small_slots: Default::default(), - big_slots: Default::default(), - }, - AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNeighborsDirected { - assignment_indexes: slot_map[slot].iter(), - small_slots: Default::default(), - big_slots: Default::default(), - }, - } - } -} - -impl EdgeRef for AssignmentsEdge { - type NodeId = AssignmentOrSlotIndex; - type EdgeId = AssignmentsEdge; - type Weight = AssignmentsEdge; - - fn source(&self) -> Self::NodeId { - match *self { - AssignmentsEdge::IO(AssignmentIO::BigInput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::BigSlot(slot), - AssignmentsEdge::IO(AssignmentIO::SmallInput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentsEdge::IO(AssignmentIO::BigOutput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::IO(AssignmentIO::SmallOutput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::AssignmentImmediatePredecessor { - predecessor_assignment_index, - assignment_index: _, - } => AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), - } - } - - fn target(&self) -> Self::NodeId { - match *self { - AssignmentsEdge::IO(AssignmentIO::BigInput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::IO(AssignmentIO::SmallInput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::IO(AssignmentIO::BigOutput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::BigSlot(slot), - AssignmentsEdge::IO(AssignmentIO::SmallOutput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentsEdge::AssignmentImmediatePredecessor { - predecessor_assignment_index: _, - assignment_index, - } => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - } - } - - fn weight(&self) -> &Self::Weight { - self - } - - fn id(&self) -> Self::EdgeId { - *self - } -} - -struct AssignmentsEdges<'a> { - assignments: &'a Assignments, - nodes: AssignmentsNodeIdentifiers, - outgoing_neighbors: Option<(AssignmentOrSlotIndex, AssignmentsNeighborsDirected<'a>)>, -} - -impl Iterator for AssignmentsEdges<'_> { - type Item = AssignmentsEdge; - - fn next(&mut self) -> Option { - loop { - if let Some((node, outgoing_neighbors)) = &mut self.outgoing_neighbors { - if let Some(outgoing_neighbor) = outgoing_neighbors.next() { - return Some(match (*node, outgoing_neighbor) { - ( - AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), - AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), - ) => unreachable!(), - ( - AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - ) => AssignmentsEdge::AssignmentImmediatePredecessor { - 
predecessor_assignment_index, - assignment_index, - }, - ( - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentOrSlotIndex::SmallSlot(slot), - ) => AssignmentsEdge::IO(AssignmentIO::SmallOutput { - assignment_index, - slot, - }), - ( - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentOrSlotIndex::BigSlot(slot), - ) => AssignmentsEdge::IO(AssignmentIO::BigOutput { - assignment_index, - slot, - }), - ( - AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - ) => AssignmentsEdge::IO(AssignmentIO::SmallInput { - assignment_index, - slot, - }), - ( - AssignmentOrSlotIndex::BigSlot(slot), - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - ) => AssignmentsEdge::IO(AssignmentIO::BigInput { - assignment_index, - slot, - }), - }); - } - } - let node = self.nodes.next()?; - self.outgoing_neighbors = Some(( - node, - self.assignments - .neighbors_directed(node, petgraph::Direction::Outgoing), - )); - } - } -} - -impl<'a> IntoEdgeReferences for &'a Assignments { - type EdgeRef = AssignmentsEdge; - type EdgeReferences = AssignmentsEdges<'a>; - - fn edge_references(self) -> Self::EdgeReferences { - AssignmentsEdges { - assignments: self, - nodes: self.node_identifiers(), - outgoing_neighbors: None, - } - } -} - -struct AssignmentsVisitMap { - assignments: Vec, - slots: DenseSlotSet, -} - -impl VisitMap for AssignmentsVisitMap { - fn visit(&mut self, n: AssignmentOrSlotIndex) -> bool { - match n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - !mem::replace(&mut self.assignments[assignment_index], true) - } - AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.insert(slot), - AssignmentOrSlotIndex::BigSlot(slot) => self.slots.insert(slot), - } - } - - fn is_visited(&self, n: &AssignmentOrSlotIndex) -> bool { - match *n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - self.assignments[assignment_index] - } - AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.contains(slot), - AssignmentOrSlotIndex::BigSlot(slot) => self.slots.contains(slot), - } - } - - fn unvisit(&mut self, n: AssignmentOrSlotIndex) -> bool { - match n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - mem::replace(&mut self.assignments[assignment_index], false) - } - AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.remove(slot), - AssignmentOrSlotIndex::BigSlot(slot) => self.slots.remove(slot), - } - } -} - -impl Visitable for Assignments { - type Map = AssignmentsVisitMap; - - fn visit_map(self: &Self) -> Self::Map { - AssignmentsVisitMap { - assignments: vec![false; self.assignments().len()], - slots: DenseSlotSet::new(self.slot_readers().len()), - } - } - - fn reset_map(self: &Self, map: &mut Self::Map) { - let AssignmentsVisitMap { assignments, slots } = map; - assignments.clear(); - assignments.resize(self.assignments().len(), false); - if slots.len() != self.slot_readers().len() { - *slots = DenseSlotSet::new(self.slot_readers().len()); - } else { - slots.clear(); - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct DenseSlotSet(TypeParts); - -impl DenseSlotSet { - fn new(len: TypeLen) -> Self { - let TypeLen { - small_slots, - big_slots, - } = len; - Self(TypeParts { - small_slots: vec![false; small_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - big_slots: vec![false; big_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - }) - } - fn len(&self) -> TypeLen { - TypeLen { - small_slots: StatePartLen { - value: 
self.0.small_slots.len() as _, - _phantom: PhantomData, - }, - big_slots: StatePartLen { - value: self.0.big_slots.len() as _, - _phantom: PhantomData, - }, - } - } - fn clear(&mut self) { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.fill(false); - big_slots.fill(false); - } -} - -impl StatePartsValue for DenseSlotSet { - type Value = Box<[bool]>; -} - -trait DenseSlotSetMethods: Extend> { - fn contains(&self, k: StatePartIndex) -> bool; - fn remove(&mut self, k: StatePartIndex) -> bool { - self.take(k).is_some() - } - fn take(&mut self, k: StatePartIndex) -> Option>; - fn replace(&mut self, k: StatePartIndex) -> Option>; - fn insert(&mut self, k: StatePartIndex) -> bool { - self.replace(k).is_none() - } -} - -impl Extend> for DenseSlotSet -where - Self: DenseSlotSetMethods, -{ - fn extend>>(&mut self, iter: T) { - iter.into_iter().for_each(|v| { - self.insert(v); - }); - } -} - -impl DenseSlotSetMethods for DenseSlotSet { - fn contains(&self, k: StatePartIndex) -> bool { - self.0.small_slots[k.as_usize()] - } - - fn take( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(self.0.small_slots.get_mut(k.as_usize())?, false).then_some(k) - } - - fn replace( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(&mut self.0.small_slots[k.as_usize()], true).then_some(k) - } -} - -impl DenseSlotSetMethods for DenseSlotSet { - fn contains(&self, k: StatePartIndex) -> bool { - self.0.big_slots[k.as_usize()] - } - - fn take( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(self.0.big_slots.get_mut(k.as_usize())?, false).then_some(k) - } - - fn replace( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(&mut self.0.big_slots[k.as_usize()], true).then_some(k) - } -} - -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] -struct SlotVec(TypeParts); - -impl SlotVec { - fn is_empty(&self) -> bool { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.is_empty() && big_slots.is_empty() - } -} - -impl StatePartsValue for SlotVec { - type Value = Vec>; -} - -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] -struct SlotSet(TypeParts); - -impl SlotSet { - fn is_empty(&self) -> bool { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.is_empty() && big_slots.is_empty() - } - fn for_each( - &self, - small_slots_fn: impl FnMut(StatePartIndex), - big_slots_fn: impl FnMut(StatePartIndex), - ) { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.iter().copied().for_each(small_slots_fn); - big_slots.iter().copied().for_each(big_slots_fn); - } - fn all( - &self, - small_slots_fn: impl FnMut(StatePartIndex) -> bool, - big_slots_fn: impl FnMut(StatePartIndex) -> bool, - ) -> bool { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.iter().copied().all(small_slots_fn) - && big_slots.iter().copied().all(big_slots_fn) - } -} - -impl StatePartsValue for SlotSet { - type Value = BTreeSet>; -} - -impl Extend> for SlotSet { - fn extend>>(&mut self, iter: T) { - self.0.small_slots.extend(iter); - } -} - -impl Extend> for SlotSet { - fn extend>>(&mut self, iter: T) { - self.0.big_slots.extend(iter); - } -} - -impl Extend> for SlotSet -where - Self: Extend>, -{ - fn extend>>(&mut self, iter: T) { - self.extend(iter.into_iter().flat_map(|v| v.iter())); - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each( - |TypeIndexRange { - small_slots, - big_slots, - }| { - 
self.extend(small_slots.iter()); - self.extend(big_slots.iter()); - }, - ) - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each( - |TypeArrayIndex { - small_slots, - big_slots, - }| { - self.extend([small_slots]); - self.extend([big_slots]); - }, - ) - } -} - -impl Extend> for SlotSet { - fn extend>>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|v| v.index)); - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each(|cond_body| match cond_body { - CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { - self.extend([cond.range]); - } - CondBody::MatchArm { - discriminant, - variant_index: _, - } => self.extend([discriminant]), - }) - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|v| v.body)) - } -} - -#[derive(Debug)] -struct Assignment { - inputs: SlotSet, - outputs: SlotSet, - conditions: Interned<[Cond]>, - insns: Vec, - source_location: SourceLocation, -} - -#[derive(Debug)] -struct SlotToAssignmentIndexFullMap(TypeParts); - -impl StatePartsValue for SlotToAssignmentIndexFullMap { - type Value = Box<[Vec]>; -} - -impl SlotToAssignmentIndexFullMap { - fn new(len: TypeLen) -> Self { - let TypeLen { - small_slots, - big_slots, - } = len; - Self(TypeParts { - small_slots: vec![Vec::new(); small_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - big_slots: vec![Vec::new(); big_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - }) - } - fn len(&self) -> TypeLen { - TypeLen { - small_slots: StatePartLen { - value: self.0.small_slots.len() as _, - _phantom: PhantomData, - }, - big_slots: StatePartLen { - value: self.0.big_slots.len() as _, - _phantom: PhantomData, - }, - } - } - fn keys_for_assignment( - &mut self, - assignment_index: usize, - ) -> SlotToAssignmentIndexFullMapKeysForAssignment<'_> { - SlotToAssignmentIndexFullMapKeysForAssignment { - map: self, - assignment_index, - } - } - fn for_each( - &self, - mut small_slots_fn: impl FnMut(StatePartIndex, &[usize]), - mut big_slots_fn: impl FnMut(StatePartIndex, &[usize]), - ) { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.iter().enumerate().for_each(|(k, v)| { - small_slots_fn( - StatePartIndex { - value: k as _, - _phantom: PhantomData, - }, - v, - ) - }); - big_slots.iter().enumerate().for_each(|(k, v)| { - big_slots_fn( - StatePartIndex { - value: k as _, - _phantom: PhantomData, - }, - v, - ) - }); - } -} - -impl std::ops::Index> for SlotToAssignmentIndexFullMap { - type Output = Vec; - - fn index(&self, index: StatePartIndex) -> &Self::Output { - &self.0.small_slots[index.as_usize()] - } -} - -impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { - fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { - &mut self.0.small_slots[index.as_usize()] - } -} - -impl std::ops::Index> for SlotToAssignmentIndexFullMap { - type Output = Vec; - - fn index(&self, index: StatePartIndex) -> &Self::Output { - &self.0.big_slots[index.as_usize()] - } -} - -impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { - fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { - &mut self.0.big_slots[index.as_usize()] - } -} - -struct SlotToAssignmentIndexFullMapKeysForAssignment<'a> { - map: &'a mut SlotToAssignmentIndexFullMap, - assignment_index: usize, -} - -impl<'a, K: StatePartKind> Extend<&'a StatePartIndex> - for SlotToAssignmentIndexFullMapKeysForAssignment<'_> -where - 
Self: Extend>, -{ - fn extend>>(&mut self, iter: T) { - self.extend(iter.into_iter().copied()); - } -} - -impl Extend> - for SlotToAssignmentIndexFullMapKeysForAssignment<'_> -where - SlotToAssignmentIndexFullMap: IndexMut, Output = Vec>, -{ - fn extend>>(&mut self, iter: T) { - iter.into_iter() - .for_each(|slot| self.map[slot].push(self.assignment_index)); - } -} - -impl<'a> Extend<&'a SlotSet> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each( - |SlotSet(TypeParts { - small_slots, - big_slots, - })| { - self.extend(small_slots); - self.extend(big_slots); - }, - ); - } -} - -impl<'a> Extend<&'a Cond> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each(|cond| match cond.body { - CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { - let CompiledValue { - range: - TypeIndexRange { - small_slots, - big_slots, - }, - layout: _, - write: _, - } = cond; - self.extend(small_slots.iter()); - self.extend(big_slots.iter()); - } - CondBody::MatchArm { - discriminant, - variant_index: _, - } => self.extend([discriminant]), - }); - } -} - -impl Assignment { - fn new( - conditions: Interned<[Cond]>, - insns: Vec, - source_location: SourceLocation, - ) -> Self { - let mut inputs = SlotSet::default(); - let mut outputs = SlotSet::default(); - for insn in &insns { - let insn = match insn { - InsnOrLabel::Insn(insn) => insn, - InsnOrLabel::Label(_) => continue, - }; - for InsnField { ty, kind } in insn.fields() { - match (kind, ty) { - (InsnFieldKind::Input, InsnFieldType::SmallSlot(&slot)) => { - inputs.extend([slot]); - } - (InsnFieldKind::Input, InsnFieldType::BigSlot(&slot)) => { - inputs.extend([slot]); - } - ( - InsnFieldKind::Input, - InsnFieldType::SmallSlotArrayIndexed(&array_indexed), - ) => { - array_indexed.for_each_target(|slot| inputs.extend([slot])); - inputs.extend(array_indexed.indexes); - } - (InsnFieldKind::Input, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { - array_indexed.for_each_target(|slot| inputs.extend([slot])); - inputs.extend(array_indexed.indexes); - } - (InsnFieldKind::Output, InsnFieldType::SmallSlot(&slot)) => { - outputs.extend([slot]); - } - (InsnFieldKind::Output, InsnFieldType::BigSlot(&slot)) => { - outputs.extend([slot]); - } - ( - InsnFieldKind::Output, - InsnFieldType::SmallSlotArrayIndexed(&array_indexed), - ) => { - array_indexed.for_each_target(|slot| { - outputs.extend([slot]); - }); - inputs.extend(array_indexed.indexes); - } - (InsnFieldKind::Output, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { - array_indexed.for_each_target(|slot| { - outputs.extend([slot]); - }); - inputs.extend(array_indexed.indexes); - } - ( - _, - InsnFieldType::Memory(_) - | InsnFieldType::SmallUInt(_) - | InsnFieldType::SmallSInt(_) - | InsnFieldType::InternedBigInt(_) - | InsnFieldType::U8(_) - | InsnFieldType::USize(_) - | InsnFieldType::Empty(_), - ) - | ( - InsnFieldKind::Immediate - | InsnFieldKind::Memory - | InsnFieldKind::BranchTarget, - _, - ) => {} - } - } - } - Self { - inputs, - outputs, - conditions, - insns, - source_location, - } - } -} - -#[derive(Debug)] -struct RegisterReset { - is_async: bool, - init: CompiledValue, - rst: StatePartIndex, -} - -#[derive(Debug, Clone, Copy)] -struct ClockTrigger { - last_clk_was_low: StatePartIndex, - clk: StatePartIndex, - clk_triggered: StatePartIndex, - source_location: SourceLocation, -} - -#[derive(Debug)] -struct Register { - value: CompiledValue, - clk_triggered: 
StatePartIndex, - reset: Option, - source_location: SourceLocation, -} - -#[derive(Debug)] - -struct MemoryPort { - clk_triggered: StatePartIndex, - addr_delayed: Vec>, - en_delayed: Vec>, - data_layout: CompiledTypeLayout, - read_data_delayed: Vec, - write_data_delayed: Vec, - write_mask_delayed: Vec, - write_mode_delayed: Vec>, - write_insns: Vec, -} - -struct MemoryPortReadInsns<'a> { - addr: StatePartIndex, - en: StatePartIndex, - write_mode: Option>, - data: TypeIndexRange, - insns: &'a mut Vec, -} - -struct MemoryPortWriteInsns<'a> { - addr: StatePartIndex, - en: StatePartIndex, - write_mode: Option>, - data: TypeIndexRange, - mask: TypeIndexRange, - insns: &'a mut Vec, -} - -#[derive(Debug)] -struct Memory { - mem: Mem, - memory: StatePartIndex, - trace: TraceMem, - ports: Vec, -} - -#[derive(Copy, Clone)] -enum MakeTraceDeclTarget { - Expr(Expr), - Memory { - id: TraceMemoryId, - depth: usize, - stride: usize, - start: usize, - ty: CanonicalType, - }, -} - -impl MakeTraceDeclTarget { - fn flow(self) -> Flow { - match self { - MakeTraceDeclTarget::Expr(expr) => Expr::flow(expr), - MakeTraceDeclTarget::Memory { .. } => Flow::Duplex, - } - } - fn ty(self) -> CanonicalType { - match self { - MakeTraceDeclTarget::Expr(expr) => Expr::ty(expr), - MakeTraceDeclTarget::Memory { ty, .. } => ty, - } - } -} - -struct DebugOpaque(T); - -impl fmt::Debug for DebugOpaque { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("<...>") - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -struct CompiledExternModule { - module_io_targets: Interned<[Target]>, - module_io: Interned<[CompiledValue]>, - simulation: ExternModuleSimulation, -} - -#[derive(Debug)] -pub struct Compiler { - insns: Insns, - original_base_module: Interned>, - base_module: Interned>, - modules: HashMap, - extern_modules: Vec, - compiled_values: HashMap>, - compiled_exprs: HashMap, CompiledExpr>, - compiled_exprs_to_values: HashMap, CompiledValue>, - decl_conditions: HashMap>, - compiled_values_to_dyn_array_indexes: - HashMap, StatePartIndex>, - compiled_value_bool_dest_is_small_map: - HashMap, StatePartIndex>, - assignments: Assignments, - clock_triggers: Vec, - compiled_value_to_clock_trigger_map: HashMap, ClockTrigger>, - enum_discriminants: HashMap, StatePartIndex>, - registers: Vec, - traces: SimTraces>>, - memories: Vec, - dump_assignments_dot: Option>>, -} - -impl Compiler { - pub fn new(base_module: Interned>) -> Self { - let original_base_module = base_module; - let base_module = deduce_resets(base_module, true) - .unwrap_or_else(|e| panic!("failed to deduce reset types: {e}")); - Self { - insns: Insns::new(), - original_base_module, - base_module, - modules: HashMap::default(), - extern_modules: Vec::new(), - compiled_values: HashMap::default(), - compiled_exprs: HashMap::default(), - compiled_exprs_to_values: HashMap::default(), - decl_conditions: HashMap::default(), - compiled_values_to_dyn_array_indexes: HashMap::default(), - compiled_value_bool_dest_is_small_map: HashMap::default(), - assignments: Assignments::default(), - clock_triggers: Vec::new(), - compiled_value_to_clock_trigger_map: HashMap::default(), - enum_discriminants: HashMap::default(), - registers: Vec::new(), - traces: SimTraces(Vec::new()), - memories: Vec::new(), - dump_assignments_dot: None, - } - } - #[doc(hidden)] - /// This is explicitly unstable and may be changed/removed at any time - pub fn dump_assignments_dot(&mut self, callback: Box) { - self.dump_assignments_dot = Some(DebugOpaque(callback)); - } - fn 
new_sim_trace(&mut self, kind: SimTraceKind) -> TraceScalarId { - let id = TraceScalarId(self.traces.0.len()); - self.traces.0.push(SimTrace { - kind, - state: (), - last_state: (), - }); - id - } - fn make_trace_scalar_helper( - &mut self, - instantiated_module: InstantiatedModule, - target: MakeTraceDeclTarget, - source_location: SourceLocation, - small_kind: impl FnOnce(StatePartIndex) -> SimTraceKind, - big_kind: impl FnOnce(StatePartIndex) -> SimTraceKind, - ) -> TraceLocation { - match target { - MakeTraceDeclTarget::Expr(target) => { - let compiled_value = self.compile_expr(instantiated_module, target); - let compiled_value = self.compiled_expr_to_value(compiled_value, source_location); - TraceLocation::Scalar(self.new_sim_trace(match compiled_value.range.len() { - TypeLen::A_SMALL_SLOT => small_kind(compiled_value.range.small_slots.start), - TypeLen::A_BIG_SLOT => big_kind(compiled_value.range.big_slots.start), - _ => unreachable!(), - })) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty, - } => TraceLocation::Memory(TraceMemoryLocation { - id, - depth, - stride, - start, - len: ty.bit_width(), - }), - } - } - fn make_trace_scalar( - &mut self, - instantiated_module: InstantiatedModule, - target: MakeTraceDeclTarget, - name: Interned, - source_location: SourceLocation, - ) -> TraceDecl { - let flow = target.flow(); - match target.ty() { - CanonicalType::UInt(ty) => TraceUInt { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallUInt { index, ty }, - |index| SimTraceKind::BigUInt { index, ty }, - ), - name, - ty, - flow, - } - .into(), - CanonicalType::SInt(ty) => TraceSInt { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallSInt { index, ty }, - |index| SimTraceKind::BigSInt { index, ty }, - ), - name, - ty, - flow, - } - .into(), - CanonicalType::Bool(_) => TraceBool { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallBool { index }, - |index| SimTraceKind::BigBool { index }, - ), - name, - flow, - } - .into(), - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(ty) => { - assert_eq!(ty.discriminant_bit_width(), ty.type_properties().bit_width); - let location = match target { - MakeTraceDeclTarget::Expr(target) => { - let compiled_value = self.compile_expr(instantiated_module, target); - let compiled_value = - self.compiled_expr_to_value(compiled_value, source_location); - let discriminant = self.compile_enum_discriminant( - compiled_value.map_ty(Enum::from_canonical), - source_location, - ); - TraceLocation::Scalar(self.new_sim_trace(SimTraceKind::EnumDiscriminant { - index: discriminant, - ty, - })) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => TraceLocation::Memory(TraceMemoryLocation { - id, - depth, - stride, - start, - len: ty.type_properties().bit_width, - }), - }; - TraceFieldlessEnum { - location, - name, - ty, - flow, - } - .into() - } - CanonicalType::Bundle(_) | CanonicalType::PhantomConst(_) => unreachable!(), - CanonicalType::AsyncReset(_) => TraceAsyncReset { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallAsyncReset { index }, - |index| SimTraceKind::BigAsyncReset { index }, - ), - name, - flow, - } - .into(), - CanonicalType::SyncReset(_) => TraceSyncReset { - location: 
self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallSyncReset { index }, - |index| SimTraceKind::BigSyncReset { index }, - ), - name, - flow, - } - .into(), - CanonicalType::Reset(_) => unreachable!(), - CanonicalType::Clock(_) => TraceClock { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallClock { index }, - |index| SimTraceKind::BigClock { index }, - ), - name, - flow, - } - .into(), - } - } - fn make_trace_decl_child( - &mut self, - instantiated_module: InstantiatedModule, - target: MakeTraceDeclTarget, - name: Interned, - source_location: SourceLocation, - ) -> TraceDecl { - match target.ty() { - CanonicalType::Array(ty) => { - let elements = Interned::from_iter((0..ty.len()).map(|index| { - self.make_trace_decl_child( - instantiated_module, - match target { - MakeTraceDeclTarget::Expr(target) => MakeTraceDeclTarget::Expr( - Expr::::from_canonical(target)[index], - ), - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start: start + ty.element().bit_width() * index, - ty: ty.element(), - }, - }, - Intern::intern_owned(format!("[{index}]")), - source_location, - ) - })); - TraceArray { - name, - elements, - ty, - flow: target.flow(), - } - .into() - } - CanonicalType::Enum(ty) => { - if ty.variants().iter().all(|v| v.ty.is_none()) { - self.make_trace_scalar(instantiated_module, target, name, source_location) - } else { - let flow = target.flow(); - let location = match target { - MakeTraceDeclTarget::Expr(target) => { - let compiled_value = self.compile_expr(instantiated_module, target); - let compiled_value = - self.compiled_expr_to_value(compiled_value, source_location); - let discriminant = self.compile_enum_discriminant( - compiled_value.map_ty(Enum::from_canonical), - source_location, - ); - TraceLocation::Scalar(self.new_sim_trace( - SimTraceKind::EnumDiscriminant { - index: discriminant, - ty, - }, - )) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => TraceLocation::Memory(TraceMemoryLocation { - id, - depth, - stride, - start, - len: ty.discriminant_bit_width(), - }), - }; - let discriminant = TraceEnumDiscriminant { - location, - name: "$tag".intern(), - ty, - flow, - }; - let non_empty_fields = - Interned::from_iter(ty.variants().into_iter().enumerate().flat_map( - |(variant_index, variant)| { - variant.ty.map(|variant_ty| { - self.make_trace_decl_child( - instantiated_module, - match target { - MakeTraceDeclTarget::Expr(target) => { - MakeTraceDeclTarget::Expr( - ops::VariantAccess::new_by_index( - Expr::::from_canonical(target), - variant_index, - ) - .to_expr(), - ) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start: start + ty.discriminant_bit_width(), - ty: variant_ty, - }, - }, - variant.name, - source_location, - ) - }) - }, - )); - TraceEnumWithFields { - name, - discriminant, - non_empty_fields, - ty, - flow, - } - .into() - } - } - CanonicalType::Bundle(ty) => { - let fields = Interned::from_iter(ty.fields().iter().zip(ty.field_offsets()).map( - |(field, field_offset)| { - self.make_trace_decl_child( - instantiated_module, - match target { - MakeTraceDeclTarget::Expr(target) => { - MakeTraceDeclTarget::Expr(Expr::field( - Expr::::from_canonical(target), - &field.name, - )) - } - MakeTraceDeclTarget::Memory { - id, 
- depth, - stride, - start, - ty: _, - } => MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start: start + field_offset, - ty: field.ty, - }, - }, - field.name, - source_location, - ) - }, - )); - TraceBundle { - name, - fields, - ty, - flow: target.flow(), - } - .into() - } - CanonicalType::UInt(_) - | CanonicalType::SInt(_) - | CanonicalType::Bool(_) - | CanonicalType::AsyncReset(_) - | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => { - self.make_trace_scalar(instantiated_module, target, name, source_location) - } - CanonicalType::PhantomConst(_) => TraceBundle { - name, - fields: Interned::default(), - ty: Bundle::new(Interned::default()), - flow: target.flow(), - } - .into(), - } - } - fn make_trace_decl( - &mut self, - instantiated_module: InstantiatedModule, - target_base: TargetBase, - ) -> TraceDecl { - let target = MakeTraceDeclTarget::Expr(target_base.to_expr()); - match target_base { - TargetBase::ModuleIO(module_io) => TraceModuleIO { - name: module_io.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - module_io.name(), - module_io.source_location(), - ) - .intern(), - ty: module_io.ty(), - flow: module_io.flow(), - } - .into(), - TargetBase::MemPort(mem_port) => { - let name = Intern::intern_owned(mem_port.port_name().to_string()); - let TraceDecl::Scope(TraceScope::Bundle(bundle)) = self.make_trace_decl_child( - instantiated_module, - target, - name, - mem_port.source_location(), - ) else { - unreachable!() - }; - TraceMemPort { - name, - bundle, - ty: mem_port.ty(), - } - .into() - } - TargetBase::Reg(reg) => TraceReg { - name: reg.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - reg.name(), - reg.source_location(), - ) - .intern(), - ty: reg.ty(), - } - .into(), - TargetBase::RegSync(reg) => TraceReg { - name: reg.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - reg.name(), - reg.source_location(), - ) - .intern(), - ty: reg.ty(), - } - .into(), - TargetBase::RegAsync(reg) => TraceReg { - name: reg.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - reg.name(), - reg.source_location(), - ) - .intern(), - ty: reg.ty(), - } - .into(), - TargetBase::Wire(wire) => TraceWire { - name: wire.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - wire.name(), - wire.source_location(), - ) - .intern(), - ty: wire.ty(), - } - .into(), - TargetBase::Instance(instance) => { - let TraceDecl::Scope(TraceScope::Bundle(instance_io)) = self.make_trace_decl_child( - instantiated_module, - target, - instance.name(), - instance.source_location(), - ) else { - unreachable!() - }; - let compiled_module = &self.modules[&InstantiatedModule::Child { - parent: instantiated_module.intern(), - instance: instance.intern(), - }]; - TraceInstance { - name: instance.name(), - instance_io, - module: compiled_module.trace_decls, - ty: instance.ty(), - } - .into() - } - } - } - fn compile_value( - &mut self, - target: TargetInInstantiatedModule, - ) -> CompiledValue { - if let Some(&retval) = self.compiled_values.get(&target) { - return retval; - } - let retval = match target.target { - Target::Base(base) => { - let unprefixed_layout = CompiledTypeLayout::get(base.canonical_ty()); - let layout = unprefixed_layout.with_prefixed_debug_names(&format!( - "{:?}.{:?}", - target.instantiated_module, - base.target_name() - )); - let range = self.insns.allocate_variable(&layout.layout); - let write = match *base 
{ - TargetBase::ModuleIO(_) - | TargetBase::MemPort(_) - | TargetBase::Wire(_) - | TargetBase::Instance(_) => None, - TargetBase::Reg(_) | TargetBase::RegSync(_) | TargetBase::RegAsync(_) => { - let write_layout = unprefixed_layout.with_prefixed_debug_names(&format!( - "{:?}.{:?}$next", - target.instantiated_module, - base.target_name() - )); - Some(( - write_layout, - self.insns.allocate_variable(&write_layout.layout), - )) - } - }; - CompiledValue { - range, - layout, - write, - } - } - Target::Child(target_child) => { - let parent = self.compile_value(TargetInInstantiatedModule { - instantiated_module: target.instantiated_module, - target: *target_child.parent(), - }); - match *target_child.path_element() { - TargetPathElement::BundleField(TargetPathBundleField { name }) => { - parent.map_ty(Bundle::from_canonical).field_by_name(name) - } - TargetPathElement::ArrayElement(TargetPathArrayElement { index }) => { - parent.map_ty(Array::from_canonical).element(index) - } - TargetPathElement::DynArrayElement(_) => unreachable!(), - } - } - }; - self.compiled_values.insert(target, retval); - retval - } - fn compiled_expr_to_value( - &mut self, - expr: CompiledExpr, - source_location: SourceLocation, - ) -> CompiledValue { - if let Some(&retval) = self.compiled_exprs_to_values.get(&expr) { - return retval; - } - assert!( - expr.static_part.layout.ty.is_passive(), - "invalid expression passed to compiled_expr_to_value -- type must be passive", - ); - let CompiledExpr { - static_part, - indexes, - } = expr; - let retval = if indexes.as_ref().is_empty() { - CompiledValue { - layout: static_part.layout, - range: static_part.range, - write: None, - } - } else { - let layout = static_part.layout.with_anonymized_debug_info(); - let retval = CompiledValue { - layout, - range: self.insns.allocate_variable(&layout.layout), - write: None, - }; - let TypeIndexRange { - small_slots, - big_slots, - } = retval.range; - self.add_assignment( - Interned::default(), - small_slots - .iter() - .zip(static_part.range.small_slots.iter()) - .map(|(dest, base)| Insn::ReadSmallIndexed { - dest, - src: StatePartArrayIndexed { - base, - indexes: indexes.small_slots, - }, - }) - .chain( - big_slots - .iter() - .zip(static_part.range.big_slots.iter()) - .map(|(dest, base)| Insn::ReadIndexed { - dest, - src: StatePartArrayIndexed { - base, - indexes: indexes.big_slots, - }, - }), - ), - source_location, - ); - retval - }; - self.compiled_exprs_to_values.insert(expr, retval); - retval - } - fn add_assignment>( - &mut self, - conditions: Interned<[Cond]>, - insns: impl IntoIterator, - source_location: SourceLocation, - ) { - let insns = Vec::from_iter(insns.into_iter().map(Into::into)); - self.assignments - .push(Assignment::new(conditions, insns, source_location)); - } - fn simple_big_expr_input( - &mut self, - instantiated_module: InstantiatedModule, - input: Expr, - ) -> StatePartIndex { - let input = self.compile_expr(instantiated_module, input); - let input = - self.compiled_expr_to_value(input, instantiated_module.leaf_module().source_location()); - assert_eq!(input.range.len(), TypeLen::A_BIG_SLOT); - input.range.big_slots.start - } - fn compile_expr_helper( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - make_insns: impl FnOnce(&mut Self, TypeIndexRange) -> Vec, - ) -> CompiledValue { - let layout = CompiledTypeLayout::get(dest_ty); - let range = self.insns.allocate_variable(&layout.layout); - let retval = CompiledValue { - layout, - range, - write: None, - }; - let insns = 
make_insns(self, range); - self.add_assignment( - Interned::default(), - insns, - instantiated_module.leaf_module().source_location(), - ); - retval - } - fn simple_nary_big_expr_helper( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - make_insns: impl FnOnce(StatePartIndex) -> Vec, - ) -> CompiledValue { - self.compile_expr_helper(instantiated_module, dest_ty, |_, dest| { - assert_eq!(dest.len(), TypeLen::A_BIG_SLOT); - make_insns(dest.big_slots.start) - }) - } - fn simple_nary_big_expr( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - inputs: [Expr; N], - make_insns: impl FnOnce( - StatePartIndex, - [StatePartIndex; N], - ) -> Vec, - ) -> CompiledValue { - let inputs = inputs.map(|input| self.simple_big_expr_input(instantiated_module, input)); - self.simple_nary_big_expr_helper(instantiated_module, dest_ty, |dest| { - make_insns(dest, inputs) - }) - } - fn compiled_value_to_dyn_array_index( - &mut self, - compiled_value: CompiledValue, - source_location: SourceLocation, - ) -> StatePartIndex { - if let Some(&retval) = self - .compiled_values_to_dyn_array_indexes - .get(&compiled_value) - { - return retval; - } - let mut ty = compiled_value.layout.ty; - ty.width = ty.width.min(SmallUInt::BITS as usize); - let retval = match compiled_value.range.len() { - TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, - TypeLen::A_BIG_SLOT => { - let debug_data = SlotDebugData { - name: Interned::default(), - ty: ty.canonical(), - }; - let dest = self - .insns - .allocate_variable(&TypeLayout { - small_slots: StatePartLayout::scalar(debug_data, ()), - big_slots: StatePartLayout::empty(), - }) - .small_slots - .start; - self.add_assignment( - Interned::default(), - vec![Insn::CastBigToArrayIndex { - dest, - src: compiled_value.range.big_slots.start, - }], - source_location, - ); - dest - } - _ => unreachable!(), - }; - self.compiled_values_to_dyn_array_indexes - .insert(compiled_value, retval); - retval - } - fn compiled_value_bool_dest_is_small( - &mut self, - compiled_value: CompiledValue, - source_location: SourceLocation, - ) -> StatePartIndex { - if let Some(&retval) = self - .compiled_value_bool_dest_is_small_map - .get(&compiled_value) - { - return retval; - } - let retval = match compiled_value.range.len() { - TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, - TypeLen::A_BIG_SLOT => { - let debug_data = SlotDebugData { - name: Interned::default(), - ty: Bool.canonical(), - }; - let dest = self - .insns - .allocate_variable(&TypeLayout { - small_slots: StatePartLayout::scalar(debug_data, ()), - big_slots: StatePartLayout::empty(), - }) - .small_slots - .start; - self.add_assignment( - Interned::default(), - vec![Insn::IsNonZeroDestIsSmall { - dest, - src: compiled_value.range.big_slots.start, - }], - source_location, - ); - dest - } - _ => unreachable!(), - }; - self.compiled_value_bool_dest_is_small_map - .insert(compiled_value, retval); - retval - } - fn compile_cast_scalar_to_bits( - &mut self, - instantiated_module: InstantiatedModule, - arg: Expr, - cast_fn: impl FnOnce(Expr) -> Expr, - ) -> CompiledValue { - let arg = Expr::::from_canonical(arg); - let retval = cast_fn(arg); - let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); - let retval = self - .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); - retval.map_ty(UInt::from_canonical) - } - fn compile_cast_aggregate_to_bits( - &mut self, - instantiated_module: InstantiatedModule, 
- parts: impl IntoIterator>, - ) -> CompiledValue { - let retval = parts - .into_iter() - .map(|part| part.cast_to_bits()) - .reduce(|accumulator, part| accumulator | (part << Expr::ty(accumulator).width)) - .unwrap_or_else(|| UInt[0].zero().to_expr()); - let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); - let retval = self - .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); - retval.map_ty(UInt::from_canonical) - } - fn compile_cast_to_bits( - &mut self, - instantiated_module: InstantiatedModule, - expr: ops::CastToBits, - ) -> CompiledValue { - match Expr::ty(expr.arg()) { - CanonicalType::UInt(_) => { - self.compile_cast_scalar_to_bits(instantiated_module, expr.arg(), |arg| arg) - } - CanonicalType::SInt(ty) => self.compile_cast_scalar_to_bits( - instantiated_module, - expr.arg(), - |arg: Expr| arg.cast_to(ty.as_same_width_uint()), - ), - CanonicalType::Bool(_) - | CanonicalType::AsyncReset(_) - | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => self.compile_cast_scalar_to_bits( - instantiated_module, - expr.arg(), - |arg: Expr| arg.cast_to(UInt[1]), - ), - CanonicalType::Array(ty) => self.compile_cast_aggregate_to_bits( - instantiated_module, - (0..ty.len()).map(|index| Expr::::from_canonical(expr.arg())[index]), - ), - CanonicalType::Enum(ty) => self - .simple_nary_big_expr( - instantiated_module, - UInt[ty.type_properties().bit_width].canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::Copy { dest, src }], - ) - .map_ty(UInt::from_canonical), - CanonicalType::Bundle(ty) => self.compile_cast_aggregate_to_bits( - instantiated_module, - ty.fields().iter().map(|field| { - Expr::field(Expr::::from_canonical(expr.arg()), &field.name) - }), - ), - CanonicalType::PhantomConst(_) => { - self.compile_cast_aggregate_to_bits(instantiated_module, []) - } - } - } - fn compile_cast_bits_to( - &mut self, - instantiated_module: InstantiatedModule, - expr: ops::CastBitsTo, - ) -> CompiledValue { - let retval = match expr.ty() { - CanonicalType::UInt(_) => Expr::canonical(expr.arg()), - CanonicalType::SInt(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::Bool(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::Array(ty) => { - let stride = ty.element().bit_width(); - Expr::::canonical( - ops::ArrayLiteral::new( - ty.element(), - Interned::from_iter((0..ty.len()).map(|index| { - let start = stride * index; - let end = start + stride; - expr.arg()[start..end].cast_bits_to(ty.element()) - })), - ) - .to_expr(), - ) - } - ty @ CanonicalType::Enum(_) => { - return self.simple_nary_big_expr( - instantiated_module, - ty, - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::Copy { dest, src }], - ); - } - CanonicalType::Bundle(ty) => Expr::canonical( - ops::BundleLiteral::new( - ty, - Interned::from_iter(ty.field_offsets().iter().zip(&ty.fields()).map( - |(&offset, &field)| { - let end = offset + field.ty.bit_width(); - expr.arg()[offset..end].cast_bits_to(field.ty) - }, - )), - ) - .to_expr(), - ), - CanonicalType::AsyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::SyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::Reset(_) => unreachable!(), - CanonicalType::Clock(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::PhantomConst(ty) => { - let _ = self.compile_expr(instantiated_module, Expr::canonical(expr.arg())); - Expr::canonical(ty.to_expr()) - } - }; - let retval = 
self.compile_expr(instantiated_module, Expr::canonical(retval)); - self.compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()) - } - fn compile_aggregate_literal( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - inputs: Interned<[Expr]>, - ) -> CompiledValue { - self.compile_expr_helper(instantiated_module, dest_ty, |this, dest| { - let mut insns = Vec::new(); - let mut offset = TypeIndex::ZERO; - for input in inputs { - let input = this.compile_expr(instantiated_module, input); - let input = this - .compiled_expr_to_value( - input, - instantiated_module.leaf_module().source_location(), - ) - .range; - insns.extend( - input.insns_for_copy_to(dest.slice(TypeIndexRange::new(offset, input.len()))), - ); - offset = offset.offset(input.len().as_index()); - } - insns - }) - } - fn compile_expr( - &mut self, - instantiated_module: InstantiatedModule, - expr: Expr, - ) -> CompiledExpr { - if let Some(&retval) = self.compiled_exprs.get(&expr) { - return retval; - } - let mut cast_bit = |arg: Expr| { - let src_signed = match Expr::ty(arg) { - CanonicalType::UInt(_) => false, - CanonicalType::SInt(_) => true, - CanonicalType::Bool(_) => false, - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(_) => unreachable!(), - CanonicalType::Bundle(_) => unreachable!(), - CanonicalType::AsyncReset(_) => false, - CanonicalType::SyncReset(_) => false, - CanonicalType::Reset(_) => false, - CanonicalType::Clock(_) => false, - CanonicalType::PhantomConst(_) => unreachable!(), - }; - let dest_signed = match Expr::ty(expr) { - CanonicalType::UInt(_) => false, - CanonicalType::SInt(_) => true, - CanonicalType::Bool(_) => false, - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(_) => unreachable!(), - CanonicalType::Bundle(_) => unreachable!(), - CanonicalType::AsyncReset(_) => false, - CanonicalType::SyncReset(_) => false, - CanonicalType::Reset(_) => false, - CanonicalType::Clock(_) => false, - CanonicalType::PhantomConst(_) => unreachable!(), - }; - self.simple_nary_big_expr(instantiated_module, Expr::ty(expr), [arg], |dest, [src]| { - match (src_signed, dest_signed) { - (false, false) | (true, true) => { - vec![Insn::Copy { dest, src }] - } - (false, true) => vec![Insn::CastToSInt { - dest, - src, - dest_width: 1, - }], - (true, false) => vec![Insn::CastToUInt { - dest, - src, - dest_width: 1, - }], - } - }) - .into() - }; - let retval: CompiledExpr<_> = match *Expr::expr_enum(expr) { - ExprEnum::UIntLiteral(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [], - |dest, []| { - vec![Insn::Const { - dest, - value: expr.to_bigint().intern_sized(), - }] - }, - ) - .into(), - ExprEnum::SIntLiteral(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [], - |dest, []| { - vec![Insn::Const { - dest, - value: expr.to_bigint().intern_sized(), - }] - }, - ) - .into(), - ExprEnum::BoolLiteral(expr) => self - .simple_nary_big_expr(instantiated_module, Bool.canonical(), [], |dest, []| { - vec![Insn::Const { - dest, - value: BigInt::from(expr).intern_sized(), - }] - }) - .into(), - ExprEnum::PhantomConst(_) => self - .compile_aggregate_literal(instantiated_module, Expr::ty(expr), Interned::default()) - .into(), - ExprEnum::BundleLiteral(literal) => self - .compile_aggregate_literal( - instantiated_module, - Expr::ty(expr), - literal.field_values(), - ) - .into(), - ExprEnum::ArrayLiteral(literal) => self - .compile_aggregate_literal( - instantiated_module, - 
Expr::ty(expr), - literal.element_values(), - ) - .into(), - ExprEnum::EnumLiteral(expr) => { - let enum_bits_ty = UInt[expr.ty().type_properties().bit_width]; - let enum_bits = if let Some(variant_value) = expr.variant_value() { - ( - UInt[expr.ty().discriminant_bit_width()] - .from_int_wrapping(expr.variant_index()), - variant_value, - ) - .cast_to_bits() - .cast_to(enum_bits_ty) - } else { - enum_bits_ty - .from_int_wrapping(expr.variant_index()) - .to_expr() - }; - self.compile_expr( - instantiated_module, - enum_bits.cast_bits_to(expr.ty().canonical()), - ) - } - ExprEnum::Uninit(expr) => self.compile_expr( - instantiated_module, - UInt[expr.ty().bit_width()].zero().cast_bits_to(expr.ty()), - ), - ExprEnum::NotU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Expr::ty(expr.arg()).canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::NotU { - dest, - src, - width: Expr::ty(expr.arg()).width(), - }] - }, - ) - .into(), - ExprEnum::NotS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Expr::ty(expr.arg()).canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::NotS { dest, src }], - ) - .into(), - ExprEnum::NotB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Expr::ty(expr.arg()).canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::NotU { - dest, - src, - width: 1, - }] - }, - ) - .into(), - ExprEnum::Neg(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::Neg { dest, src }], - ) - .into(), - ExprEnum::BitAndU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitAndS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitAndB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitOrU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitOrS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitOrB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitXorU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitXorS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], - ) - .into(), - 
ExprEnum::BitXorB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], - ) - .into(), - ExprEnum::AddU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Add { dest, lhs, rhs }], - ) - .into(), - ExprEnum::AddS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Add { dest, lhs, rhs }], - ) - .into(), - ExprEnum::SubU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| { - vec![Insn::SubU { - dest, - lhs, - rhs, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::SubS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::SubS { dest, lhs, rhs }], - ) - .into(), - ExprEnum::MulU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], - ) - .into(), - ExprEnum::MulS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DivU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DivS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], - ) - .into(), - ExprEnum::RemU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], - ) - .into(), - ExprEnum::RemS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShlU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShlS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShrU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShrS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), 
Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], - ) - .into(), - ExprEnum::FixedShlU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shl { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::FixedShlS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shl { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::FixedShrU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shr { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::FixedShrS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shr { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::CmpLtB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLeB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGtB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGeB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpEqB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpNeB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLtU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLeU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGtU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGeU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, 
[lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpEqU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpNeU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLtS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLeS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGtS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGeS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpEqS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpNeS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CastUIntToUInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToUInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastUIntToSInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToSInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastSIntToUInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToUInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastSIntToSInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToSInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastBoolToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToSyncReset(expr) => 
cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToClock(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastClockToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastClockToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastClockToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::FieldAccess(expr) => self - .compile_expr(instantiated_module, Expr::canonical(expr.base())) - .map_ty(Bundle::from_canonical) - .field_by_index(expr.field_index()), - ExprEnum::VariantAccess(variant_access) => { - let start = Expr::ty(variant_access.base()).discriminant_bit_width(); - let len = Expr::ty(expr).bit_width(); - self.compile_expr( - instantiated_module, - variant_access.base().cast_to_bits()[start..start + len] - .cast_bits_to(Expr::ty(expr)), - ) - } - ExprEnum::ArrayIndex(expr) => self - .compile_expr(instantiated_module, Expr::canonical(expr.base())) - .map_ty(Array::from_canonical) - .element(expr.element_index()), - ExprEnum::DynArrayIndex(expr) => { - let element_index = - self.compile_expr(instantiated_module, Expr::canonical(expr.element_index())); - let element_index = self.compiled_expr_to_value( - element_index, - instantiated_module.leaf_module().source_location(), - ); - let index_slot = self.compiled_value_to_dyn_array_index( - element_index.map_ty(UInt::from_canonical), - instantiated_module.leaf_module().source_location(), - ); - self.compile_expr(instantiated_module, Expr::canonical(expr.base())) - .map_ty(Array::from_canonical) - .element_dyn(index_slot) - } - ExprEnum::ReduceBitAndU(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical( - expr.arg() - .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), - ), - ) - } - .into(), - ExprEnum::ReduceBitAndS(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical( - expr.arg() - .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), - ), - ) - } - .into(), - 
ExprEnum::ReduceBitOrU(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), - ) - } - .into(), - ExprEnum::ReduceBitOrS(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), - ) - } - .into(), - ExprEnum::ReduceBitXorU(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::<1>::TYPE.canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::ReduceBitXor { - dest, - src, - input_width: Expr::ty(expr.arg()).width(), - }] - }, - ) - .into(), - ExprEnum::ReduceBitXorS(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::<1>::TYPE.canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::ReduceBitXor { - dest, - src, - input_width: Expr::ty(expr.arg()).width(), - }] - }, - ) - .into(), - ExprEnum::SliceUInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::new_dyn(expr.range().len()).canonical(), - [Expr::canonical(expr.base())], - |dest, [src]| { - vec![Insn::SliceInt { - dest, - src, - start: expr.range().start, - len: expr.range().len(), - }] - }, - ) - .into(), - ExprEnum::SliceSInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::new_dyn(expr.range().len()).canonical(), - [Expr::canonical(expr.base())], - |dest, [src]| { - vec![Insn::SliceInt { - dest, - src, - start: expr.range().start, - len: expr.range().len(), - }] - }, - ) - .into(), - ExprEnum::CastToBits(expr) => self - .compile_cast_to_bits(instantiated_module, expr) - .map_ty(CanonicalType::UInt) - .into(), - ExprEnum::CastBitsTo(expr) => { - self.compile_cast_bits_to(instantiated_module, expr).into() - } - ExprEnum::ModuleIO(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::Instance(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::Wire(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::Reg(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::RegSync(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::RegAsync(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::MemPort(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - }; - self.compiled_exprs.insert(expr, retval); - retval - } - fn compile_simple_connect( - &mut self, - conditions: Interned<[Cond]>, - lhs: CompiledExpr, - rhs: CompiledValue, - source_location: SourceLocation, - ) { - let CompiledExpr { - static_part: lhs_static_part, - indexes, - } = lhs; - let (lhs_layout, lhs_range) = lhs_static_part.write(); - assert!( - lhs_layout.ty.is_passive(), - "invalid expression passed to compile_simple_connect -- type must be passive", - ); - let TypeIndexRange { - small_slots, - big_slots, - } = lhs_range; - 
self.add_assignment( - conditions, - small_slots - .iter() - .zip(rhs.range.small_slots.iter()) - .map(|(base, src)| { - if indexes.small_slots.is_empty() { - Insn::CopySmall { dest: base, src } - } else { - Insn::WriteSmallIndexed { - dest: StatePartArrayIndexed { - base, - indexes: indexes.small_slots, - }, - src, - } - } - }) - .chain( - big_slots - .iter() - .zip(rhs.range.big_slots.iter()) - .map(|(base, src)| { - if indexes.big_slots.is_empty() { - Insn::Copy { dest: base, src } - } else { - Insn::WriteIndexed { - dest: StatePartArrayIndexed { - base, - indexes: indexes.big_slots, - }, - src, - } - } - }), - ), - source_location, - ); - } - fn compile_connect( - &mut self, - lhs_instantiated_module: InstantiatedModule, - lhs_conditions: Interned<[Cond]>, - lhs: Expr, - rhs_instantiated_module: InstantiatedModule, - rhs_conditions: Interned<[Cond]>, - mut rhs: Expr, - source_location: SourceLocation, - ) { - if Expr::ty(lhs) != Expr::ty(rhs) || !Expr::ty(lhs).is_passive() { - match Expr::ty(lhs) { - CanonicalType::UInt(lhs_ty) => { - rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); - } - CanonicalType::SInt(lhs_ty) => { - rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); - } - CanonicalType::Bool(_) => unreachable!(), - CanonicalType::Array(lhs_ty) => { - let CanonicalType::Array(rhs_ty) = Expr::ty(rhs) else { - unreachable!(); - }; - assert_eq!(lhs_ty.len(), rhs_ty.len()); - let lhs = Expr::::from_canonical(lhs); - let rhs = Expr::::from_canonical(rhs); - for index in 0..lhs_ty.len() { - self.compile_connect( - lhs_instantiated_module, - lhs_conditions, - lhs[index], - rhs_instantiated_module, - rhs_conditions, - rhs[index], - source_location, - ); - } - return; - } - CanonicalType::Enum(lhs_ty) => { - let CanonicalType::Enum(rhs_ty) = Expr::ty(rhs) else { - unreachable!(); - }; - todo!("handle connect with different enum types"); - } - CanonicalType::Bundle(lhs_ty) => { - let CanonicalType::Bundle(rhs_ty) = Expr::ty(rhs) else { - unreachable!(); - }; - assert_eq!(lhs_ty.fields().len(), rhs_ty.fields().len()); - let lhs = Expr::::from_canonical(lhs); - let rhs = Expr::::from_canonical(rhs); - for ( - field_index, - ( - BundleField { - name, - flipped, - ty: _, - }, - rhs_field, - ), - ) in lhs_ty.fields().into_iter().zip(rhs_ty.fields()).enumerate() - { - assert_eq!(name, rhs_field.name); - assert_eq!(flipped, rhs_field.flipped); - let lhs_expr = ops::FieldAccess::new_by_index(lhs, field_index).to_expr(); - let rhs_expr = ops::FieldAccess::new_by_index(rhs, field_index).to_expr(); - if flipped { - // swap lhs/rhs - self.compile_connect( - rhs_instantiated_module, - rhs_conditions, - rhs_expr, - lhs_instantiated_module, - lhs_conditions, - lhs_expr, - source_location, - ); - } else { - self.compile_connect( - lhs_instantiated_module, - lhs_conditions, - lhs_expr, - rhs_instantiated_module, - rhs_conditions, - rhs_expr, - source_location, - ); - } - } - return; - } - CanonicalType::AsyncReset(_) => unreachable!(), - CanonicalType::SyncReset(_) => unreachable!(), - CanonicalType::Reset(_) => unreachable!(), - CanonicalType::Clock(_) => unreachable!(), - CanonicalType::PhantomConst(_) => unreachable!("PhantomConst mismatch"), - } - } - let Some(target) = lhs.target() else { - unreachable!("connect lhs must have target"); - }; - let lhs_decl_conditions = self.decl_conditions[&TargetInInstantiatedModule { - instantiated_module: lhs_instantiated_module, - target: target.base().into(), - }]; - let lhs = self.compile_expr(lhs_instantiated_module, lhs); - 
let rhs = self.compile_expr(rhs_instantiated_module, rhs); - let rhs = self.compiled_expr_to_value(rhs, source_location); - self.compile_simple_connect( - lhs_conditions[lhs_decl_conditions.len()..].intern(), - lhs, - rhs, - source_location, - ); - } - fn compile_clock( - &mut self, - clk: CompiledValue, - source_location: SourceLocation, - ) -> ClockTrigger { - if let Some(&retval) = self.compiled_value_to_clock_trigger_map.get(&clk) { - return retval; - } - let mut alloc_small_slot = |part_name: &str| { - self.insns - .state_layout - .ty - .small_slots - .allocate(&StatePartLayout::scalar( - SlotDebugData { - name: Interned::default(), - ty: Bool.canonical(), - }, - (), - )) - .start - }; - let last_clk_was_low = alloc_small_slot("last_clk_was_low"); - let clk_triggered = alloc_small_slot("clk_triggered"); - let retval = ClockTrigger { - last_clk_was_low, - clk: self.compiled_value_bool_dest_is_small( - clk.map_ty(CanonicalType::Clock), - source_location, - ), - clk_triggered, - source_location, - }; - self.add_assignment( - Interned::default(), - [Insn::AndSmall { - dest: clk_triggered, - lhs: retval.clk, - rhs: last_clk_was_low, - }], - source_location, - ); - self.clock_triggers.push(retval); - self.compiled_value_to_clock_trigger_map.insert(clk, retval); - retval - } - fn compile_enum_discriminant( - &mut self, - enum_value: CompiledValue, - source_location: SourceLocation, - ) -> StatePartIndex { - if let Some(&retval) = self.enum_discriminants.get(&enum_value) { - return retval; - } - let retval_ty = Enum::new( - enum_value - .layout - .ty - .variants() - .iter() - .map(|variant| EnumVariant { - name: variant.name, - ty: None, - }) - .collect(), - ); - let retval = if retval_ty == enum_value.layout.ty - && enum_value.range.len() == TypeLen::A_SMALL_SLOT - { - enum_value.range.small_slots.start - } else { - let retval = self - .insns - .state_layout - .ty - .small_slots - .allocate(&StatePartLayout::scalar( - SlotDebugData { - name: Interned::default(), - ty: retval_ty.canonical(), - }, - (), - )) - .start; - let discriminant_bit_width = enum_value.layout.ty.discriminant_bit_width(); - let discriminant_mask = !(!0u64 << discriminant_bit_width); - let insn = match enum_value.range.len() { - TypeLen::A_BIG_SLOT => Insn::AndBigWithSmallImmediate { - dest: retval, - lhs: enum_value.range.big_slots.start, - rhs: discriminant_mask, - }, - TypeLen::A_SMALL_SLOT => { - if discriminant_bit_width == enum_value.layout.ty.type_properties().bit_width { - Insn::CopySmall { - dest: retval, - src: enum_value.range.small_slots.start, - } - } else { - Insn::AndSmallImmediate { - dest: retval, - lhs: enum_value.range.small_slots.start, - rhs: discriminant_mask, - } - } - } - _ => unreachable!(), - }; - self.add_assignment(Interned::default(), [insn], source_location); - retval - }; - self.enum_discriminants.insert(enum_value, retval); - retval - } - fn compile_stmt_reg( - &mut self, - stmt_reg: StmtReg, - instantiated_module: InstantiatedModule, - value: CompiledValue, - ) { - let StmtReg { annotations, reg } = stmt_reg; - let clk = self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().clk)); - let clk = self - .compiled_expr_to_value(clk, reg.source_location()) - .map_ty(Clock::from_canonical); - let clk = self.compile_clock(clk, reg.source_location()); - struct Dispatch; - impl ResetTypeDispatch for Dispatch { - type Input = (); - - type Output = bool; - - fn reset(self, _input: Self::Input) -> Self::Output { - unreachable!() - } - - fn sync_reset(self, _input: Self::Input) -> 
Self::Output { - false - } - - fn async_reset(self, _input: Self::Input) -> Self::Output { - true - } - } - let reset = if let Some(init) = reg.init() { - let init = self.compile_expr(instantiated_module, init); - let init = self.compiled_expr_to_value(init, reg.source_location()); - let rst = - self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().rst)); - let rst = self.compiled_expr_to_value(rst, reg.source_location()); - let rst = self.compiled_value_bool_dest_is_small(rst, reg.source_location()); - let is_async = R::dispatch((), Dispatch); - if is_async { - let cond = Expr::canonical(reg.clock_domain().rst.cast_to(Bool)); - let cond = self.compile_expr(instantiated_module, cond); - let cond = self.compiled_expr_to_value(cond, reg.source_location()); - let cond = cond.map_ty(Bool::from_canonical); - // write to the register's current value since asynchronous reset is combinational - let lhs = CompiledValue { - layout: value.layout, - range: value.range, - write: None, - } - .into(); - self.compile_simple_connect( - [Cond { - body: CondBody::IfTrue { cond }, - source_location: reg.source_location(), - }][..] - .intern(), - lhs, - init, - reg.source_location(), - ); - } - Some(RegisterReset { - is_async, - init, - rst, - }) - } else { - None - }; - self.registers.push(Register { - value, - clk_triggered: clk.clk_triggered, - reset, - source_location: reg.source_location(), - }); - } - fn compile_declaration( - &mut self, - declaration: StmtDeclaration, - parent_module: Interned, - conditions: Interned<[Cond]>, - ) -> TraceDecl { - let target_base: TargetBase = match &declaration { - StmtDeclaration::Wire(v) => v.wire.into(), - StmtDeclaration::Reg(v) => v.reg.into(), - StmtDeclaration::RegSync(v) => v.reg.into(), - StmtDeclaration::RegAsync(v) => v.reg.into(), - StmtDeclaration::Instance(v) => v.instance.into(), - }; - let target = TargetInInstantiatedModule { - instantiated_module: *parent_module, - target: target_base.into(), - }; - self.decl_conditions.insert(target, conditions); - let compiled_value = self.compile_value(target); - match declaration { - StmtDeclaration::Wire(StmtWire { annotations, wire }) => {} - StmtDeclaration::Reg(_) => { - unreachable!("Reset types were already replaced by SyncReset or AsyncReset"); - } - StmtDeclaration::RegSync(stmt_reg) => { - self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) - } - StmtDeclaration::RegAsync(stmt_reg) => { - self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) - } - StmtDeclaration::Instance(StmtInstance { - annotations, - instance, - }) => { - let inner_instantiated_module = InstantiatedModule::Child { - parent: parent_module, - instance: instance.intern_sized(), - } - .intern_sized(); - let instance_expr = instance.to_expr(); - self.compile_module(inner_instantiated_module); - for (field_index, module_io) in - instance.instantiated().module_io().into_iter().enumerate() - { - let instance_field = - ops::FieldAccess::new_by_index(instance_expr, field_index).to_expr(); - match Expr::flow(instance_field) { - Flow::Source => { - // we need to supply the value to the instance since the - // parent module expects to read from the instance - self.compile_connect( - *parent_module, - conditions, - instance_field, - *inner_instantiated_module, - Interned::default(), - module_io.module_io.to_expr(), - instance.source_location(), - ); - } - Flow::Sink => { - // we need to take the value from the instance since the - // parent module expects to write to the instance - self.compile_connect( - 
*inner_instantiated_module, - Interned::default(), - module_io.module_io.to_expr(), - *parent_module, - conditions, - instance_field, - instance.source_location(), - ); - } - Flow::Duplex => unreachable!(), - } - } - } - } - self.make_trace_decl(*parent_module, target_base) - } - fn allocate_delay_chain( - &mut self, - len: usize, - layout: &TypeLayout, - first: Option, - last: Option, - mut from_allocation: impl FnMut(TypeIndexRange) -> T, - ) -> Vec { - match (len, first, last) { - (0, _, _) => Vec::new(), - (1, Some(v), _) | (1, None, Some(v)) => vec![v], - (2, Some(first), Some(last)) => vec![first, last], - (len, first, last) => { - let inner_len = len - first.is_some() as usize - last.is_some() as usize; - first - .into_iter() - .chain( - (0..inner_len) - .map(|_| from_allocation(self.insns.allocate_variable(layout))), - ) - .chain(last) - .collect() - } - } - } - fn allocate_delay_chain_small( - &mut self, - len: usize, - ty: CanonicalType, - first: Option>, - last: Option>, - ) -> Vec> { - self.allocate_delay_chain( - len, - &TypeLayout { - small_slots: StatePartLayout::scalar( - SlotDebugData { - name: Interned::default(), - ty, - }, - (), - ), - big_slots: StatePartLayout::empty(), - }, - first, - last, - |range| range.small_slots.start, - ) - } - fn compile_memory_port_rw_helper( - &mut self, - memory: StatePartIndex, - stride: usize, - mut start: usize, - data_layout: CompiledTypeLayout, - mask_layout: CompiledTypeLayout, - mut read: Option>, - mut write: Option>, - ) { - match data_layout.body { - CompiledTypeLayoutBody::Scalar => { - let CompiledTypeLayoutBody::Scalar = mask_layout.body else { - unreachable!(); - }; - let signed = match data_layout.ty { - CanonicalType::UInt(_) => false, - CanonicalType::SInt(_) => true, - CanonicalType::Bool(_) => false, - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(_) => false, - CanonicalType::Bundle(_) => unreachable!(), - CanonicalType::AsyncReset(_) => false, - CanonicalType::SyncReset(_) => false, - CanonicalType::Reset(_) => false, - CanonicalType::Clock(_) => false, - CanonicalType::PhantomConst(_) => unreachable!(), - }; - let width = data_layout.ty.bit_width(); - if let Some(MemoryPortReadInsns { - addr, - en: _, - write_mode: _, - data, - insns, - }) = read - { - insns.push( - match data.len() { - TypeLen::A_BIG_SLOT => { - let dest = data.big_slots.start; - if signed { - Insn::MemoryReadSInt { - dest, - memory, - addr, - stride, - start, - width, - } - } else { - Insn::MemoryReadUInt { - dest, - memory, - addr, - stride, - start, - width, - } - } - } - TypeLen::A_SMALL_SLOT => { - let _dest = data.small_slots.start; - todo!("memory ports' data are always big for now"); - } - _ => unreachable!(), - } - .into(), - ); - } - if let Some(MemoryPortWriteInsns { - addr, - en: _, - write_mode: _, - data, - mask, - insns, - }) = write - { - let end_label = self.insns.new_label(); - insns.push( - match mask.len() { - TypeLen::A_BIG_SLOT => Insn::BranchIfZero { - target: end_label.0, - value: mask.big_slots.start, - }, - TypeLen::A_SMALL_SLOT => Insn::BranchIfSmallZero { - target: end_label.0, - value: mask.small_slots.start, - }, - _ => unreachable!(), - } - .into(), - ); - insns.push( - match data.len() { - TypeLen::A_BIG_SLOT => { - let value = data.big_slots.start; - if signed { - Insn::MemoryWriteSInt { - value, - memory, - addr, - stride, - start, - width, - } - } else { - Insn::MemoryWriteUInt { - value, - memory, - addr, - stride, - start, - width, - } - } - } - TypeLen::A_SMALL_SLOT => { - let _value = 
data.small_slots.start; - todo!("memory ports' data are always big for now"); - } - _ => unreachable!(), - } - .into(), - ); - insns.push(end_label.into()); - } - } - CompiledTypeLayoutBody::Array { element } => { - let CompiledTypeLayoutBody::Array { - element: mask_element, - } = mask_layout.body - else { - unreachable!(); - }; - let ty = ::from_canonical(data_layout.ty); - let element_bit_width = ty.element().bit_width(); - let element_size = element.layout.len(); - let mask_element_size = mask_element.layout.len(); - for element_index in 0..ty.len() { - self.compile_memory_port_rw_helper( - memory, - stride, - start, - *element, - *mask_element, - read.as_mut().map( - |MemoryPortReadInsns { - addr, - en, - write_mode, - data, - insns, - }| MemoryPortReadInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.index_array(element_size, element_index), - insns, - }, - ), - write.as_mut().map( - |MemoryPortWriteInsns { - addr, - en, - write_mode, - data, - mask, - insns, - }| { - MemoryPortWriteInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.index_array(element_size, element_index), - mask: mask.index_array(mask_element_size, element_index), - insns, - } - }, - ), - ); - start += element_bit_width; - } - } - CompiledTypeLayoutBody::Bundle { fields } => { - let CompiledTypeLayoutBody::Bundle { - fields: mask_fields, - } = mask_layout.body - else { - unreachable!(); - }; - assert_eq!(fields.len(), mask_fields.len()); - for (field, mask_field) in fields.into_iter().zip(mask_fields) { - let field_index_range = - TypeIndexRange::new(field.offset, field.ty.layout.len()); - let mask_field_index_range = - TypeIndexRange::new(mask_field.offset, mask_field.ty.layout.len()); - self.compile_memory_port_rw_helper( - memory, - stride, - start, - field.ty, - mask_field.ty, - read.as_mut().map( - |MemoryPortReadInsns { - addr, - en, - write_mode, - data, - insns, - }| MemoryPortReadInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.slice(field_index_range), - insns, - }, - ), - write.as_mut().map( - |MemoryPortWriteInsns { - addr, - en, - write_mode, - data, - mask, - insns, - }| { - MemoryPortWriteInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.slice(field_index_range), - mask: mask.slice(mask_field_index_range), - insns, - } - }, - ), - ); - start = start + field.ty.ty.bit_width(); - } - } - } - } - fn compile_memory_port_rw( - &mut self, - memory: StatePartIndex, - data_layout: CompiledTypeLayout, - mask_layout: CompiledTypeLayout, - mut read: Option>, - mut write: Option>, - ) { - let read_else_label = read.as_mut().map( - |MemoryPortReadInsns { - addr: _, - en, - write_mode, - data: _, - insns, - }| { - let else_label = self.insns.new_label(); - insns.push( - Insn::BranchIfSmallZero { - target: else_label.0, - value: *en, - } - .into(), - ); - if let Some(write_mode) = *write_mode { - insns.push( - Insn::BranchIfSmallNonZero { - target: else_label.0, - value: write_mode, - } - .into(), - ); - } - else_label - }, - ); - let write_end_label = write.as_mut().map( - |MemoryPortWriteInsns { - addr: _, - en, - write_mode, - data: _, - mask: _, - insns, - }| { - let end_label = self.insns.new_label(); - insns.push( - Insn::BranchIfSmallZero { - target: end_label.0, - value: *en, - } - .into(), - ); - if let Some(write_mode) = *write_mode { - insns.push( - Insn::BranchIfSmallZero { - target: end_label.0, - value: write_mode, - } - .into(), - ); - } - end_label - }, - ); - self.compile_memory_port_rw_helper( - memory, 
- data_layout.ty.bit_width(), - 0, - data_layout, - mask_layout, - read.as_mut().map( - |MemoryPortReadInsns { - addr, - en, - write_mode, - data, - insns, - }| MemoryPortReadInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: *data, - insns: *insns, - }, - ), - write.as_mut().map( - |MemoryPortWriteInsns { - addr, - en, - write_mode, - data, - mask, - insns, - }| MemoryPortWriteInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: *data, - mask: *mask, - insns: *insns, - }, - ), - ); - if let ( - Some(else_label), - Some(MemoryPortReadInsns { - addr: _, - en: _, - write_mode: _, - data, - insns, - }), - ) = (read_else_label, read) - { - let end_label = self.insns.new_label(); - insns.push( - Insn::Branch { - target: end_label.0, - } - .into(), - ); - insns.push(else_label.into()); - let TypeIndexRange { - small_slots, - big_slots, - } = data; - for dest in small_slots.iter() { - insns.push(Insn::ConstSmall { dest, value: 0 }.into()); - } - for dest in big_slots.iter() { - insns.push( - Insn::Const { - dest, - value: BigInt::ZERO.intern_sized(), - } - .into(), - ); - } - insns.push(end_label.into()); - } - if let (Some(end_label), Some(write)) = (write_end_label, write) { - write.insns.push(end_label.into()); - } - } - fn compile_memory( - &mut self, - mem: Mem, - instantiated_module: InstantiatedModule, - conditions: Interned<[Cond]>, - trace_decls: &mut Vec, - ) { - let data_layout = CompiledTypeLayout::get(mem.array_type().element()); - let mask_layout = CompiledTypeLayout::get(mem.array_type().element().mask_type()); - let read_latency_plus_1 = mem - .read_latency() - .checked_add(1) - .expect("read latency too big"); - let write_latency_plus_1 = mem - .write_latency() - .get() - .checked_add(1) - .expect("write latency too big"); - let read_cycle = match mem.read_under_write() { - ReadUnderWrite::Old => 0, - ReadUnderWrite::New => mem.read_latency(), - ReadUnderWrite::Undefined => mem.read_latency() / 2, // something other than Old or New - }; - let memory = self - .insns - .state_layout - .memories - .allocate(&StatePartLayout::scalar( - (), - MemoryData { - array_type: mem.array_type(), - data: mem.initial_value().unwrap_or_else(|| { - Intern::intern_owned(BitVec::repeat( - false, - mem.array_type().type_properties().bit_width, - )) - }), - }, - )) - .start; - let (ports, trace_ports) = mem - .ports() - .iter() - .map(|&port| { - let target_base = TargetBase::MemPort(port); - let target = TargetInInstantiatedModule { - instantiated_module, - target: target_base.into(), - }; - self.decl_conditions.insert(target, conditions); - let TraceDecl::Scope(TraceScope::MemPort(trace_port)) = - self.make_trace_decl(instantiated_module, target_base) - else { - unreachable!(); - }; - let clk = Expr::field(port.to_expr(), "clk"); - let clk = self.compile_expr(instantiated_module, clk); - let clk = self.compiled_expr_to_value(clk, mem.source_location()); - let clk_triggered = self - .compile_clock(clk.map_ty(Clock::from_canonical), mem.source_location()) - .clk_triggered; - let en = Expr::field(port.to_expr(), "en"); - let en = self.compile_expr(instantiated_module, en); - let en = self.compiled_expr_to_value(en, mem.source_location()); - let en = self.compiled_value_bool_dest_is_small(en, mem.source_location()); - let addr = Expr::field(port.to_expr(), "addr"); - let addr = self.compile_expr(instantiated_module, addr); - let addr = self.compiled_expr_to_value(addr, mem.source_location()); - let addr_ty = addr.layout.ty; - let addr = 
self.compiled_value_to_dyn_array_index( - addr.map_ty(UInt::from_canonical), - mem.source_location(), - ); - let read_data = port.port_kind().rdata_name().map(|name| { - let read_data = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let read_data = self.compiled_expr_to_value(read_data, mem.source_location()); - read_data.range - }); - let write_data = port.port_kind().wdata_name().map(|name| { - let write_data = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let write_data = self.compiled_expr_to_value(write_data, mem.source_location()); - write_data.range - }); - let write_mask = port.port_kind().wmask_name().map(|name| { - let write_mask = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let write_mask = self.compiled_expr_to_value(write_mask, mem.source_location()); - write_mask.range - }); - let write_mode = port.port_kind().wmode_name().map(|name| { - let write_mode = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let write_mode = self.compiled_expr_to_value(write_mode, mem.source_location()); - self.compiled_value_bool_dest_is_small(write_mode, mem.source_location()) - }); - struct PortParts { - en_delayed_len: usize, - addr_delayed_len: usize, - read_data_delayed_len: usize, - write_data_delayed_len: usize, - write_mask_delayed_len: usize, - write_mode_delayed_len: usize, - read_cycle: Option, - write_cycle: Option, - } - let PortParts { - en_delayed_len, - addr_delayed_len, - read_data_delayed_len, - write_data_delayed_len, - write_mask_delayed_len, - write_mode_delayed_len, - read_cycle, - write_cycle, - } = match port.port_kind() { - PortKind::ReadOnly => PortParts { - en_delayed_len: read_cycle + 1, - addr_delayed_len: read_cycle + 1, - read_data_delayed_len: read_latency_plus_1 - read_cycle, - write_data_delayed_len: 0, - write_mask_delayed_len: 0, - write_mode_delayed_len: 0, - read_cycle: Some(read_cycle), - write_cycle: None, - }, - PortKind::WriteOnly => PortParts { - en_delayed_len: write_latency_plus_1, - addr_delayed_len: write_latency_plus_1, - read_data_delayed_len: 0, - write_data_delayed_len: write_latency_plus_1, - write_mask_delayed_len: write_latency_plus_1, - write_mode_delayed_len: 0, - read_cycle: None, - write_cycle: Some(mem.write_latency().get()), - }, - PortKind::ReadWrite => { - let can_rw_at_end = match mem.read_under_write() { - ReadUnderWrite::Old => false, - ReadUnderWrite::New | ReadUnderWrite::Undefined => true, - }; - let latency_plus_1 = read_latency_plus_1; - if latency_plus_1 != write_latency_plus_1 || !can_rw_at_end { - todo!( - "not sure what to do, issue: \ - https://github.com/chipsalliance/firrtl-spec/issues/263" - ); - } - PortParts { - en_delayed_len: latency_plus_1, - addr_delayed_len: latency_plus_1, - read_data_delayed_len: 1, - write_data_delayed_len: latency_plus_1, - write_mask_delayed_len: latency_plus_1, - write_mode_delayed_len: latency_plus_1, - read_cycle: Some(latency_plus_1 - 1), - write_cycle: Some(latency_plus_1 - 1), - } - } - }; - let addr_delayed = self.allocate_delay_chain_small( - addr_delayed_len, - addr_ty.canonical(), - Some(addr), - None, - ); - let en_delayed = self.allocate_delay_chain_small( - en_delayed_len, - Bool.canonical(), - Some(en), - None, - ); - let read_data_delayed = self.allocate_delay_chain( - read_data_delayed_len, - &data_layout.layout, - None, - read_data, - |v| v, - ); - let write_data_delayed = self.allocate_delay_chain( - write_data_delayed_len, - &data_layout.layout, 
- write_data, - None, - |v| v, - ); - let write_mask_delayed = self.allocate_delay_chain( - write_mask_delayed_len, - &mask_layout.layout, - write_mask, - None, - |v| v, - ); - let write_mode_delayed = self.allocate_delay_chain_small( - write_mode_delayed_len, - Bool.canonical(), - write_mode, - None, - ); - let mut read_insns = Vec::new(); - let mut write_insns = Vec::new(); - self.compile_memory_port_rw( - memory, - data_layout, - mask_layout, - read_cycle.map(|read_cycle| MemoryPortReadInsns { - addr: addr_delayed[read_cycle], - en: en_delayed[read_cycle], - write_mode: write_mode_delayed.get(read_cycle).copied(), - data: read_data_delayed[0], - insns: &mut read_insns, - }), - write_cycle.map(|write_cycle| MemoryPortWriteInsns { - addr: addr_delayed[write_cycle], - en: en_delayed[write_cycle], - write_mode: write_mode_delayed.get(write_cycle).copied(), - data: write_data_delayed[write_cycle], - mask: write_mask_delayed[write_cycle], - insns: &mut write_insns, - }), - ); - self.add_assignment(Interned::default(), read_insns, mem.source_location()); - ( - MemoryPort { - clk_triggered, - addr_delayed, - en_delayed, - data_layout, - read_data_delayed, - write_data_delayed, - write_mask_delayed, - write_mode_delayed, - write_insns, - }, - trace_port, - ) - }) - .unzip(); - let name = mem.scoped_name().1.0; - let id = TraceMemoryId(self.memories.len()); - let stride = mem.array_type().element().bit_width(); - let trace = TraceMem { - id, - name, - stride, - element_type: self - .make_trace_decl_child( - instantiated_module, - MakeTraceDeclTarget::Memory { - id, - depth: mem.array_type().len(), - stride, - start: 0, - ty: mem.array_type().element(), - }, - name, - mem.source_location(), - ) - .intern_sized(), - ports: Intern::intern_owned(trace_ports), - array_type: mem.array_type(), - }; - trace_decls.push(trace.into()); - self.memories.push(Memory { - mem, - memory, - trace, - ports, - }); - } - fn compile_block( - &mut self, - parent_module: Interned, - block: Block, - conditions: Interned<[Cond]>, - trace_decls: &mut Vec, - ) { - let Block { memories, stmts } = block; - for memory in memories { - self.compile_memory(memory, *parent_module, conditions, trace_decls); - } - for stmt in stmts { - match stmt { - Stmt::Connect(StmtConnect { - lhs, - rhs, - source_location, - }) => self.compile_connect( - *parent_module, - conditions, - lhs, - *parent_module, - conditions, - rhs, - source_location, - ), - Stmt::Formal(StmtFormal { .. 
}) => todo!("implement simulating formal statements"), - Stmt::If(StmtIf { - cond, - source_location, - blocks: [then_block, else_block], - }) => { - let cond = self.compile_expr(*parent_module, Expr::canonical(cond)); - let cond = self.compiled_expr_to_value(cond, source_location); - let cond = cond.map_ty(Bool::from_canonical); - self.compile_block( - parent_module, - then_block, - Interned::from_iter(conditions.iter().copied().chain([Cond { - body: CondBody::IfTrue { cond }, - source_location, - }])), - trace_decls, - ); - self.compile_block( - parent_module, - else_block, - Interned::from_iter(conditions.iter().copied().chain([Cond { - body: CondBody::IfFalse { cond }, - source_location, - }])), - trace_decls, - ); - } - Stmt::Match(StmtMatch { - expr, - source_location, - blocks, - }) => { - let enum_expr = self.compile_expr(*parent_module, Expr::canonical(expr)); - let enum_expr = self.compiled_expr_to_value(enum_expr, source_location); - let enum_expr = enum_expr.map_ty(Enum::from_canonical); - let discriminant = self.compile_enum_discriminant(enum_expr, source_location); - for (variant_index, block) in blocks.into_iter().enumerate() { - self.compile_block( - parent_module, - block, - Interned::from_iter(conditions.iter().copied().chain([Cond { - body: CondBody::MatchArm { - discriminant, - variant_index, - }, - source_location, - }])), - trace_decls, - ); - } - } - Stmt::Declaration(declaration) => { - trace_decls.push(self.compile_declaration( - declaration, - parent_module, - conditions, - )); - } - } - } - } - fn compile_module(&mut self, module: Interned) -> &CompiledModule { - let mut trace_decls = Vec::new(); - let module_io = module - .leaf_module() - .module_io() - .iter() - .map( - |&AnnotatedModuleIO { - annotations: _, - module_io, - }| { - let target = TargetInInstantiatedModule { - instantiated_module: *module, - target: Target::from(module_io), - }; - self.decl_conditions.insert(target, Interned::default()); - trace_decls.push(self.make_trace_decl(*module, module_io.into())); - self.compile_value(target) - }, - ) - .collect(); - match module.leaf_module().body() { - ModuleBody::Normal(NormalModuleBody { body }) => { - self.compile_block(module, body, Interned::default(), &mut trace_decls); - } - ModuleBody::Extern(ExternModuleBody { - verilog_name: _, - parameters: _, - simulation, - }) => { - let Some(simulation) = simulation else { - panic!( - "can't simulate extern module without extern_module_simulation: {}", - module.leaf_module().source_location() - ); - }; - self.extern_modules.push(CompiledExternModule { - module_io_targets: module - .leaf_module() - .module_io() - .iter() - .map(|v| Target::from(v.module_io)) - .collect(), - module_io, - simulation, - }); - } - } - let hashbrown::hash_map::Entry::Vacant(entry) = self.modules.entry(*module) else { - unreachable!("compiled same instantiated module twice"); - }; - entry.insert(CompiledModule { - module_io, - trace_decls: TraceModule { - name: module.leaf_module().name(), - children: Intern::intern_owned(trace_decls), - }, - }) - } - fn process_assignments(&mut self) { - self.assignments - .finalize(self.insns.state_layout().ty.clone().into()); - if let Some(DebugOpaque(dump_assignments_dot)) = &self.dump_assignments_dot { - let graph = - petgraph::graph::DiGraph::<_, _, usize>::from_elements(self.assignments.elements()); - dump_assignments_dot(&petgraph::dot::Dot::new(&graph)); - } - let assignments_order: Vec<_> = match petgraph::algo::toposort(&self.assignments, None) { - Ok(nodes) => nodes - .into_iter() - 
.filter_map(|n| match n { - AssignmentOrSlotIndex::AssignmentIndex(v) => Some(v), - _ => None, - }) - .collect(), - Err(e) => match e.node_id() { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => panic!( - "combinatorial logic cycle detected at: {}", - self.assignments.assignments()[assignment_index].source_location, - ), - AssignmentOrSlotIndex::SmallSlot(slot) => panic!( - "combinatorial logic cycle detected through: {}", - self.insns.state_layout().ty.small_slots.debug_data[slot.as_usize()].name, - ), - AssignmentOrSlotIndex::BigSlot(slot) => panic!( - "combinatorial logic cycle detected through: {}", - self.insns.state_layout().ty.big_slots.debug_data[slot.as_usize()].name, - ), - }, - }; - struct CondStackEntry<'a> { - cond: &'a Cond, - end_label: Label, - } - let mut cond_stack = Vec::>::new(); - for assignment_index in assignments_order { - let Assignment { - inputs: _, - outputs: _, - conditions, - insns, - source_location, - } = &self.assignments.assignments()[assignment_index]; - let mut same_len = 0; - for (index, (entry, cond)) in cond_stack.iter().zip(conditions).enumerate() { - if entry.cond != cond { - break; - } - same_len = index + 1; - } - while cond_stack.len() > same_len { - let CondStackEntry { cond: _, end_label } = - cond_stack.pop().expect("just checked len"); - self.insns.define_label_at_next_insn(end_label); - } - for cond in &conditions[cond_stack.len()..] { - let end_label = self.insns.new_label(); - match cond.body { - CondBody::IfTrue { cond: cond_value } - | CondBody::IfFalse { cond: cond_value } => { - let (branch_if_zero, branch_if_non_zero) = match cond_value.range.len() { - TypeLen::A_SMALL_SLOT => ( - Insn::BranchIfSmallZero { - target: end_label.0, - value: cond_value.range.small_slots.start, - }, - Insn::BranchIfSmallNonZero { - target: end_label.0, - value: cond_value.range.small_slots.start, - }, - ), - TypeLen::A_BIG_SLOT => ( - Insn::BranchIfZero { - target: end_label.0, - value: cond_value.range.big_slots.start, - }, - Insn::BranchIfNonZero { - target: end_label.0, - value: cond_value.range.big_slots.start, - }, - ), - _ => unreachable!(), - }; - self.insns.push( - if let CondBody::IfTrue { .. 
} = cond.body { - branch_if_zero - } else { - branch_if_non_zero - }, - cond.source_location, - ); - } - CondBody::MatchArm { - discriminant, - variant_index, - } => { - self.insns.push( - Insn::BranchIfSmallNeImmediate { - target: end_label.0, - lhs: discriminant, - rhs: variant_index as _, - }, - cond.source_location, - ); - } - } - cond_stack.push(CondStackEntry { cond, end_label }); - } - self.insns.extend(insns.iter().copied(), *source_location); - } - for CondStackEntry { cond: _, end_label } in cond_stack { - self.insns.define_label_at_next_insn(end_label); - } - } - fn process_clocks(&mut self) -> Interned<[StatePartIndex]> { - mem::take(&mut self.clock_triggers) - .into_iter() - .map( - |ClockTrigger { - last_clk_was_low, - clk, - clk_triggered, - source_location, - }| { - self.insns.push( - Insn::XorSmallImmediate { - dest: last_clk_was_low, - lhs: clk, - rhs: 1, - }, - source_location, - ); - clk_triggered - }, - ) - .collect() - } - fn process_registers(&mut self) { - for Register { - value, - clk_triggered, - reset, - source_location, - } in mem::take(&mut self.registers) - { - match reset { - Some(RegisterReset { - is_async, - init, - rst, - }) => { - let reg_end = self.insns.new_label(); - let reg_reset = self.insns.new_label(); - let branch_if_reset = Insn::BranchIfSmallNonZero { - target: reg_reset.0, - value: rst, - }; - let branch_if_not_triggered = Insn::BranchIfSmallZero { - target: reg_end.0, - value: clk_triggered, - }; - if is_async { - self.insns.push(branch_if_reset, source_location); - self.insns.push(branch_if_not_triggered, source_location); - } else { - self.insns.push(branch_if_not_triggered, source_location); - self.insns.push(branch_if_reset, source_location); - } - self.insns.extend( - value.range.insns_for_copy_from(value.write_value().range), - source_location, - ); - self.insns - .push(Insn::Branch { target: reg_end.0 }, source_location); - self.insns.define_label_at_next_insn(reg_reset); - self.insns - .extend(value.range.insns_for_copy_from(init.range), source_location); - self.insns.define_label_at_next_insn(reg_end); - } - None => { - let reg_end = self.insns.new_label(); - self.insns.push( - Insn::BranchIfSmallZero { - target: reg_end.0, - value: clk_triggered, - }, - source_location, - ); - self.insns.extend( - value.range.insns_for_copy_from(value.write_value().range), - source_location, - ); - self.insns.define_label_at_next_insn(reg_end); - } - } - } - } - fn process_memories(&mut self) { - for memory_index in 0..self.memories.len() { - let Memory { - mem, - memory: _, - trace: _, - ref mut ports, - } = self.memories[memory_index]; - for MemoryPort { - clk_triggered, - addr_delayed, - en_delayed, - data_layout: _, - read_data_delayed, - write_data_delayed, - write_mask_delayed, - write_mode_delayed, - write_insns, - } in mem::take(ports) - { - let port_end = self.insns.new_label(); - let small_shift_reg = - |this: &mut Self, values: &[StatePartIndex]| { - for pair in values.windows(2).rev() { - this.insns.push( - Insn::CopySmall { - dest: pair[1], - src: pair[0], - }, - mem.source_location(), - ); - } - }; - let shift_reg = |this: &mut Self, values: &[TypeIndexRange]| { - for pair in values.windows(2).rev() { - this.insns - .extend(pair[0].insns_for_copy_to(pair[1]), mem.source_location()); - } - }; - self.insns.push( - Insn::BranchIfSmallZero { - target: port_end.0, - value: clk_triggered, - }, - mem.source_location(), - ); - small_shift_reg(self, &addr_delayed); - small_shift_reg(self, &en_delayed); - shift_reg(self, &write_data_delayed); - 
shift_reg(self, &write_mask_delayed); - small_shift_reg(self, &write_mode_delayed); - shift_reg(self, &read_data_delayed); - self.insns.extend(write_insns, mem.source_location()); - self.insns.define_label_at_next_insn(port_end); - } - } - } - pub fn compile(mut self) -> Compiled { - let base_module = - *self.compile_module(InstantiatedModule::Base(self.base_module).intern_sized()); - self.process_assignments(); - self.process_registers(); - self.process_memories(); - let clocks_triggered = self.process_clocks(); - self.insns - .push(Insn::Return, self.base_module.source_location()); - Compiled { - insns: Insns::from(self.insns).intern_sized(), - base_module, - extern_modules: Intern::intern_owned(self.extern_modules), - io: Instance::new_unchecked( - ScopedNameId( - NameId("".intern(), Id::new()), - self.original_base_module.name_id(), - ), - self.original_base_module, - self.original_base_module.source_location(), - ), - traces: SimTraces(Intern::intern_owned(self.traces.0)), - trace_memories: Interned::from_iter(self.memories.iter().map( - |&Memory { - mem: _, - memory, - trace, - ports: _, - }| (memory, trace), - )), - clocks_triggered, - } - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -struct CompiledModule { - module_io: Interned<[CompiledValue]>, - trace_decls: TraceModule, -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct Compiled { - insns: Interned>, - base_module: CompiledModule, - extern_modules: Interned<[CompiledExternModule]>, - io: Instance, - traces: SimTraces]>>, - trace_memories: Interned<[(StatePartIndex, TraceMem)]>, - clocks_triggered: Interned<[StatePartIndex]>, -} - -impl Compiled { - pub fn new(module: Interned>) -> Self { - Self::from_canonical(Compiler::new(module.canonical().intern()).compile()) - } - pub fn canonical(self) -> Compiled { - let Self { - insns, - base_module, - extern_modules, - io, - traces, - trace_memories, - clocks_triggered, - } = self; - Compiled { - insns, - base_module, - extern_modules, - io: Instance::from_canonical(io.canonical()), - traces, - trace_memories, - clocks_triggered, - } - } - pub fn from_canonical(canonical: Compiled) -> Self { - let Compiled { - insns, - base_module, - extern_modules, - io, - traces, - trace_memories, - clocks_triggered, - } = canonical; - Self { - insns, - base_module, - extern_modules, - io: Instance::from_canonical(io.canonical()), - traces, - trace_memories, - clocks_triggered, - } - } -} +pub use compiler::{Compiled, Compiler}; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TraceScalarId(usize); @@ -5794,14 +749,14 @@ where } #[derive(Clone, PartialEq, Eq, Hash, Debug)] -struct SimTrace { +pub(crate) struct SimTrace { kind: K, state: S, last_state: S, } #[derive(Copy, Clone, PartialEq, Eq, Hash)] -struct SimTraces(T); +pub(crate) struct SimTraces(T); impl fmt::Debug for SimTraces where @@ -5845,7 +800,7 @@ impl SimTraceDebug for SimTrace { } #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -enum SimTraceKind { +pub(crate) enum SimTraceKind { BigUInt { index: StatePartIndex, ty: UInt, diff --git a/crates/fayalite/src/sim/compiler.rs b/crates/fayalite/src/sim/compiler.rs new file mode 100644 index 0000000..dd06267 --- /dev/null +++ b/crates/fayalite/src/sim/compiler.rs @@ -0,0 +1,5087 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +// See Notices.txt for copyright information + +//! 
Compiler to Interpreter IR for Fayalite Simulation + +use crate::{ + bundle::{BundleField, BundleType}, + enum_::{EnumType, EnumVariant}, + expr::{ + ExprEnum, Flow, ops, + target::{ + GetTarget, Target, TargetBase, TargetPathArrayElement, TargetPathBundleField, + TargetPathElement, + }, + }, + int::BoolOrIntType, + intern::{Intern, Interned, Memoize}, + memory::PortKind, + module::{ + AnnotatedModuleIO, Block, ExternModuleBody, Id, InstantiatedModule, ModuleBody, NameId, + NormalModuleBody, ScopedNameId, Stmt, StmtConnect, StmtDeclaration, StmtFormal, StmtIf, + StmtInstance, StmtMatch, StmtReg, StmtWire, TargetInInstantiatedModule, + transform::deduce_resets::deduce_resets, + }, + prelude::*, + reset::{ResetType, ResetTypeDispatch}, + sim::{ + ExternModuleSimulation, SimTrace, SimTraceKind, SimTraces, TraceArray, TraceAsyncReset, + TraceBool, TraceBundle, TraceClock, TraceDecl, TraceEnumDiscriminant, TraceEnumWithFields, + TraceFieldlessEnum, TraceInstance, TraceLocation, TraceMem, TraceMemPort, TraceMemoryId, + TraceMemoryLocation, TraceModule, TraceModuleIO, TraceReg, TraceSInt, TraceScalarId, + TraceScope, TraceSyncReset, TraceUInt, TraceWire, + interpreter::{ + Insn, InsnField, InsnFieldKind, InsnFieldType, InsnOrLabel, Insns, InsnsBuilding, + InsnsBuildingDone, InsnsBuildingKind, Label, MemoryData, SlotDebugData, SmallUInt, + StatePartArrayIndex, StatePartArrayIndexed, StatePartIndex, StatePartIndexRange, + StatePartKind, StatePartKindBigSlots, StatePartKindMemories, StatePartKindSmallSlots, + StatePartLayout, StatePartLen, StatePartsValue, TypeArrayIndex, TypeArrayIndexes, + TypeIndex, TypeIndexRange, TypeLayout, TypeLen, TypeParts, + }, + }, + ty::StaticType, + util::HashMap, +}; +use bitvec::vec::BitVec; +use num_bigint::BigInt; +use petgraph::{ + data::FromElements, + visit::{ + EdgeRef, GraphBase, IntoEdgeReferences, IntoNeighbors, IntoNeighborsDirected, + IntoNodeIdentifiers, IntoNodeReferences, NodeRef, VisitMap, Visitable, + }, +}; +use std::{collections::BTreeSet, fmt, hash::Hash, marker::PhantomData, mem, ops::IndexMut}; + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +enum CondBody { + IfTrue { + cond: CompiledValue, + }, + IfFalse { + cond: CompiledValue, + }, + MatchArm { + discriminant: StatePartIndex, + variant_index: usize, + }, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +struct Cond { + body: CondBody, + source_location: SourceLocation, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub(crate) struct CompiledBundleField { + pub(crate) offset: TypeIndex, + pub(crate) ty: CompiledTypeLayout, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub(crate) enum CompiledTypeLayoutBody { + Scalar, + Array { + /// debug names are ignored, use parent's layout instead + element: Interned>, + }, + Bundle { + /// debug names are ignored, use parent's layout instead + fields: Interned<[CompiledBundleField]>, + }, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub(crate) struct CompiledTypeLayout { + pub(crate) ty: T, + pub(crate) layout: TypeLayout, + pub(crate) body: CompiledTypeLayoutBody, +} + +impl CompiledTypeLayout { + fn with_prefixed_debug_names(self, prefix: &str) -> Self { + let Self { ty, layout, body } = self; + Self { + ty, + layout: layout.with_prefixed_debug_names(prefix), + body, + } + } + fn with_anonymized_debug_info(self) -> Self { + let Self { ty, layout, body } = self; + Self { + ty, + layout: layout.with_anonymized_debug_info(), + body, + } + } + fn get(ty: T) -> Self { + #[derive(Clone, Copy, PartialEq, Eq, 
Hash, Debug)] + struct MyMemoize; + impl Memoize for MyMemoize { + type Input = CanonicalType; + type InputOwned = CanonicalType; + type Output = CompiledTypeLayout; + + fn inner(self, input: &Self::Input) -> Self::Output { + match input { + CanonicalType::UInt(_) + | CanonicalType::SInt(_) + | CanonicalType::Bool(_) + | CanonicalType::Enum(_) + | CanonicalType::AsyncReset(_) + | CanonicalType::SyncReset(_) + | CanonicalType::Reset(_) + | CanonicalType::Clock(_) => { + let mut layout = TypeLayout::empty(); + let debug_data = SlotDebugData { + name: Interned::default(), + ty: *input, + }; + layout.big_slots = StatePartLayout::scalar(debug_data, ()); + CompiledTypeLayout { + ty: *input, + layout: layout.into(), + body: CompiledTypeLayoutBody::Scalar, + } + } + CanonicalType::Array(array) => { + let mut layout = TypeLayout::empty(); + let element = CompiledTypeLayout::get(array.element()).intern_sized(); + for index in 0..array.len() { + layout.allocate( + &element + .layout + .with_prefixed_debug_names(&format!("[{index}]")), + ); + } + CompiledTypeLayout { + ty: *input, + layout: layout.into(), + body: CompiledTypeLayoutBody::Array { element }, + } + } + CanonicalType::PhantomConst(_) => { + let unit_layout = CompiledTypeLayout::get(()); + CompiledTypeLayout { + ty: *input, + layout: unit_layout.layout, + body: unit_layout.body, + } + } + CanonicalType::Bundle(bundle) => { + let mut layout = TypeLayout::empty(); + let fields = bundle + .fields() + .iter() + .map( + |BundleField { + name, + flipped: _, + ty, + }| { + let ty = CompiledTypeLayout::get(*ty); + let offset = layout + .allocate( + &ty.layout + .with_prefixed_debug_names(&format!(".{name}")), + ) + .start(); + CompiledBundleField { offset, ty } + }, + ) + .collect(); + CompiledTypeLayout { + ty: *input, + layout: layout.into(), + body: CompiledTypeLayoutBody::Bundle { fields }, + } + } + } + } + } + let CompiledTypeLayout { + ty: _, + layout, + body, + } = MyMemoize.get_owned(ty.canonical()); + Self { ty, layout, body } + } +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub(crate) struct CompiledValue { + pub(crate) layout: CompiledTypeLayout, + pub(crate) range: TypeIndexRange, + pub(crate) write: Option<(CompiledTypeLayout, TypeIndexRange)>, +} + +impl CompiledValue { + fn write(self) -> (CompiledTypeLayout, TypeIndexRange) { + self.write.unwrap_or((self.layout, self.range)) + } + fn write_value(self) -> Self { + let (layout, range) = self.write(); + Self { + layout, + range, + write: None, + } + } + fn map( + self, + mut f: impl FnMut( + CompiledTypeLayout, + TypeIndexRange, + ) -> (CompiledTypeLayout, TypeIndexRange), + ) -> CompiledValue { + let (layout, range) = f(self.layout, self.range); + CompiledValue { + layout, + range, + write: self.write.map(|(layout, range)| f(layout, range)), + } + } + pub(crate) fn map_ty(self, mut f: impl FnMut(T) -> U) -> CompiledValue { + self.map(|CompiledTypeLayout { ty, layout, body }, range| { + ( + CompiledTypeLayout { + ty: f(ty), + layout, + body, + }, + range, + ) + }) + } +} + +impl CompiledValue { + fn field_by_index(self, field_index: usize) -> CompiledValue { + self.map(|layout, range| { + let CompiledTypeLayout { + ty: _, + layout: _, + body: CompiledTypeLayoutBody::Bundle { fields }, + } = layout + else { + unreachable!(); + }; + ( + fields[field_index].ty, + range.slice(TypeIndexRange::new( + fields[field_index].offset, + fields[field_index].ty.layout.len(), + )), + ) + }) + } + pub(crate) fn field_by_name(self, name: Interned) -> CompiledValue { + 
self.field_by_index(self.layout.ty.name_indexes()[&name]) + } +} + +impl CompiledValue { + pub(crate) fn element(self, index: usize) -> CompiledValue { + self.map(|layout, range| { + let CompiledTypeLayoutBody::Array { element } = layout.body else { + unreachable!(); + }; + (*element, range.index_array(element.layout.len(), index)) + }) + } + fn element_dyn( + self, + index_slot: StatePartIndex, + ) -> CompiledExpr { + CompiledExpr::from(self).element_dyn(index_slot) + } +} + +#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] +struct CompiledExpr { + static_part: CompiledValue, + indexes: TypeArrayIndexes, +} + +impl From> for CompiledExpr { + fn from(static_part: CompiledValue) -> Self { + Self { + static_part, + indexes: TypeArrayIndexes::default(), + } + } +} + +impl CompiledExpr { + fn map_ty(self, f: impl FnMut(T) -> U) -> CompiledExpr { + let Self { + static_part, + indexes, + } = self; + CompiledExpr { + static_part: static_part.map_ty(f), + indexes, + } + } + fn add_target_without_indexes_to_set(self, inputs: &mut SlotSet) { + let Self { + static_part, + indexes, + } = self; + indexes.as_ref().for_each_offset(|offset| { + inputs.extend([static_part.range.offset(offset)]); + }); + } + fn add_target_and_indexes_to_set(self, inputs: &mut SlotSet) { + let Self { + static_part: _, + indexes, + } = self; + self.add_target_without_indexes_to_set(inputs); + inputs.extend(indexes.as_ref().iter()); + } +} + +impl CompiledExpr { + fn field_by_index(self, field_index: usize) -> CompiledExpr { + CompiledExpr { + static_part: self.static_part.field_by_index(field_index), + indexes: self.indexes, + } + } + fn field_by_name(self, name: Interned) -> CompiledExpr { + CompiledExpr { + static_part: self.static_part.field_by_name(name), + indexes: self.indexes, + } + } +} + +impl CompiledExpr { + fn element(self, index: usize) -> CompiledExpr { + CompiledExpr { + static_part: self.static_part.element(index), + indexes: self.indexes, + } + } + fn element_dyn( + self, + index_slot: StatePartIndex, + ) -> CompiledExpr { + let CompiledTypeLayoutBody::Array { element } = self.static_part.layout.body else { + unreachable!(); + }; + let stride = element.layout.len(); + let indexes = self.indexes.join(TypeArrayIndex::from_parts( + index_slot, + self.static_part.layout.ty.len(), + stride, + )); + CompiledExpr { + static_part: self.static_part.map(|layout, range| { + let CompiledTypeLayoutBody::Array { element } = layout.body else { + unreachable!(); + }; + (*element, range.index_array(stride, 0)) + }), + indexes, + } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum AssignmentOrSlotIndex { + AssignmentIndex(usize), + SmallSlot(StatePartIndex), + BigSlot(StatePartIndex), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum AssignmentIO { + BigInput { + assignment_index: usize, + slot: StatePartIndex, + }, + SmallInput { + assignment_index: usize, + slot: StatePartIndex, + }, + BigOutput { + assignment_index: usize, + slot: StatePartIndex, + }, + SmallOutput { + assignment_index: usize, + slot: StatePartIndex, + }, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum AssignmentsEdge { + IO(AssignmentIO), + AssignmentImmediatePredecessor { + predecessor_assignment_index: usize, + assignment_index: usize, + }, +} + +#[derive(Debug)] +enum Assignments { + Accumulating { + assignments: Vec, + }, + Finalized { + assignments: Box<[Assignment]>, + slots_layout: TypeLayout, + slot_readers: SlotToAssignmentIndexFullMap, + slot_writers: SlotToAssignmentIndexFullMap, + 
assignment_immediate_predecessors: Box<[Box<[usize]>]>, + assignment_immediate_successors: Box<[Box<[usize]>]>, + }, +} + +impl Default for Assignments { + fn default() -> Self { + Self::Accumulating { + assignments: Vec::new(), + } + } +} + +impl Assignments { + fn finalize(&mut self, slots_layout: TypeLayout) { + let Self::Accumulating { assignments } = self else { + unreachable!("already finalized"); + }; + let assignments = mem::take(assignments).into_boxed_slice(); + let mut slot_readers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); + let mut slot_writers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); + let mut assignment_immediate_predecessors = vec![BTreeSet::new(); assignments.len()]; + let mut assignment_immediate_successors = vec![BTreeSet::new(); assignments.len()]; + for (assignment_index, assignment) in assignments.iter().enumerate() { + slot_readers + .keys_for_assignment(assignment_index) + .extend([&assignment.inputs]); + slot_readers + .keys_for_assignment(assignment_index) + .extend(&assignment.conditions); + let SlotSet(TypeParts { + small_slots, + big_slots, + }) = &assignment.outputs; + for &slot in small_slots { + if let Some(&pred) = slot_writers[slot].last() { + assignment_immediate_predecessors[assignment_index].insert(pred); + assignment_immediate_successors[pred].insert(assignment_index); + } + slot_writers[slot].push(assignment_index); + } + for &slot in big_slots { + if let Some(&pred) = slot_writers[slot].last() { + assignment_immediate_predecessors[assignment_index].insert(pred); + assignment_immediate_successors[pred].insert(assignment_index); + } + slot_writers[slot].push(assignment_index); + } + } + *self = Self::Finalized { + assignments, + slots_layout, + slot_readers, + slot_writers, + assignment_immediate_predecessors: assignment_immediate_predecessors + .into_iter() + .map(Box::from_iter) + .collect(), + assignment_immediate_successors: assignment_immediate_successors + .into_iter() + .map(Box::from_iter) + .collect(), + }; + } + fn push(&mut self, v: Assignment) { + let Self::Accumulating { assignments } = self else { + unreachable!("already finalized"); + }; + assignments.push(v); + } + fn assignments(&self) -> &[Assignment] { + let Self::Finalized { assignments, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + assignments + } + fn slots_layout(&self) -> TypeLayout { + let Self::Finalized { slots_layout, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + *slots_layout + } + fn slot_readers(&self) -> &SlotToAssignmentIndexFullMap { + let Self::Finalized { slot_readers, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + slot_readers + } + fn slot_writers(&self) -> &SlotToAssignmentIndexFullMap { + let Self::Finalized { slot_writers, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + slot_writers + } + fn assignment_immediate_predecessors(&self) -> &[Box<[usize]>] { + let Self::Finalized { + assignment_immediate_predecessors, + .. + } = self + else { + unreachable!("Assignments::finalize should have been called"); + }; + assignment_immediate_predecessors + } + fn assignment_immediate_successors(&self) -> &[Box<[usize]>] { + let Self::Finalized { + assignment_immediate_successors, + .. 
+ } = self + else { + unreachable!("Assignments::finalize should have been called"); + }; + assignment_immediate_successors + } + fn elements(&self) -> AssignmentsElements<'_> { + let SlotToAssignmentIndexFullMap(TypeParts { + small_slots, + big_slots, + }) = self.slot_readers(); + AssignmentsElements { + node_indexes: HashMap::with_capacity_and_hasher( + self.assignments().len() + small_slots.len() + big_slots.len(), + Default::default(), + ), + nodes: self.node_references(), + edges: self.edge_references(), + } + } +} + +impl GraphBase for Assignments { + type EdgeId = AssignmentsEdge; + type NodeId = AssignmentOrSlotIndex; +} + +#[derive(Debug, Clone, Copy)] +enum AssignmentsNodeRef<'a> { + Assignment { + index: usize, + #[allow(dead_code, reason = "used in Debug impl")] + assignment: &'a Assignment, + }, + SmallSlot( + StatePartIndex, + #[allow(dead_code, reason = "used in Debug impl")] SlotDebugData, + ), + BigSlot( + StatePartIndex, + #[allow(dead_code, reason = "used in Debug impl")] SlotDebugData, + ), +} + +impl<'a> NodeRef for AssignmentsNodeRef<'a> { + type NodeId = AssignmentOrSlotIndex; + type Weight = AssignmentsNodeRef<'a>; + + fn id(&self) -> Self::NodeId { + match *self { + AssignmentsNodeRef::Assignment { + index, + assignment: _, + } => AssignmentOrSlotIndex::AssignmentIndex(index), + AssignmentsNodeRef::SmallSlot(slot, _) => AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentsNodeRef::BigSlot(slot, _) => AssignmentOrSlotIndex::BigSlot(slot), + } + } + + fn weight(&self) -> &Self::Weight { + self + } +} + +impl<'a> petgraph::visit::Data for &'a Assignments { + type NodeWeight = AssignmentsNodeRef<'a>; + type EdgeWeight = AssignmentsEdge; +} + +struct AssignmentsElements<'a> { + node_indexes: HashMap, + nodes: AssignmentsNodes<'a>, + edges: AssignmentsEdges<'a>, +} + +impl<'a> Iterator for AssignmentsElements<'a> { + type Item = petgraph::data::Element< + <&'a Assignments as petgraph::visit::Data>::NodeWeight, + <&'a Assignments as petgraph::visit::Data>::EdgeWeight, + >; + + fn next(&mut self) -> Option { + let Self { + node_indexes, + nodes, + edges, + } = self; + if let Some(node) = nodes.next() { + node_indexes.insert(node.id(), node_indexes.len()); + return Some(petgraph::data::Element::Node { weight: node }); + } + let edge = edges.next()?; + Some(petgraph::data::Element::Edge { + source: node_indexes[&edge.source()], + target: node_indexes[&edge.target()], + weight: *edge.weight(), + }) + } +} + +#[derive(Clone)] +struct AssignmentsNodeIdentifiers { + assignment_indexes: std::ops::Range, + small_slots: std::ops::Range, + big_slots: std::ops::Range, +} + +impl AssignmentsNodeIdentifiers { + fn internal_iter<'a>(&'a mut self) -> impl Iterator + 'a { + let Self { + assignment_indexes, + small_slots, + big_slots, + } = self; + assignment_indexes + .map(AssignmentOrSlotIndex::AssignmentIndex) + .chain(small_slots.map(|value| { + AssignmentOrSlotIndex::SmallSlot(StatePartIndex { + value, + _phantom: PhantomData, + }) + })) + .chain(big_slots.map(|value| { + AssignmentOrSlotIndex::BigSlot(StatePartIndex { + value, + _phantom: PhantomData, + }) + })) + } +} + +impl Iterator for AssignmentsNodeIdentifiers { + type Item = AssignmentOrSlotIndex; + fn next(&mut self) -> Option { + self.internal_iter().next() + } + + fn nth(&mut self, n: usize) -> Option { + self.internal_iter().nth(n) + } +} + +impl<'a> IntoNodeIdentifiers for &'a Assignments { + type NodeIdentifiers = AssignmentsNodeIdentifiers; + + fn node_identifiers(self) -> Self::NodeIdentifiers { + let TypeLen { + 
small_slots, + big_slots, + } = self.slot_readers().len(); + AssignmentsNodeIdentifiers { + assignment_indexes: 0..self.assignments().len(), + small_slots: 0..small_slots.value, + big_slots: 0..big_slots.value, + } + } +} + +struct AssignmentsNodes<'a> { + assignments: &'a Assignments, + nodes: AssignmentsNodeIdentifiers, +} + +impl<'a> Iterator for AssignmentsNodes<'a> { + type Item = AssignmentsNodeRef<'a>; + + fn next(&mut self) -> Option { + self.nodes.next().map(|node| match node { + AssignmentOrSlotIndex::AssignmentIndex(index) => AssignmentsNodeRef::Assignment { + index, + assignment: &self.assignments.assignments()[index], + }, + AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNodeRef::SmallSlot( + slot, + *self.assignments.slots_layout().small_slots.debug_data(slot), + ), + AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNodeRef::BigSlot( + slot, + *self.assignments.slots_layout().big_slots.debug_data(slot), + ), + }) + } +} + +impl<'a> IntoNodeReferences for &'a Assignments { + type NodeRef = AssignmentsNodeRef<'a>; + type NodeReferences = AssignmentsNodes<'a>; + + fn node_references(self) -> Self::NodeReferences { + AssignmentsNodes { + assignments: self, + nodes: self.node_identifiers(), + } + } +} + +struct AssignmentsNeighborsDirected<'a> { + assignment_indexes: std::slice::Iter<'a, usize>, + small_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, + big_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, +} + +impl Iterator for AssignmentsNeighborsDirected<'_> { + type Item = AssignmentOrSlotIndex; + fn next(&mut self) -> Option { + let Self { + assignment_indexes, + small_slots, + big_slots, + } = self; + if let retval @ Some(_) = assignment_indexes + .next() + .copied() + .map(AssignmentOrSlotIndex::AssignmentIndex) + { + retval + } else if let retval @ Some(_) = small_slots + .next() + .copied() + .map(AssignmentOrSlotIndex::SmallSlot) + { + retval + } else if let retval @ Some(_) = big_slots + .next() + .copied() + .map(AssignmentOrSlotIndex::BigSlot) + { + retval + } else { + None + } + } +} + +impl<'a> IntoNeighbors for &'a Assignments { + type Neighbors = AssignmentsNeighborsDirected<'a>; + + fn neighbors(self, n: Self::NodeId) -> Self::Neighbors { + self.neighbors_directed(n, petgraph::Direction::Outgoing) + } +} + +impl<'a> IntoNeighborsDirected for &'a Assignments { + type NeighborsDirected = AssignmentsNeighborsDirected<'a>; + + fn neighbors_directed( + self, + n: Self::NodeId, + d: petgraph::Direction, + ) -> Self::NeighborsDirected { + use petgraph::Direction::*; + let slot_map = match d { + Outgoing => self.slot_readers(), + Incoming => self.slot_writers(), + }; + match n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + let assignment = &self.assignments()[assignment_index]; + let ( + assignment_indexes, + SlotSet(TypeParts { + small_slots, + big_slots, + }), + ) = match d { + Outgoing => ( + &self.assignment_immediate_successors()[assignment_index], + &assignment.outputs, + ), + Incoming => ( + &self.assignment_immediate_predecessors()[assignment_index], + &assignment.inputs, + ), + }; + AssignmentsNeighborsDirected { + assignment_indexes: assignment_indexes.iter(), + small_slots: small_slots.iter(), + big_slots: big_slots.iter(), + } + } + AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNeighborsDirected { + assignment_indexes: slot_map[slot].iter(), + small_slots: Default::default(), + big_slots: Default::default(), + }, + AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNeighborsDirected { + 
assignment_indexes: slot_map[slot].iter(), + small_slots: Default::default(), + big_slots: Default::default(), + }, + } + } +} + +impl EdgeRef for AssignmentsEdge { + type NodeId = AssignmentOrSlotIndex; + type EdgeId = AssignmentsEdge; + type Weight = AssignmentsEdge; + + fn source(&self) -> Self::NodeId { + match *self { + AssignmentsEdge::IO(AssignmentIO::BigInput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::BigSlot(slot), + AssignmentsEdge::IO(AssignmentIO::SmallInput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentsEdge::IO(AssignmentIO::BigOutput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::IO(AssignmentIO::SmallOutput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::AssignmentImmediatePredecessor { + predecessor_assignment_index, + assignment_index: _, + } => AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), + } + } + + fn target(&self) -> Self::NodeId { + match *self { + AssignmentsEdge::IO(AssignmentIO::BigInput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::IO(AssignmentIO::SmallInput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::IO(AssignmentIO::BigOutput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::BigSlot(slot), + AssignmentsEdge::IO(AssignmentIO::SmallOutput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentsEdge::AssignmentImmediatePredecessor { + predecessor_assignment_index: _, + assignment_index, + } => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + } + } + + fn weight(&self) -> &Self::Weight { + self + } + + fn id(&self) -> Self::EdgeId { + *self + } +} + +struct AssignmentsEdges<'a> { + assignments: &'a Assignments, + nodes: AssignmentsNodeIdentifiers, + outgoing_neighbors: Option<(AssignmentOrSlotIndex, AssignmentsNeighborsDirected<'a>)>, +} + +impl Iterator for AssignmentsEdges<'_> { + type Item = AssignmentsEdge; + + fn next(&mut self) -> Option { + loop { + if let Some((node, outgoing_neighbors)) = &mut self.outgoing_neighbors { + if let Some(outgoing_neighbor) = outgoing_neighbors.next() { + return Some(match (*node, outgoing_neighbor) { + ( + AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), + AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), + ) => unreachable!(), + ( + AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + ) => AssignmentsEdge::AssignmentImmediatePredecessor { + predecessor_assignment_index, + assignment_index, + }, + ( + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentOrSlotIndex::SmallSlot(slot), + ) => AssignmentsEdge::IO(AssignmentIO::SmallOutput { + assignment_index, + slot, + }), + ( + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentOrSlotIndex::BigSlot(slot), + ) => AssignmentsEdge::IO(AssignmentIO::BigOutput { + assignment_index, + slot, + }), + ( + AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + ) => AssignmentsEdge::IO(AssignmentIO::SmallInput { + assignment_index, + slot, + }), + ( + AssignmentOrSlotIndex::BigSlot(slot), + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + 
) => AssignmentsEdge::IO(AssignmentIO::BigInput { + assignment_index, + slot, + }), + }); + } + } + let node = self.nodes.next()?; + self.outgoing_neighbors = Some(( + node, + self.assignments + .neighbors_directed(node, petgraph::Direction::Outgoing), + )); + } + } +} + +impl<'a> IntoEdgeReferences for &'a Assignments { + type EdgeRef = AssignmentsEdge; + type EdgeReferences = AssignmentsEdges<'a>; + + fn edge_references(self) -> Self::EdgeReferences { + AssignmentsEdges { + assignments: self, + nodes: self.node_identifiers(), + outgoing_neighbors: None, + } + } +} + +struct AssignmentsVisitMap { + assignments: Vec, + slots: DenseSlotSet, +} + +impl VisitMap for AssignmentsVisitMap { + fn visit(&mut self, n: AssignmentOrSlotIndex) -> bool { + match n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + !mem::replace(&mut self.assignments[assignment_index], true) + } + AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.insert(slot), + AssignmentOrSlotIndex::BigSlot(slot) => self.slots.insert(slot), + } + } + + fn is_visited(&self, n: &AssignmentOrSlotIndex) -> bool { + match *n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + self.assignments[assignment_index] + } + AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.contains(slot), + AssignmentOrSlotIndex::BigSlot(slot) => self.slots.contains(slot), + } + } + + fn unvisit(&mut self, n: AssignmentOrSlotIndex) -> bool { + match n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + mem::replace(&mut self.assignments[assignment_index], false) + } + AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.remove(slot), + AssignmentOrSlotIndex::BigSlot(slot) => self.slots.remove(slot), + } + } +} + +impl Visitable for Assignments { + type Map = AssignmentsVisitMap; + + fn visit_map(self: &Self) -> Self::Map { + AssignmentsVisitMap { + assignments: vec![false; self.assignments().len()], + slots: DenseSlotSet::new(self.slot_readers().len()), + } + } + + fn reset_map(self: &Self, map: &mut Self::Map) { + let AssignmentsVisitMap { assignments, slots } = map; + assignments.clear(); + assignments.resize(self.assignments().len(), false); + if slots.len() != self.slot_readers().len() { + *slots = DenseSlotSet::new(self.slot_readers().len()); + } else { + slots.clear(); + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct DenseSlotSet(TypeParts); + +impl DenseSlotSet { + fn new(len: TypeLen) -> Self { + let TypeLen { + small_slots, + big_slots, + } = len; + Self(TypeParts { + small_slots: vec![false; small_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + big_slots: vec![false; big_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + }) + } + fn len(&self) -> TypeLen { + TypeLen { + small_slots: StatePartLen { + value: self.0.small_slots.len() as _, + _phantom: PhantomData, + }, + big_slots: StatePartLen { + value: self.0.big_slots.len() as _, + _phantom: PhantomData, + }, + } + } + fn clear(&mut self) { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.fill(false); + big_slots.fill(false); + } +} + +impl StatePartsValue for DenseSlotSet { + type Value = Box<[bool]>; +} + +trait DenseSlotSetMethods: Extend> { + fn contains(&self, k: StatePartIndex) -> bool; + fn remove(&mut self, k: StatePartIndex) -> bool { + self.take(k).is_some() + } + fn take(&mut self, k: StatePartIndex) -> Option>; + fn replace(&mut self, k: StatePartIndex) -> Option>; + fn insert(&mut self, k: StatePartIndex) -> bool { + self.replace(k).is_none() 
+ } +} + +impl Extend> for DenseSlotSet +where + Self: DenseSlotSetMethods, +{ + fn extend>>(&mut self, iter: T) { + iter.into_iter().for_each(|v| { + self.insert(v); + }); + } +} + +impl DenseSlotSetMethods for DenseSlotSet { + fn contains(&self, k: StatePartIndex) -> bool { + self.0.small_slots[k.as_usize()] + } + + fn take( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(self.0.small_slots.get_mut(k.as_usize())?, false).then_some(k) + } + + fn replace( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(&mut self.0.small_slots[k.as_usize()], true).then_some(k) + } +} + +impl DenseSlotSetMethods for DenseSlotSet { + fn contains(&self, k: StatePartIndex) -> bool { + self.0.big_slots[k.as_usize()] + } + + fn take( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(self.0.big_slots.get_mut(k.as_usize())?, false).then_some(k) + } + + fn replace( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(&mut self.0.big_slots[k.as_usize()], true).then_some(k) + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +struct SlotVec(TypeParts); + +impl SlotVec { + fn is_empty(&self) -> bool { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.is_empty() && big_slots.is_empty() + } +} + +impl StatePartsValue for SlotVec { + type Value = Vec>; +} + +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +struct SlotSet(TypeParts); + +impl SlotSet { + fn is_empty(&self) -> bool { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.is_empty() && big_slots.is_empty() + } + fn for_each( + &self, + small_slots_fn: impl FnMut(StatePartIndex), + big_slots_fn: impl FnMut(StatePartIndex), + ) { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.iter().copied().for_each(small_slots_fn); + big_slots.iter().copied().for_each(big_slots_fn); + } + fn all( + &self, + small_slots_fn: impl FnMut(StatePartIndex) -> bool, + big_slots_fn: impl FnMut(StatePartIndex) -> bool, + ) -> bool { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.iter().copied().all(small_slots_fn) + && big_slots.iter().copied().all(big_slots_fn) + } +} + +impl StatePartsValue for SlotSet { + type Value = BTreeSet>; +} + +impl Extend> for SlotSet { + fn extend>>(&mut self, iter: T) { + self.0.small_slots.extend(iter); + } +} + +impl Extend> for SlotSet { + fn extend>>(&mut self, iter: T) { + self.0.big_slots.extend(iter); + } +} + +impl Extend> for SlotSet +where + Self: Extend>, +{ + fn extend>>(&mut self, iter: T) { + self.extend(iter.into_iter().flat_map(|v| v.iter())); + } +} + +impl Extend for SlotSet { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each( + |TypeIndexRange { + small_slots, + big_slots, + }| { + self.extend(small_slots.iter()); + self.extend(big_slots.iter()); + }, + ) + } +} + +impl Extend for SlotSet { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each( + |TypeArrayIndex { + small_slots, + big_slots, + }| { + self.extend([small_slots]); + self.extend([big_slots]); + }, + ) + } +} + +impl Extend> for SlotSet { + fn extend>>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|v| v.index)); + } +} + +impl Extend for SlotSet { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each(|cond_body| match cond_body { + CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { + self.extend([cond.range]); + } + CondBody::MatchArm { + discriminant, + variant_index: _, + } => self.extend([discriminant]), + }) + } +} + +impl Extend 
for SlotSet { + fn extend>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|v| v.body)) + } +} + +#[derive(Debug)] +struct Assignment { + inputs: SlotSet, + outputs: SlotSet, + conditions: Interned<[Cond]>, + insns: Vec, + source_location: SourceLocation, +} + +#[derive(Debug)] +struct SlotToAssignmentIndexFullMap(TypeParts); + +impl StatePartsValue for SlotToAssignmentIndexFullMap { + type Value = Box<[Vec]>; +} + +impl SlotToAssignmentIndexFullMap { + fn new(len: TypeLen) -> Self { + let TypeLen { + small_slots, + big_slots, + } = len; + Self(TypeParts { + small_slots: vec![Vec::new(); small_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + big_slots: vec![Vec::new(); big_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + }) + } + fn len(&self) -> TypeLen { + TypeLen { + small_slots: StatePartLen { + value: self.0.small_slots.len() as _, + _phantom: PhantomData, + }, + big_slots: StatePartLen { + value: self.0.big_slots.len() as _, + _phantom: PhantomData, + }, + } + } + fn keys_for_assignment( + &mut self, + assignment_index: usize, + ) -> SlotToAssignmentIndexFullMapKeysForAssignment<'_> { + SlotToAssignmentIndexFullMapKeysForAssignment { + map: self, + assignment_index, + } + } + fn for_each( + &self, + mut small_slots_fn: impl FnMut(StatePartIndex, &[usize]), + mut big_slots_fn: impl FnMut(StatePartIndex, &[usize]), + ) { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.iter().enumerate().for_each(|(k, v)| { + small_slots_fn( + StatePartIndex { + value: k as _, + _phantom: PhantomData, + }, + v, + ) + }); + big_slots.iter().enumerate().for_each(|(k, v)| { + big_slots_fn( + StatePartIndex { + value: k as _, + _phantom: PhantomData, + }, + v, + ) + }); + } +} + +impl std::ops::Index> for SlotToAssignmentIndexFullMap { + type Output = Vec; + + fn index(&self, index: StatePartIndex) -> &Self::Output { + &self.0.small_slots[index.as_usize()] + } +} + +impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { + fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { + &mut self.0.small_slots[index.as_usize()] + } +} + +impl std::ops::Index> for SlotToAssignmentIndexFullMap { + type Output = Vec; + + fn index(&self, index: StatePartIndex) -> &Self::Output { + &self.0.big_slots[index.as_usize()] + } +} + +impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { + fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { + &mut self.0.big_slots[index.as_usize()] + } +} + +struct SlotToAssignmentIndexFullMapKeysForAssignment<'a> { + map: &'a mut SlotToAssignmentIndexFullMap, + assignment_index: usize, +} + +impl<'a, K: StatePartKind> Extend<&'a StatePartIndex> + for SlotToAssignmentIndexFullMapKeysForAssignment<'_> +where + Self: Extend>, +{ + fn extend>>(&mut self, iter: T) { + self.extend(iter.into_iter().copied()); + } +} + +impl Extend> + for SlotToAssignmentIndexFullMapKeysForAssignment<'_> +where + SlotToAssignmentIndexFullMap: IndexMut, Output = Vec>, +{ + fn extend>>(&mut self, iter: T) { + iter.into_iter() + .for_each(|slot| self.map[slot].push(self.assignment_index)); + } +} + +impl<'a> Extend<&'a SlotSet> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each( + |SlotSet(TypeParts { + small_slots, + big_slots, + })| { + self.extend(small_slots); + self.extend(big_slots); + }, + ); + } +} + +impl<'a> Extend<&'a Cond> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { + fn extend>(&mut self, iter: T) { 
+ iter.into_iter().for_each(|cond| match cond.body { + CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { + let CompiledValue { + range: + TypeIndexRange { + small_slots, + big_slots, + }, + layout: _, + write: _, + } = cond; + self.extend(small_slots.iter()); + self.extend(big_slots.iter()); + } + CondBody::MatchArm { + discriminant, + variant_index: _, + } => self.extend([discriminant]), + }); + } +} + +impl Assignment { + fn new( + conditions: Interned<[Cond]>, + insns: Vec, + source_location: SourceLocation, + ) -> Self { + let mut inputs = SlotSet::default(); + let mut outputs = SlotSet::default(); + for insn in &insns { + let insn = match insn { + InsnOrLabel::Insn(insn) => insn, + InsnOrLabel::Label(_) => continue, + }; + for InsnField { ty, kind } in insn.fields() { + match (kind, ty) { + (InsnFieldKind::Input, InsnFieldType::SmallSlot(&slot)) => { + inputs.extend([slot]); + } + (InsnFieldKind::Input, InsnFieldType::BigSlot(&slot)) => { + inputs.extend([slot]); + } + ( + InsnFieldKind::Input, + InsnFieldType::SmallSlotArrayIndexed(&array_indexed), + ) => { + array_indexed.for_each_target(|slot| inputs.extend([slot])); + inputs.extend(array_indexed.indexes); + } + (InsnFieldKind::Input, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { + array_indexed.for_each_target(|slot| inputs.extend([slot])); + inputs.extend(array_indexed.indexes); + } + (InsnFieldKind::Output, InsnFieldType::SmallSlot(&slot)) => { + outputs.extend([slot]); + } + (InsnFieldKind::Output, InsnFieldType::BigSlot(&slot)) => { + outputs.extend([slot]); + } + ( + InsnFieldKind::Output, + InsnFieldType::SmallSlotArrayIndexed(&array_indexed), + ) => { + array_indexed.for_each_target(|slot| { + outputs.extend([slot]); + }); + inputs.extend(array_indexed.indexes); + } + (InsnFieldKind::Output, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { + array_indexed.for_each_target(|slot| { + outputs.extend([slot]); + }); + inputs.extend(array_indexed.indexes); + } + ( + _, + InsnFieldType::Memory(_) + | InsnFieldType::SmallUInt(_) + | InsnFieldType::SmallSInt(_) + | InsnFieldType::InternedBigInt(_) + | InsnFieldType::U8(_) + | InsnFieldType::USize(_) + | InsnFieldType::Empty(_), + ) + | ( + InsnFieldKind::Immediate + | InsnFieldKind::Memory + | InsnFieldKind::BranchTarget, + _, + ) => {} + } + } + } + Self { + inputs, + outputs, + conditions, + insns, + source_location, + } + } +} + +#[derive(Debug)] +struct RegisterReset { + is_async: bool, + init: CompiledValue, + rst: StatePartIndex, +} + +#[derive(Debug, Clone, Copy)] +struct ClockTrigger { + last_clk_was_low: StatePartIndex, + clk: StatePartIndex, + clk_triggered: StatePartIndex, + source_location: SourceLocation, +} + +#[derive(Debug)] +struct Register { + value: CompiledValue, + clk_triggered: StatePartIndex, + reset: Option, + source_location: SourceLocation, +} + +#[derive(Debug)] + +struct MemoryPort { + clk_triggered: StatePartIndex, + addr_delayed: Vec>, + en_delayed: Vec>, + #[allow(dead_code, reason = "used in Debug impl")] + data_layout: CompiledTypeLayout, + read_data_delayed: Vec, + write_data_delayed: Vec, + write_mask_delayed: Vec, + write_mode_delayed: Vec>, + write_insns: Vec, +} + +struct MemoryPortReadInsns<'a> { + addr: StatePartIndex, + en: StatePartIndex, + write_mode: Option>, + data: TypeIndexRange, + insns: &'a mut Vec, +} + +struct MemoryPortWriteInsns<'a> { + addr: StatePartIndex, + en: StatePartIndex, + write_mode: Option>, + data: TypeIndexRange, + mask: TypeIndexRange, + insns: &'a mut Vec, +} + +#[derive(Debug)] +struct 
Memory { + mem: Mem, + memory: StatePartIndex, + trace: TraceMem, + ports: Vec, +} + +#[derive(Copy, Clone)] +enum MakeTraceDeclTarget { + Expr(Expr), + Memory { + id: TraceMemoryId, + depth: usize, + stride: usize, + start: usize, + ty: CanonicalType, + }, +} + +impl MakeTraceDeclTarget { + fn flow(self) -> Flow { + match self { + MakeTraceDeclTarget::Expr(expr) => Expr::flow(expr), + MakeTraceDeclTarget::Memory { .. } => Flow::Duplex, + } + } + fn ty(self) -> CanonicalType { + match self { + MakeTraceDeclTarget::Expr(expr) => Expr::ty(expr), + MakeTraceDeclTarget::Memory { ty, .. } => ty, + } + } +} + +struct DebugOpaque(T); + +impl fmt::Debug for DebugOpaque { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("<...>") + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub(crate) struct CompiledExternModule { + pub(crate) module_io_targets: Interned<[Target]>, + pub(crate) module_io: Interned<[CompiledValue]>, + pub(crate) simulation: ExternModuleSimulation, +} + +#[derive(Debug)] +pub struct Compiler { + insns: Insns, + original_base_module: Interned>, + base_module: Interned>, + modules: HashMap, + extern_modules: Vec, + compiled_values: HashMap>, + compiled_exprs: HashMap, CompiledExpr>, + compiled_exprs_to_values: HashMap, CompiledValue>, + decl_conditions: HashMap>, + compiled_values_to_dyn_array_indexes: + HashMap, StatePartIndex>, + compiled_value_bool_dest_is_small_map: + HashMap, StatePartIndex>, + assignments: Assignments, + clock_triggers: Vec, + compiled_value_to_clock_trigger_map: HashMap, ClockTrigger>, + enum_discriminants: HashMap, StatePartIndex>, + registers: Vec, + traces: SimTraces>>, + memories: Vec, + dump_assignments_dot: Option>>, +} + +impl Compiler { + pub fn new(base_module: Interned>) -> Self { + let original_base_module = base_module; + let base_module = deduce_resets(base_module, true) + .unwrap_or_else(|e| panic!("failed to deduce reset types: {e}")); + Self { + insns: Insns::new(), + original_base_module, + base_module, + modules: HashMap::default(), + extern_modules: Vec::new(), + compiled_values: HashMap::default(), + compiled_exprs: HashMap::default(), + compiled_exprs_to_values: HashMap::default(), + decl_conditions: HashMap::default(), + compiled_values_to_dyn_array_indexes: HashMap::default(), + compiled_value_bool_dest_is_small_map: HashMap::default(), + assignments: Assignments::default(), + clock_triggers: Vec::new(), + compiled_value_to_clock_trigger_map: HashMap::default(), + enum_discriminants: HashMap::default(), + registers: Vec::new(), + traces: SimTraces(Vec::new()), + memories: Vec::new(), + dump_assignments_dot: None, + } + } + #[doc(hidden)] + /// This is explicitly unstable and may be changed/removed at any time + pub fn dump_assignments_dot(&mut self, callback: Box) { + self.dump_assignments_dot = Some(DebugOpaque(callback)); + } + fn new_sim_trace(&mut self, kind: SimTraceKind) -> TraceScalarId { + let id = TraceScalarId(self.traces.0.len()); + self.traces.0.push(SimTrace { + kind, + state: (), + last_state: (), + }); + id + } + fn make_trace_scalar_helper( + &mut self, + instantiated_module: InstantiatedModule, + target: MakeTraceDeclTarget, + source_location: SourceLocation, + small_kind: impl FnOnce(StatePartIndex) -> SimTraceKind, + big_kind: impl FnOnce(StatePartIndex) -> SimTraceKind, + ) -> TraceLocation { + match target { + MakeTraceDeclTarget::Expr(target) => { + let compiled_value = self.compile_expr(instantiated_module, target); + let compiled_value = 
self.compiled_expr_to_value(compiled_value, source_location); + TraceLocation::Scalar(self.new_sim_trace(match compiled_value.range.len() { + TypeLen::A_SMALL_SLOT => small_kind(compiled_value.range.small_slots.start), + TypeLen::A_BIG_SLOT => big_kind(compiled_value.range.big_slots.start), + _ => unreachable!(), + })) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty, + } => TraceLocation::Memory(TraceMemoryLocation { + id, + depth, + stride, + start, + len: ty.bit_width(), + }), + } + } + fn make_trace_scalar( + &mut self, + instantiated_module: InstantiatedModule, + target: MakeTraceDeclTarget, + name: Interned, + source_location: SourceLocation, + ) -> TraceDecl { + let flow = target.flow(); + match target.ty() { + CanonicalType::UInt(ty) => TraceUInt { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallUInt { index, ty }, + |index| SimTraceKind::BigUInt { index, ty }, + ), + name, + ty, + flow, + } + .into(), + CanonicalType::SInt(ty) => TraceSInt { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallSInt { index, ty }, + |index| SimTraceKind::BigSInt { index, ty }, + ), + name, + ty, + flow, + } + .into(), + CanonicalType::Bool(_) => TraceBool { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallBool { index }, + |index| SimTraceKind::BigBool { index }, + ), + name, + flow, + } + .into(), + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(ty) => { + assert_eq!(ty.discriminant_bit_width(), ty.type_properties().bit_width); + let location = match target { + MakeTraceDeclTarget::Expr(target) => { + let compiled_value = self.compile_expr(instantiated_module, target); + let compiled_value = + self.compiled_expr_to_value(compiled_value, source_location); + let discriminant = self.compile_enum_discriminant( + compiled_value.map_ty(Enum::from_canonical), + source_location, + ); + TraceLocation::Scalar(self.new_sim_trace(SimTraceKind::EnumDiscriminant { + index: discriminant, + ty, + })) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => TraceLocation::Memory(TraceMemoryLocation { + id, + depth, + stride, + start, + len: ty.type_properties().bit_width, + }), + }; + TraceFieldlessEnum { + location, + name, + ty, + flow, + } + .into() + } + CanonicalType::Bundle(_) | CanonicalType::PhantomConst(_) => unreachable!(), + CanonicalType::AsyncReset(_) => TraceAsyncReset { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallAsyncReset { index }, + |index| SimTraceKind::BigAsyncReset { index }, + ), + name, + flow, + } + .into(), + CanonicalType::SyncReset(_) => TraceSyncReset { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallSyncReset { index }, + |index| SimTraceKind::BigSyncReset { index }, + ), + name, + flow, + } + .into(), + CanonicalType::Reset(_) => unreachable!(), + CanonicalType::Clock(_) => TraceClock { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallClock { index }, + |index| SimTraceKind::BigClock { index }, + ), + name, + flow, + } + .into(), + } + } + fn make_trace_decl_child( + &mut self, + instantiated_module: InstantiatedModule, + target: MakeTraceDeclTarget, + name: Interned, 
+ source_location: SourceLocation, + ) -> TraceDecl { + match target.ty() { + CanonicalType::Array(ty) => { + let elements = Interned::from_iter((0..ty.len()).map(|index| { + self.make_trace_decl_child( + instantiated_module, + match target { + MakeTraceDeclTarget::Expr(target) => MakeTraceDeclTarget::Expr( + Expr::::from_canonical(target)[index], + ), + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start: start + ty.element().bit_width() * index, + ty: ty.element(), + }, + }, + Intern::intern_owned(format!("[{index}]")), + source_location, + ) + })); + TraceArray { + name, + elements, + ty, + flow: target.flow(), + } + .into() + } + CanonicalType::Enum(ty) => { + if ty.variants().iter().all(|v| v.ty.is_none()) { + self.make_trace_scalar(instantiated_module, target, name, source_location) + } else { + let flow = target.flow(); + let location = match target { + MakeTraceDeclTarget::Expr(target) => { + let compiled_value = self.compile_expr(instantiated_module, target); + let compiled_value = + self.compiled_expr_to_value(compiled_value, source_location); + let discriminant = self.compile_enum_discriminant( + compiled_value.map_ty(Enum::from_canonical), + source_location, + ); + TraceLocation::Scalar(self.new_sim_trace( + SimTraceKind::EnumDiscriminant { + index: discriminant, + ty, + }, + )) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => TraceLocation::Memory(TraceMemoryLocation { + id, + depth, + stride, + start, + len: ty.discriminant_bit_width(), + }), + }; + let discriminant = TraceEnumDiscriminant { + location, + name: "$tag".intern(), + ty, + flow, + }; + let non_empty_fields = + Interned::from_iter(ty.variants().into_iter().enumerate().flat_map( + |(variant_index, variant)| { + variant.ty.map(|variant_ty| { + self.make_trace_decl_child( + instantiated_module, + match target { + MakeTraceDeclTarget::Expr(target) => { + MakeTraceDeclTarget::Expr( + ops::VariantAccess::new_by_index( + Expr::::from_canonical(target), + variant_index, + ) + .to_expr(), + ) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start: start + ty.discriminant_bit_width(), + ty: variant_ty, + }, + }, + variant.name, + source_location, + ) + }) + }, + )); + TraceEnumWithFields { + name, + discriminant, + non_empty_fields, + ty, + flow, + } + .into() + } + } + CanonicalType::Bundle(ty) => { + let fields = Interned::from_iter(ty.fields().iter().zip(ty.field_offsets()).map( + |(field, field_offset)| { + self.make_trace_decl_child( + instantiated_module, + match target { + MakeTraceDeclTarget::Expr(target) => { + MakeTraceDeclTarget::Expr(Expr::field( + Expr::::from_canonical(target), + &field.name, + )) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start: start + field_offset, + ty: field.ty, + }, + }, + field.name, + source_location, + ) + }, + )); + TraceBundle { + name, + fields, + ty, + flow: target.flow(), + } + .into() + } + CanonicalType::UInt(_) + | CanonicalType::SInt(_) + | CanonicalType::Bool(_) + | CanonicalType::AsyncReset(_) + | CanonicalType::SyncReset(_) + | CanonicalType::Reset(_) + | CanonicalType::Clock(_) => { + self.make_trace_scalar(instantiated_module, target, name, source_location) + } + CanonicalType::PhantomConst(_) => TraceBundle { + name, + fields: Interned::default(), + ty: 
Bundle::new(Interned::default()), + flow: target.flow(), + } + .into(), + } + } + fn make_trace_decl( + &mut self, + instantiated_module: InstantiatedModule, + target_base: TargetBase, + ) -> TraceDecl { + let target = MakeTraceDeclTarget::Expr(target_base.to_expr()); + match target_base { + TargetBase::ModuleIO(module_io) => TraceModuleIO { + name: module_io.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + module_io.name(), + module_io.source_location(), + ) + .intern(), + ty: module_io.ty(), + flow: module_io.flow(), + } + .into(), + TargetBase::MemPort(mem_port) => { + let name = Intern::intern_owned(mem_port.port_name().to_string()); + let TraceDecl::Scope(TraceScope::Bundle(bundle)) = self.make_trace_decl_child( + instantiated_module, + target, + name, + mem_port.source_location(), + ) else { + unreachable!() + }; + TraceMemPort { + name, + bundle, + ty: mem_port.ty(), + } + .into() + } + TargetBase::Reg(reg) => TraceReg { + name: reg.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + reg.name(), + reg.source_location(), + ) + .intern(), + ty: reg.ty(), + } + .into(), + TargetBase::RegSync(reg) => TraceReg { + name: reg.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + reg.name(), + reg.source_location(), + ) + .intern(), + ty: reg.ty(), + } + .into(), + TargetBase::RegAsync(reg) => TraceReg { + name: reg.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + reg.name(), + reg.source_location(), + ) + .intern(), + ty: reg.ty(), + } + .into(), + TargetBase::Wire(wire) => TraceWire { + name: wire.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + wire.name(), + wire.source_location(), + ) + .intern(), + ty: wire.ty(), + } + .into(), + TargetBase::Instance(instance) => { + let TraceDecl::Scope(TraceScope::Bundle(instance_io)) = self.make_trace_decl_child( + instantiated_module, + target, + instance.name(), + instance.source_location(), + ) else { + unreachable!() + }; + let compiled_module = &self.modules[&InstantiatedModule::Child { + parent: instantiated_module.intern(), + instance: instance.intern(), + }]; + TraceInstance { + name: instance.name(), + instance_io, + module: compiled_module.trace_decls, + ty: instance.ty(), + } + .into() + } + } + } + fn compile_value( + &mut self, + target: TargetInInstantiatedModule, + ) -> CompiledValue { + if let Some(&retval) = self.compiled_values.get(&target) { + return retval; + } + let retval = match target.target { + Target::Base(base) => { + let unprefixed_layout = CompiledTypeLayout::get(base.canonical_ty()); + let layout = unprefixed_layout.with_prefixed_debug_names(&format!( + "{:?}.{:?}", + target.instantiated_module, + base.target_name() + )); + let range = self.insns.allocate_variable(&layout.layout); + let write = match *base { + TargetBase::ModuleIO(_) + | TargetBase::MemPort(_) + | TargetBase::Wire(_) + | TargetBase::Instance(_) => None, + TargetBase::Reg(_) | TargetBase::RegSync(_) | TargetBase::RegAsync(_) => { + let write_layout = unprefixed_layout.with_prefixed_debug_names(&format!( + "{:?}.{:?}$next", + target.instantiated_module, + base.target_name() + )); + Some(( + write_layout, + self.insns.allocate_variable(&write_layout.layout), + )) + } + }; + CompiledValue { + range, + layout, + write, + } + } + Target::Child(target_child) => { + let parent = self.compile_value(TargetInInstantiatedModule { + instantiated_module: target.instantiated_module, + target: 
*target_child.parent(), + }); + match *target_child.path_element() { + TargetPathElement::BundleField(TargetPathBundleField { name }) => { + parent.map_ty(Bundle::from_canonical).field_by_name(name) + } + TargetPathElement::ArrayElement(TargetPathArrayElement { index }) => { + parent.map_ty(Array::from_canonical).element(index) + } + TargetPathElement::DynArrayElement(_) => unreachable!(), + } + } + }; + self.compiled_values.insert(target, retval); + retval + } + fn compiled_expr_to_value( + &mut self, + expr: CompiledExpr, + source_location: SourceLocation, + ) -> CompiledValue { + if let Some(&retval) = self.compiled_exprs_to_values.get(&expr) { + return retval; + } + assert!( + expr.static_part.layout.ty.is_passive(), + "invalid expression passed to compiled_expr_to_value -- type must be passive", + ); + let CompiledExpr { + static_part, + indexes, + } = expr; + let retval = if indexes.as_ref().is_empty() { + CompiledValue { + layout: static_part.layout, + range: static_part.range, + write: None, + } + } else { + let layout = static_part.layout.with_anonymized_debug_info(); + let retval = CompiledValue { + layout, + range: self.insns.allocate_variable(&layout.layout), + write: None, + }; + let TypeIndexRange { + small_slots, + big_slots, + } = retval.range; + self.add_assignment( + Interned::default(), + small_slots + .iter() + .zip(static_part.range.small_slots.iter()) + .map(|(dest, base)| Insn::ReadSmallIndexed { + dest, + src: StatePartArrayIndexed { + base, + indexes: indexes.small_slots, + }, + }) + .chain( + big_slots + .iter() + .zip(static_part.range.big_slots.iter()) + .map(|(dest, base)| Insn::ReadIndexed { + dest, + src: StatePartArrayIndexed { + base, + indexes: indexes.big_slots, + }, + }), + ), + source_location, + ); + retval + }; + self.compiled_exprs_to_values.insert(expr, retval); + retval + } + fn add_assignment>( + &mut self, + conditions: Interned<[Cond]>, + insns: impl IntoIterator, + source_location: SourceLocation, + ) { + let insns = Vec::from_iter(insns.into_iter().map(Into::into)); + self.assignments + .push(Assignment::new(conditions, insns, source_location)); + } + fn simple_big_expr_input( + &mut self, + instantiated_module: InstantiatedModule, + input: Expr, + ) -> StatePartIndex { + let input = self.compile_expr(instantiated_module, input); + let input = + self.compiled_expr_to_value(input, instantiated_module.leaf_module().source_location()); + assert_eq!(input.range.len(), TypeLen::A_BIG_SLOT); + input.range.big_slots.start + } + fn compile_expr_helper( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + make_insns: impl FnOnce(&mut Self, TypeIndexRange) -> Vec, + ) -> CompiledValue { + let layout = CompiledTypeLayout::get(dest_ty); + let range = self.insns.allocate_variable(&layout.layout); + let retval = CompiledValue { + layout, + range, + write: None, + }; + let insns = make_insns(self, range); + self.add_assignment( + Interned::default(), + insns, + instantiated_module.leaf_module().source_location(), + ); + retval + } + fn simple_nary_big_expr_helper( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + make_insns: impl FnOnce(StatePartIndex) -> Vec, + ) -> CompiledValue { + self.compile_expr_helper(instantiated_module, dest_ty, |_, dest| { + assert_eq!(dest.len(), TypeLen::A_BIG_SLOT); + make_insns(dest.big_slots.start) + }) + } + fn simple_nary_big_expr( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + inputs: [Expr; N], + make_insns: impl FnOnce( 
+ StatePartIndex, + [StatePartIndex; N], + ) -> Vec, + ) -> CompiledValue { + let inputs = inputs.map(|input| self.simple_big_expr_input(instantiated_module, input)); + self.simple_nary_big_expr_helper(instantiated_module, dest_ty, |dest| { + make_insns(dest, inputs) + }) + } + fn compiled_value_to_dyn_array_index( + &mut self, + compiled_value: CompiledValue, + source_location: SourceLocation, + ) -> StatePartIndex { + if let Some(&retval) = self + .compiled_values_to_dyn_array_indexes + .get(&compiled_value) + { + return retval; + } + let mut ty = compiled_value.layout.ty; + ty.width = ty.width.min(SmallUInt::BITS as usize); + let retval = match compiled_value.range.len() { + TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, + TypeLen::A_BIG_SLOT => { + let debug_data = SlotDebugData { + name: Interned::default(), + ty: ty.canonical(), + }; + let dest = self + .insns + .allocate_variable(&TypeLayout { + small_slots: StatePartLayout::scalar(debug_data, ()), + big_slots: StatePartLayout::empty(), + }) + .small_slots + .start; + self.add_assignment( + Interned::default(), + vec![Insn::CastBigToArrayIndex { + dest, + src: compiled_value.range.big_slots.start, + }], + source_location, + ); + dest + } + _ => unreachable!(), + }; + self.compiled_values_to_dyn_array_indexes + .insert(compiled_value, retval); + retval + } + fn compiled_value_bool_dest_is_small( + &mut self, + compiled_value: CompiledValue, + source_location: SourceLocation, + ) -> StatePartIndex { + if let Some(&retval) = self + .compiled_value_bool_dest_is_small_map + .get(&compiled_value) + { + return retval; + } + let retval = match compiled_value.range.len() { + TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, + TypeLen::A_BIG_SLOT => { + let debug_data = SlotDebugData { + name: Interned::default(), + ty: Bool.canonical(), + }; + let dest = self + .insns + .allocate_variable(&TypeLayout { + small_slots: StatePartLayout::scalar(debug_data, ()), + big_slots: StatePartLayout::empty(), + }) + .small_slots + .start; + self.add_assignment( + Interned::default(), + vec![Insn::IsNonZeroDestIsSmall { + dest, + src: compiled_value.range.big_slots.start, + }], + source_location, + ); + dest + } + _ => unreachable!(), + }; + self.compiled_value_bool_dest_is_small_map + .insert(compiled_value, retval); + retval + } + fn compile_cast_scalar_to_bits( + &mut self, + instantiated_module: InstantiatedModule, + arg: Expr, + cast_fn: impl FnOnce(Expr) -> Expr, + ) -> CompiledValue { + let arg = Expr::::from_canonical(arg); + let retval = cast_fn(arg); + let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); + let retval = self + .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); + retval.map_ty(UInt::from_canonical) + } + fn compile_cast_aggregate_to_bits( + &mut self, + instantiated_module: InstantiatedModule, + parts: impl IntoIterator>, + ) -> CompiledValue { + let retval = parts + .into_iter() + .map(|part| part.cast_to_bits()) + .reduce(|accumulator, part| accumulator | (part << Expr::ty(accumulator).width)) + .unwrap_or_else(|| UInt[0].zero().to_expr()); + let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); + let retval = self + .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); + retval.map_ty(UInt::from_canonical) + } + fn compile_cast_to_bits( + &mut self, + instantiated_module: InstantiatedModule, + expr: ops::CastToBits, + ) -> CompiledValue { + match Expr::ty(expr.arg()) { + 
CanonicalType::UInt(_) => { + self.compile_cast_scalar_to_bits(instantiated_module, expr.arg(), |arg| arg) + } + CanonicalType::SInt(ty) => self.compile_cast_scalar_to_bits( + instantiated_module, + expr.arg(), + |arg: Expr| arg.cast_to(ty.as_same_width_uint()), + ), + CanonicalType::Bool(_) + | CanonicalType::AsyncReset(_) + | CanonicalType::SyncReset(_) + | CanonicalType::Reset(_) + | CanonicalType::Clock(_) => self.compile_cast_scalar_to_bits( + instantiated_module, + expr.arg(), + |arg: Expr| arg.cast_to(UInt[1]), + ), + CanonicalType::Array(ty) => self.compile_cast_aggregate_to_bits( + instantiated_module, + (0..ty.len()).map(|index| Expr::::from_canonical(expr.arg())[index]), + ), + CanonicalType::Enum(ty) => self + .simple_nary_big_expr( + instantiated_module, + UInt[ty.type_properties().bit_width].canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::Copy { dest, src }], + ) + .map_ty(UInt::from_canonical), + CanonicalType::Bundle(ty) => self.compile_cast_aggregate_to_bits( + instantiated_module, + ty.fields().iter().map(|field| { + Expr::field(Expr::::from_canonical(expr.arg()), &field.name) + }), + ), + CanonicalType::PhantomConst(_) => { + self.compile_cast_aggregate_to_bits(instantiated_module, []) + } + } + } + fn compile_cast_bits_to( + &mut self, + instantiated_module: InstantiatedModule, + expr: ops::CastBitsTo, + ) -> CompiledValue { + let retval = match expr.ty() { + CanonicalType::UInt(_) => Expr::canonical(expr.arg()), + CanonicalType::SInt(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::Bool(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::Array(ty) => { + let stride = ty.element().bit_width(); + Expr::::canonical( + ops::ArrayLiteral::new( + ty.element(), + Interned::from_iter((0..ty.len()).map(|index| { + let start = stride * index; + let end = start + stride; + expr.arg()[start..end].cast_bits_to(ty.element()) + })), + ) + .to_expr(), + ) + } + ty @ CanonicalType::Enum(_) => { + return self.simple_nary_big_expr( + instantiated_module, + ty, + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::Copy { dest, src }], + ); + } + CanonicalType::Bundle(ty) => Expr::canonical( + ops::BundleLiteral::new( + ty, + Interned::from_iter(ty.field_offsets().iter().zip(&ty.fields()).map( + |(&offset, &field)| { + let end = offset + field.ty.bit_width(); + expr.arg()[offset..end].cast_bits_to(field.ty) + }, + )), + ) + .to_expr(), + ), + CanonicalType::AsyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::SyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::Reset(_) => unreachable!(), + CanonicalType::Clock(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::PhantomConst(ty) => { + let _ = self.compile_expr(instantiated_module, Expr::canonical(expr.arg())); + Expr::canonical(ty.to_expr()) + } + }; + let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); + self.compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()) + } + fn compile_aggregate_literal( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + inputs: Interned<[Expr]>, + ) -> CompiledValue { + self.compile_expr_helper(instantiated_module, dest_ty, |this, dest| { + let mut insns = Vec::new(); + let mut offset = TypeIndex::ZERO; + for input in inputs { + let input = this.compile_expr(instantiated_module, input); + let input = this + .compiled_expr_to_value( + input, + instantiated_module.leaf_module().source_location(), + ) + 
.range; + insns.extend( + input.insns_for_copy_to(dest.slice(TypeIndexRange::new(offset, input.len()))), + ); + offset = offset.offset(input.len().as_index()); + } + insns + }) + } + fn compile_expr( + &mut self, + instantiated_module: InstantiatedModule, + expr: Expr, + ) -> CompiledExpr { + if let Some(&retval) = self.compiled_exprs.get(&expr) { + return retval; + } + let mut cast_bit = |arg: Expr| { + let src_signed = match Expr::ty(arg) { + CanonicalType::UInt(_) => false, + CanonicalType::SInt(_) => true, + CanonicalType::Bool(_) => false, + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(_) => unreachable!(), + CanonicalType::Bundle(_) => unreachable!(), + CanonicalType::AsyncReset(_) => false, + CanonicalType::SyncReset(_) => false, + CanonicalType::Reset(_) => false, + CanonicalType::Clock(_) => false, + CanonicalType::PhantomConst(_) => unreachable!(), + }; + let dest_signed = match Expr::ty(expr) { + CanonicalType::UInt(_) => false, + CanonicalType::SInt(_) => true, + CanonicalType::Bool(_) => false, + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(_) => unreachable!(), + CanonicalType::Bundle(_) => unreachable!(), + CanonicalType::AsyncReset(_) => false, + CanonicalType::SyncReset(_) => false, + CanonicalType::Reset(_) => false, + CanonicalType::Clock(_) => false, + CanonicalType::PhantomConst(_) => unreachable!(), + }; + self.simple_nary_big_expr(instantiated_module, Expr::ty(expr), [arg], |dest, [src]| { + match (src_signed, dest_signed) { + (false, false) | (true, true) => { + vec![Insn::Copy { dest, src }] + } + (false, true) => vec![Insn::CastToSInt { + dest, + src, + dest_width: 1, + }], + (true, false) => vec![Insn::CastToUInt { + dest, + src, + dest_width: 1, + }], + } + }) + .into() + }; + let retval: CompiledExpr<_> = match *Expr::expr_enum(expr) { + ExprEnum::UIntLiteral(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [], + |dest, []| { + vec![Insn::Const { + dest, + value: expr.to_bigint().intern_sized(), + }] + }, + ) + .into(), + ExprEnum::SIntLiteral(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [], + |dest, []| { + vec![Insn::Const { + dest, + value: expr.to_bigint().intern_sized(), + }] + }, + ) + .into(), + ExprEnum::BoolLiteral(expr) => self + .simple_nary_big_expr(instantiated_module, Bool.canonical(), [], |dest, []| { + vec![Insn::Const { + dest, + value: BigInt::from(expr).intern_sized(), + }] + }) + .into(), + ExprEnum::PhantomConst(_) => self + .compile_aggregate_literal(instantiated_module, Expr::ty(expr), Interned::default()) + .into(), + ExprEnum::BundleLiteral(literal) => self + .compile_aggregate_literal( + instantiated_module, + Expr::ty(expr), + literal.field_values(), + ) + .into(), + ExprEnum::ArrayLiteral(literal) => self + .compile_aggregate_literal( + instantiated_module, + Expr::ty(expr), + literal.element_values(), + ) + .into(), + ExprEnum::EnumLiteral(expr) => { + let enum_bits_ty = UInt[expr.ty().type_properties().bit_width]; + let enum_bits = if let Some(variant_value) = expr.variant_value() { + ( + UInt[expr.ty().discriminant_bit_width()] + .from_int_wrapping(expr.variant_index()), + variant_value, + ) + .cast_to_bits() + .cast_to(enum_bits_ty) + } else { + enum_bits_ty + .from_int_wrapping(expr.variant_index()) + .to_expr() + }; + self.compile_expr( + instantiated_module, + enum_bits.cast_bits_to(expr.ty().canonical()), + ) + } + ExprEnum::Uninit(expr) => self.compile_expr( + instantiated_module, + 
UInt[expr.ty().bit_width()].zero().cast_bits_to(expr.ty()), + ), + ExprEnum::NotU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Expr::ty(expr.arg()).canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::NotU { + dest, + src, + width: Expr::ty(expr.arg()).width(), + }] + }, + ) + .into(), + ExprEnum::NotS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Expr::ty(expr.arg()).canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::NotS { dest, src }], + ) + .into(), + ExprEnum::NotB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Expr::ty(expr.arg()).canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::NotU { + dest, + src, + width: 1, + }] + }, + ) + .into(), + ExprEnum::Neg(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::Neg { dest, src }], + ) + .into(), + ExprEnum::BitAndU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitAndS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitAndB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitOrU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitOrS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitOrB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitXorU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitXorS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitXorB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], + ) + .into(), + ExprEnum::AddU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Add { dest, lhs, rhs }], + ) + .into(), + ExprEnum::AddS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| 
vec![Insn::Add { dest, lhs, rhs }], + ) + .into(), + ExprEnum::SubU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| { + vec![Insn::SubU { + dest, + lhs, + rhs, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::SubS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::SubS { dest, lhs, rhs }], + ) + .into(), + ExprEnum::MulU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], + ) + .into(), + ExprEnum::MulS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DivU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DivS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], + ) + .into(), + ExprEnum::RemU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], + ) + .into(), + ExprEnum::RemS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShlU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShlS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShrU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShrS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], + ) + .into(), + ExprEnum::FixedShlU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shl { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::FixedShlS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shl { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::FixedShrU(expr) => self + .simple_nary_big_expr( + instantiated_module, + 
expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shr { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::FixedShrS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shr { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::CmpLtB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLeB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGtB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGeB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpEqB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpNeB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLtU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLeU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGtU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGeU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpEqU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpNeU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLtS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + 
[Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLeS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGtS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGeS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpEqS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpNeS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CastUIntToUInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToUInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastUIntToSInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToSInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastSIntToUInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToUInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastSIntToSInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToSInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastBoolToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToReset(expr) => 
cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToClock(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastClockToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastClockToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastClockToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::FieldAccess(expr) => self + .compile_expr(instantiated_module, Expr::canonical(expr.base())) + .map_ty(Bundle::from_canonical) + .field_by_index(expr.field_index()), + ExprEnum::VariantAccess(variant_access) => { + let start = Expr::ty(variant_access.base()).discriminant_bit_width(); + let len = Expr::ty(expr).bit_width(); + self.compile_expr( + instantiated_module, + variant_access.base().cast_to_bits()[start..start + len] + .cast_bits_to(Expr::ty(expr)), + ) + } + ExprEnum::ArrayIndex(expr) => self + .compile_expr(instantiated_module, Expr::canonical(expr.base())) + .map_ty(Array::from_canonical) + .element(expr.element_index()), + ExprEnum::DynArrayIndex(expr) => { + let element_index = + self.compile_expr(instantiated_module, Expr::canonical(expr.element_index())); + let element_index = self.compiled_expr_to_value( + element_index, + instantiated_module.leaf_module().source_location(), + ); + let index_slot = self.compiled_value_to_dyn_array_index( + element_index.map_ty(UInt::from_canonical), + instantiated_module.leaf_module().source_location(), + ); + self.compile_expr(instantiated_module, Expr::canonical(expr.base())) + .map_ty(Array::from_canonical) + .element_dyn(index_slot) + } + ExprEnum::ReduceBitAndU(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical( + expr.arg() + .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), + ), + ) + } + .into(), + ExprEnum::ReduceBitAndS(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical( + expr.arg() + .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), + ), + ) + } + .into(), + ExprEnum::ReduceBitOrU(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), + ) + } + .into(), + ExprEnum::ReduceBitOrS(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), + ) + } + .into(), + ExprEnum::ReduceBitXorU(expr) => self + 
.simple_nary_big_expr( + instantiated_module, + UInt::<1>::TYPE.canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::ReduceBitXor { + dest, + src, + input_width: Expr::ty(expr.arg()).width(), + }] + }, + ) + .into(), + ExprEnum::ReduceBitXorS(expr) => self + .simple_nary_big_expr( + instantiated_module, + UInt::<1>::TYPE.canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::ReduceBitXor { + dest, + src, + input_width: Expr::ty(expr.arg()).width(), + }] + }, + ) + .into(), + ExprEnum::SliceUInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + UInt::new_dyn(expr.range().len()).canonical(), + [Expr::canonical(expr.base())], + |dest, [src]| { + vec![Insn::SliceInt { + dest, + src, + start: expr.range().start, + len: expr.range().len(), + }] + }, + ) + .into(), + ExprEnum::SliceSInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + UInt::new_dyn(expr.range().len()).canonical(), + [Expr::canonical(expr.base())], + |dest, [src]| { + vec![Insn::SliceInt { + dest, + src, + start: expr.range().start, + len: expr.range().len(), + }] + }, + ) + .into(), + ExprEnum::CastToBits(expr) => self + .compile_cast_to_bits(instantiated_module, expr) + .map_ty(CanonicalType::UInt) + .into(), + ExprEnum::CastBitsTo(expr) => { + self.compile_cast_bits_to(instantiated_module, expr).into() + } + ExprEnum::ModuleIO(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::Instance(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::Wire(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::Reg(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::RegSync(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::RegAsync(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::MemPort(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + }; + self.compiled_exprs.insert(expr, retval); + retval + } + fn compile_simple_connect( + &mut self, + conditions: Interned<[Cond]>, + lhs: CompiledExpr, + rhs: CompiledValue, + source_location: SourceLocation, + ) { + let CompiledExpr { + static_part: lhs_static_part, + indexes, + } = lhs; + let (lhs_layout, lhs_range) = lhs_static_part.write(); + assert!( + lhs_layout.ty.is_passive(), + "invalid expression passed to compile_simple_connect -- type must be passive", + ); + let TypeIndexRange { + small_slots, + big_slots, + } = lhs_range; + self.add_assignment( + conditions, + small_slots + .iter() + .zip(rhs.range.small_slots.iter()) + .map(|(base, src)| { + if indexes.small_slots.is_empty() { + Insn::CopySmall { dest: base, src } + } else { + Insn::WriteSmallIndexed { + dest: StatePartArrayIndexed { + base, + indexes: indexes.small_slots, + }, + src, + } + } + }) + .chain( + big_slots + .iter() + .zip(rhs.range.big_slots.iter()) + .map(|(base, src)| { + if indexes.big_slots.is_empty() { + Insn::Copy { dest: base, src } + } else { + Insn::WriteIndexed { + dest: StatePartArrayIndexed { + base, + indexes: indexes.big_slots, + }, + src, + } + } + }), + ), + source_location, + ); + 
} + fn compile_connect( + &mut self, + lhs_instantiated_module: InstantiatedModule, + lhs_conditions: Interned<[Cond]>, + lhs: Expr, + rhs_instantiated_module: InstantiatedModule, + rhs_conditions: Interned<[Cond]>, + mut rhs: Expr, + source_location: SourceLocation, + ) { + if Expr::ty(lhs) != Expr::ty(rhs) || !Expr::ty(lhs).is_passive() { + match Expr::ty(lhs) { + CanonicalType::UInt(lhs_ty) => { + rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); + } + CanonicalType::SInt(lhs_ty) => { + rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); + } + CanonicalType::Bool(_) => unreachable!(), + CanonicalType::Array(lhs_ty) => { + let CanonicalType::Array(rhs_ty) = Expr::ty(rhs) else { + unreachable!(); + }; + assert_eq!(lhs_ty.len(), rhs_ty.len()); + let lhs = Expr::::from_canonical(lhs); + let rhs = Expr::::from_canonical(rhs); + for index in 0..lhs_ty.len() { + self.compile_connect( + lhs_instantiated_module, + lhs_conditions, + lhs[index], + rhs_instantiated_module, + rhs_conditions, + rhs[index], + source_location, + ); + } + return; + } + CanonicalType::Enum(lhs_ty) => { + let CanonicalType::Enum(rhs_ty) = Expr::ty(rhs) else { + unreachable!(); + }; + todo!("handle connect with different enum types"); + } + CanonicalType::Bundle(lhs_ty) => { + let CanonicalType::Bundle(rhs_ty) = Expr::ty(rhs) else { + unreachable!(); + }; + assert_eq!(lhs_ty.fields().len(), rhs_ty.fields().len()); + let lhs = Expr::::from_canonical(lhs); + let rhs = Expr::::from_canonical(rhs); + for ( + field_index, + ( + BundleField { + name, + flipped, + ty: _, + }, + rhs_field, + ), + ) in lhs_ty.fields().into_iter().zip(rhs_ty.fields()).enumerate() + { + assert_eq!(name, rhs_field.name); + assert_eq!(flipped, rhs_field.flipped); + let lhs_expr = ops::FieldAccess::new_by_index(lhs, field_index).to_expr(); + let rhs_expr = ops::FieldAccess::new_by_index(rhs, field_index).to_expr(); + if flipped { + // swap lhs/rhs + self.compile_connect( + rhs_instantiated_module, + rhs_conditions, + rhs_expr, + lhs_instantiated_module, + lhs_conditions, + lhs_expr, + source_location, + ); + } else { + self.compile_connect( + lhs_instantiated_module, + lhs_conditions, + lhs_expr, + rhs_instantiated_module, + rhs_conditions, + rhs_expr, + source_location, + ); + } + } + return; + } + CanonicalType::AsyncReset(_) => unreachable!(), + CanonicalType::SyncReset(_) => unreachable!(), + CanonicalType::Reset(_) => unreachable!(), + CanonicalType::Clock(_) => unreachable!(), + CanonicalType::PhantomConst(_) => unreachable!("PhantomConst mismatch"), + } + } + let Some(target) = lhs.target() else { + unreachable!("connect lhs must have target"); + }; + let lhs_decl_conditions = self.decl_conditions[&TargetInInstantiatedModule { + instantiated_module: lhs_instantiated_module, + target: target.base().into(), + }]; + let lhs = self.compile_expr(lhs_instantiated_module, lhs); + let rhs = self.compile_expr(rhs_instantiated_module, rhs); + let rhs = self.compiled_expr_to_value(rhs, source_location); + self.compile_simple_connect( + lhs_conditions[lhs_decl_conditions.len()..].intern(), + lhs, + rhs, + source_location, + ); + } + fn compile_clock( + &mut self, + clk: CompiledValue, + source_location: SourceLocation, + ) -> ClockTrigger { + if let Some(&retval) = self.compiled_value_to_clock_trigger_map.get(&clk) { + return retval; + } + let mut alloc_small_slot = |part_name: &str| { + self.insns + .state_layout + .ty + .small_slots + .allocate(&StatePartLayout::scalar( + SlotDebugData { + name: Interned::default(), + ty: 
Bool.canonical(), + }, + (), + )) + .start + }; + let last_clk_was_low = alloc_small_slot("last_clk_was_low"); + let clk_triggered = alloc_small_slot("clk_triggered"); + let retval = ClockTrigger { + last_clk_was_low, + clk: self.compiled_value_bool_dest_is_small( + clk.map_ty(CanonicalType::Clock), + source_location, + ), + clk_triggered, + source_location, + }; + self.add_assignment( + Interned::default(), + [Insn::AndSmall { + dest: clk_triggered, + lhs: retval.clk, + rhs: last_clk_was_low, + }], + source_location, + ); + self.clock_triggers.push(retval); + self.compiled_value_to_clock_trigger_map.insert(clk, retval); + retval + } + fn compile_enum_discriminant( + &mut self, + enum_value: CompiledValue, + source_location: SourceLocation, + ) -> StatePartIndex { + if let Some(&retval) = self.enum_discriminants.get(&enum_value) { + return retval; + } + let retval_ty = Enum::new( + enum_value + .layout + .ty + .variants() + .iter() + .map(|variant| EnumVariant { + name: variant.name, + ty: None, + }) + .collect(), + ); + let retval = if retval_ty == enum_value.layout.ty + && enum_value.range.len() == TypeLen::A_SMALL_SLOT + { + enum_value.range.small_slots.start + } else { + let retval = self + .insns + .state_layout + .ty + .small_slots + .allocate(&StatePartLayout::scalar( + SlotDebugData { + name: Interned::default(), + ty: retval_ty.canonical(), + }, + (), + )) + .start; + let discriminant_bit_width = enum_value.layout.ty.discriminant_bit_width(); + let discriminant_mask = !(!0u64 << discriminant_bit_width); + let insn = match enum_value.range.len() { + TypeLen::A_BIG_SLOT => Insn::AndBigWithSmallImmediate { + dest: retval, + lhs: enum_value.range.big_slots.start, + rhs: discriminant_mask, + }, + TypeLen::A_SMALL_SLOT => { + if discriminant_bit_width == enum_value.layout.ty.type_properties().bit_width { + Insn::CopySmall { + dest: retval, + src: enum_value.range.small_slots.start, + } + } else { + Insn::AndSmallImmediate { + dest: retval, + lhs: enum_value.range.small_slots.start, + rhs: discriminant_mask, + } + } + } + _ => unreachable!(), + }; + self.add_assignment(Interned::default(), [insn], source_location); + retval + }; + self.enum_discriminants.insert(enum_value, retval); + retval + } + fn compile_stmt_reg( + &mut self, + stmt_reg: StmtReg, + instantiated_module: InstantiatedModule, + value: CompiledValue, + ) { + let StmtReg { annotations, reg } = stmt_reg; + let clk = self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().clk)); + let clk = self + .compiled_expr_to_value(clk, reg.source_location()) + .map_ty(Clock::from_canonical); + let clk = self.compile_clock(clk, reg.source_location()); + struct Dispatch; + impl ResetTypeDispatch for Dispatch { + type Input = (); + + type Output = bool; + + fn reset(self, _input: Self::Input) -> Self::Output { + unreachable!() + } + + fn sync_reset(self, _input: Self::Input) -> Self::Output { + false + } + + fn async_reset(self, _input: Self::Input) -> Self::Output { + true + } + } + let reset = if let Some(init) = reg.init() { + let init = self.compile_expr(instantiated_module, init); + let init = self.compiled_expr_to_value(init, reg.source_location()); + let rst = + self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().rst)); + let rst = self.compiled_expr_to_value(rst, reg.source_location()); + let rst = self.compiled_value_bool_dest_is_small(rst, reg.source_location()); + let is_async = R::dispatch((), Dispatch); + if is_async { + let cond = Expr::canonical(reg.clock_domain().rst.cast_to(Bool)); 
+ let cond = self.compile_expr(instantiated_module, cond); + let cond = self.compiled_expr_to_value(cond, reg.source_location()); + let cond = cond.map_ty(Bool::from_canonical); + // write to the register's current value since asynchronous reset is combinational + let lhs = CompiledValue { + layout: value.layout, + range: value.range, + write: None, + } + .into(); + self.compile_simple_connect( + [Cond { + body: CondBody::IfTrue { cond }, + source_location: reg.source_location(), + }][..] + .intern(), + lhs, + init, + reg.source_location(), + ); + } + Some(RegisterReset { + is_async, + init, + rst, + }) + } else { + None + }; + self.registers.push(Register { + value, + clk_triggered: clk.clk_triggered, + reset, + source_location: reg.source_location(), + }); + } + fn compile_declaration( + &mut self, + declaration: StmtDeclaration, + parent_module: Interned, + conditions: Interned<[Cond]>, + ) -> TraceDecl { + let target_base: TargetBase = match &declaration { + StmtDeclaration::Wire(v) => v.wire.into(), + StmtDeclaration::Reg(v) => v.reg.into(), + StmtDeclaration::RegSync(v) => v.reg.into(), + StmtDeclaration::RegAsync(v) => v.reg.into(), + StmtDeclaration::Instance(v) => v.instance.into(), + }; + let target = TargetInInstantiatedModule { + instantiated_module: *parent_module, + target: target_base.into(), + }; + self.decl_conditions.insert(target, conditions); + let compiled_value = self.compile_value(target); + match declaration { + StmtDeclaration::Wire(StmtWire { annotations, wire }) => {} + StmtDeclaration::Reg(_) => { + unreachable!("Reset types were already replaced by SyncReset or AsyncReset"); + } + StmtDeclaration::RegSync(stmt_reg) => { + self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) + } + StmtDeclaration::RegAsync(stmt_reg) => { + self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) + } + StmtDeclaration::Instance(StmtInstance { + annotations, + instance, + }) => { + let inner_instantiated_module = InstantiatedModule::Child { + parent: parent_module, + instance: instance.intern_sized(), + } + .intern_sized(); + let instance_expr = instance.to_expr(); + self.compile_module(inner_instantiated_module); + for (field_index, module_io) in + instance.instantiated().module_io().into_iter().enumerate() + { + let instance_field = + ops::FieldAccess::new_by_index(instance_expr, field_index).to_expr(); + match Expr::flow(instance_field) { + Flow::Source => { + // we need to supply the value to the instance since the + // parent module expects to read from the instance + self.compile_connect( + *parent_module, + conditions, + instance_field, + *inner_instantiated_module, + Interned::default(), + module_io.module_io.to_expr(), + instance.source_location(), + ); + } + Flow::Sink => { + // we need to take the value from the instance since the + // parent module expects to write to the instance + self.compile_connect( + *inner_instantiated_module, + Interned::default(), + module_io.module_io.to_expr(), + *parent_module, + conditions, + instance_field, + instance.source_location(), + ); + } + Flow::Duplex => unreachable!(), + } + } + } + } + self.make_trace_decl(*parent_module, target_base) + } + fn allocate_delay_chain( + &mut self, + len: usize, + layout: &TypeLayout, + first: Option, + last: Option, + mut from_allocation: impl FnMut(TypeIndexRange) -> T, + ) -> Vec { + match (len, first, last) { + (0, _, _) => Vec::new(), + (1, Some(v), _) | (1, None, Some(v)) => vec![v], + (2, Some(first), Some(last)) => vec![first, last], + (len, first, last) => { + let 
inner_len = len - first.is_some() as usize - last.is_some() as usize; + first + .into_iter() + .chain( + (0..inner_len) + .map(|_| from_allocation(self.insns.allocate_variable(layout))), + ) + .chain(last) + .collect() + } + } + } + fn allocate_delay_chain_small( + &mut self, + len: usize, + ty: CanonicalType, + first: Option>, + last: Option>, + ) -> Vec> { + self.allocate_delay_chain( + len, + &TypeLayout { + small_slots: StatePartLayout::scalar( + SlotDebugData { + name: Interned::default(), + ty, + }, + (), + ), + big_slots: StatePartLayout::empty(), + }, + first, + last, + |range| range.small_slots.start, + ) + } + fn compile_memory_port_rw_helper( + &mut self, + memory: StatePartIndex, + stride: usize, + mut start: usize, + data_layout: CompiledTypeLayout, + mask_layout: CompiledTypeLayout, + mut read: Option>, + mut write: Option>, + ) { + match data_layout.body { + CompiledTypeLayoutBody::Scalar => { + let CompiledTypeLayoutBody::Scalar = mask_layout.body else { + unreachable!(); + }; + let signed = match data_layout.ty { + CanonicalType::UInt(_) => false, + CanonicalType::SInt(_) => true, + CanonicalType::Bool(_) => false, + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(_) => false, + CanonicalType::Bundle(_) => unreachable!(), + CanonicalType::AsyncReset(_) => false, + CanonicalType::SyncReset(_) => false, + CanonicalType::Reset(_) => false, + CanonicalType::Clock(_) => false, + CanonicalType::PhantomConst(_) => unreachable!(), + }; + let width = data_layout.ty.bit_width(); + if let Some(MemoryPortReadInsns { + addr, + en: _, + write_mode: _, + data, + insns, + }) = read + { + insns.push( + match data.len() { + TypeLen::A_BIG_SLOT => { + let dest = data.big_slots.start; + if signed { + Insn::MemoryReadSInt { + dest, + memory, + addr, + stride, + start, + width, + } + } else { + Insn::MemoryReadUInt { + dest, + memory, + addr, + stride, + start, + width, + } + } + } + TypeLen::A_SMALL_SLOT => { + let _dest = data.small_slots.start; + todo!("memory ports' data are always big for now"); + } + _ => unreachable!(), + } + .into(), + ); + } + if let Some(MemoryPortWriteInsns { + addr, + en: _, + write_mode: _, + data, + mask, + insns, + }) = write + { + let end_label = self.insns.new_label(); + insns.push( + match mask.len() { + TypeLen::A_BIG_SLOT => Insn::BranchIfZero { + target: end_label.0, + value: mask.big_slots.start, + }, + TypeLen::A_SMALL_SLOT => Insn::BranchIfSmallZero { + target: end_label.0, + value: mask.small_slots.start, + }, + _ => unreachable!(), + } + .into(), + ); + insns.push( + match data.len() { + TypeLen::A_BIG_SLOT => { + let value = data.big_slots.start; + if signed { + Insn::MemoryWriteSInt { + value, + memory, + addr, + stride, + start, + width, + } + } else { + Insn::MemoryWriteUInt { + value, + memory, + addr, + stride, + start, + width, + } + } + } + TypeLen::A_SMALL_SLOT => { + let _value = data.small_slots.start; + todo!("memory ports' data are always big for now"); + } + _ => unreachable!(), + } + .into(), + ); + insns.push(end_label.into()); + } + } + CompiledTypeLayoutBody::Array { element } => { + let CompiledTypeLayoutBody::Array { + element: mask_element, + } = mask_layout.body + else { + unreachable!(); + }; + let ty = ::from_canonical(data_layout.ty); + let element_bit_width = ty.element().bit_width(); + let element_size = element.layout.len(); + let mask_element_size = mask_element.layout.len(); + for element_index in 0..ty.len() { + self.compile_memory_port_rw_helper( + memory, + stride, + start, + *element, + *mask_element, 
+ read.as_mut().map( + |MemoryPortReadInsns { + addr, + en, + write_mode, + data, + insns, + }| MemoryPortReadInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.index_array(element_size, element_index), + insns, + }, + ), + write.as_mut().map( + |MemoryPortWriteInsns { + addr, + en, + write_mode, + data, + mask, + insns, + }| { + MemoryPortWriteInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.index_array(element_size, element_index), + mask: mask.index_array(mask_element_size, element_index), + insns, + } + }, + ), + ); + start += element_bit_width; + } + } + CompiledTypeLayoutBody::Bundle { fields } => { + let CompiledTypeLayoutBody::Bundle { + fields: mask_fields, + } = mask_layout.body + else { + unreachable!(); + }; + assert_eq!(fields.len(), mask_fields.len()); + for (field, mask_field) in fields.into_iter().zip(mask_fields) { + let field_index_range = + TypeIndexRange::new(field.offset, field.ty.layout.len()); + let mask_field_index_range = + TypeIndexRange::new(mask_field.offset, mask_field.ty.layout.len()); + self.compile_memory_port_rw_helper( + memory, + stride, + start, + field.ty, + mask_field.ty, + read.as_mut().map( + |MemoryPortReadInsns { + addr, + en, + write_mode, + data, + insns, + }| MemoryPortReadInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.slice(field_index_range), + insns, + }, + ), + write.as_mut().map( + |MemoryPortWriteInsns { + addr, + en, + write_mode, + data, + mask, + insns, + }| { + MemoryPortWriteInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.slice(field_index_range), + mask: mask.slice(mask_field_index_range), + insns, + } + }, + ), + ); + start = start + field.ty.ty.bit_width(); + } + } + } + } + fn compile_memory_port_rw( + &mut self, + memory: StatePartIndex, + data_layout: CompiledTypeLayout, + mask_layout: CompiledTypeLayout, + mut read: Option>, + mut write: Option>, + ) { + let read_else_label = read.as_mut().map( + |MemoryPortReadInsns { + addr: _, + en, + write_mode, + data: _, + insns, + }| { + let else_label = self.insns.new_label(); + insns.push( + Insn::BranchIfSmallZero { + target: else_label.0, + value: *en, + } + .into(), + ); + if let Some(write_mode) = *write_mode { + insns.push( + Insn::BranchIfSmallNonZero { + target: else_label.0, + value: write_mode, + } + .into(), + ); + } + else_label + }, + ); + let write_end_label = write.as_mut().map( + |MemoryPortWriteInsns { + addr: _, + en, + write_mode, + data: _, + mask: _, + insns, + }| { + let end_label = self.insns.new_label(); + insns.push( + Insn::BranchIfSmallZero { + target: end_label.0, + value: *en, + } + .into(), + ); + if let Some(write_mode) = *write_mode { + insns.push( + Insn::BranchIfSmallZero { + target: end_label.0, + value: write_mode, + } + .into(), + ); + } + end_label + }, + ); + self.compile_memory_port_rw_helper( + memory, + data_layout.ty.bit_width(), + 0, + data_layout, + mask_layout, + read.as_mut().map( + |MemoryPortReadInsns { + addr, + en, + write_mode, + data, + insns, + }| MemoryPortReadInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: *data, + insns: *insns, + }, + ), + write.as_mut().map( + |MemoryPortWriteInsns { + addr, + en, + write_mode, + data, + mask, + insns, + }| MemoryPortWriteInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: *data, + mask: *mask, + insns: *insns, + }, + ), + ); + if let ( + Some(else_label), + Some(MemoryPortReadInsns { + addr: _, + en: _, + write_mode: _, + data, + insns, + }), + ) = 
(read_else_label, read) + { + let end_label = self.insns.new_label(); + insns.push( + Insn::Branch { + target: end_label.0, + } + .into(), + ); + insns.push(else_label.into()); + let TypeIndexRange { + small_slots, + big_slots, + } = data; + for dest in small_slots.iter() { + insns.push(Insn::ConstSmall { dest, value: 0 }.into()); + } + for dest in big_slots.iter() { + insns.push( + Insn::Const { + dest, + value: BigInt::ZERO.intern_sized(), + } + .into(), + ); + } + insns.push(end_label.into()); + } + if let (Some(end_label), Some(write)) = (write_end_label, write) { + write.insns.push(end_label.into()); + } + } + fn compile_memory( + &mut self, + mem: Mem, + instantiated_module: InstantiatedModule, + conditions: Interned<[Cond]>, + trace_decls: &mut Vec, + ) { + let data_layout = CompiledTypeLayout::get(mem.array_type().element()); + let mask_layout = CompiledTypeLayout::get(mem.array_type().element().mask_type()); + let read_latency_plus_1 = mem + .read_latency() + .checked_add(1) + .expect("read latency too big"); + let write_latency_plus_1 = mem + .write_latency() + .get() + .checked_add(1) + .expect("write latency too big"); + let read_cycle = match mem.read_under_write() { + ReadUnderWrite::Old => 0, + ReadUnderWrite::New => mem.read_latency(), + ReadUnderWrite::Undefined => mem.read_latency() / 2, // something other than Old or New + }; + let memory = self + .insns + .state_layout + .memories + .allocate(&StatePartLayout::scalar( + (), + MemoryData { + array_type: mem.array_type(), + data: mem.initial_value().unwrap_or_else(|| { + Intern::intern_owned(BitVec::repeat( + false, + mem.array_type().type_properties().bit_width, + )) + }), + }, + )) + .start; + let (ports, trace_ports) = mem + .ports() + .iter() + .map(|&port| { + let target_base = TargetBase::MemPort(port); + let target = TargetInInstantiatedModule { + instantiated_module, + target: target_base.into(), + }; + self.decl_conditions.insert(target, conditions); + let TraceDecl::Scope(TraceScope::MemPort(trace_port)) = + self.make_trace_decl(instantiated_module, target_base) + else { + unreachable!(); + }; + let clk = Expr::field(port.to_expr(), "clk"); + let clk = self.compile_expr(instantiated_module, clk); + let clk = self.compiled_expr_to_value(clk, mem.source_location()); + let clk_triggered = self + .compile_clock(clk.map_ty(Clock::from_canonical), mem.source_location()) + .clk_triggered; + let en = Expr::field(port.to_expr(), "en"); + let en = self.compile_expr(instantiated_module, en); + let en = self.compiled_expr_to_value(en, mem.source_location()); + let en = self.compiled_value_bool_dest_is_small(en, mem.source_location()); + let addr = Expr::field(port.to_expr(), "addr"); + let addr = self.compile_expr(instantiated_module, addr); + let addr = self.compiled_expr_to_value(addr, mem.source_location()); + let addr_ty = addr.layout.ty; + let addr = self.compiled_value_to_dyn_array_index( + addr.map_ty(UInt::from_canonical), + mem.source_location(), + ); + let read_data = port.port_kind().rdata_name().map(|name| { + let read_data = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let read_data = self.compiled_expr_to_value(read_data, mem.source_location()); + read_data.range + }); + let write_data = port.port_kind().wdata_name().map(|name| { + let write_data = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let write_data = self.compiled_expr_to_value(write_data, mem.source_location()); + write_data.range + }); + let write_mask = 
port.port_kind().wmask_name().map(|name| { + let write_mask = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let write_mask = self.compiled_expr_to_value(write_mask, mem.source_location()); + write_mask.range + }); + let write_mode = port.port_kind().wmode_name().map(|name| { + let write_mode = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let write_mode = self.compiled_expr_to_value(write_mode, mem.source_location()); + self.compiled_value_bool_dest_is_small(write_mode, mem.source_location()) + }); + struct PortParts { + en_delayed_len: usize, + addr_delayed_len: usize, + read_data_delayed_len: usize, + write_data_delayed_len: usize, + write_mask_delayed_len: usize, + write_mode_delayed_len: usize, + read_cycle: Option, + write_cycle: Option, + } + let PortParts { + en_delayed_len, + addr_delayed_len, + read_data_delayed_len, + write_data_delayed_len, + write_mask_delayed_len, + write_mode_delayed_len, + read_cycle, + write_cycle, + } = match port.port_kind() { + PortKind::ReadOnly => PortParts { + en_delayed_len: read_cycle + 1, + addr_delayed_len: read_cycle + 1, + read_data_delayed_len: read_latency_plus_1 - read_cycle, + write_data_delayed_len: 0, + write_mask_delayed_len: 0, + write_mode_delayed_len: 0, + read_cycle: Some(read_cycle), + write_cycle: None, + }, + PortKind::WriteOnly => PortParts { + en_delayed_len: write_latency_plus_1, + addr_delayed_len: write_latency_plus_1, + read_data_delayed_len: 0, + write_data_delayed_len: write_latency_plus_1, + write_mask_delayed_len: write_latency_plus_1, + write_mode_delayed_len: 0, + read_cycle: None, + write_cycle: Some(mem.write_latency().get()), + }, + PortKind::ReadWrite => { + let can_rw_at_end = match mem.read_under_write() { + ReadUnderWrite::Old => false, + ReadUnderWrite::New | ReadUnderWrite::Undefined => true, + }; + let latency_plus_1 = read_latency_plus_1; + if latency_plus_1 != write_latency_plus_1 || !can_rw_at_end { + todo!( + "not sure what to do, issue: \ + https://github.com/chipsalliance/firrtl-spec/issues/263" + ); + } + PortParts { + en_delayed_len: latency_plus_1, + addr_delayed_len: latency_plus_1, + read_data_delayed_len: 1, + write_data_delayed_len: latency_plus_1, + write_mask_delayed_len: latency_plus_1, + write_mode_delayed_len: latency_plus_1, + read_cycle: Some(latency_plus_1 - 1), + write_cycle: Some(latency_plus_1 - 1), + } + } + }; + let addr_delayed = self.allocate_delay_chain_small( + addr_delayed_len, + addr_ty.canonical(), + Some(addr), + None, + ); + let en_delayed = self.allocate_delay_chain_small( + en_delayed_len, + Bool.canonical(), + Some(en), + None, + ); + let read_data_delayed = self.allocate_delay_chain( + read_data_delayed_len, + &data_layout.layout, + None, + read_data, + |v| v, + ); + let write_data_delayed = self.allocate_delay_chain( + write_data_delayed_len, + &data_layout.layout, + write_data, + None, + |v| v, + ); + let write_mask_delayed = self.allocate_delay_chain( + write_mask_delayed_len, + &mask_layout.layout, + write_mask, + None, + |v| v, + ); + let write_mode_delayed = self.allocate_delay_chain_small( + write_mode_delayed_len, + Bool.canonical(), + write_mode, + None, + ); + let mut read_insns = Vec::new(); + let mut write_insns = Vec::new(); + self.compile_memory_port_rw( + memory, + data_layout, + mask_layout, + read_cycle.map(|read_cycle| MemoryPortReadInsns { + addr: addr_delayed[read_cycle], + en: en_delayed[read_cycle], + write_mode: write_mode_delayed.get(read_cycle).copied(), + data: 
read_data_delayed[0], + insns: &mut read_insns, + }), + write_cycle.map(|write_cycle| MemoryPortWriteInsns { + addr: addr_delayed[write_cycle], + en: en_delayed[write_cycle], + write_mode: write_mode_delayed.get(write_cycle).copied(), + data: write_data_delayed[write_cycle], + mask: write_mask_delayed[write_cycle], + insns: &mut write_insns, + }), + ); + self.add_assignment(Interned::default(), read_insns, mem.source_location()); + ( + MemoryPort { + clk_triggered, + addr_delayed, + en_delayed, + data_layout, + read_data_delayed, + write_data_delayed, + write_mask_delayed, + write_mode_delayed, + write_insns, + }, + trace_port, + ) + }) + .unzip(); + let name = mem.scoped_name().1.0; + let id = TraceMemoryId(self.memories.len()); + let stride = mem.array_type().element().bit_width(); + let trace = TraceMem { + id, + name, + stride, + element_type: self + .make_trace_decl_child( + instantiated_module, + MakeTraceDeclTarget::Memory { + id, + depth: mem.array_type().len(), + stride, + start: 0, + ty: mem.array_type().element(), + }, + name, + mem.source_location(), + ) + .intern_sized(), + ports: Intern::intern_owned(trace_ports), + array_type: mem.array_type(), + }; + trace_decls.push(trace.into()); + self.memories.push(Memory { + mem, + memory, + trace, + ports, + }); + } + fn compile_block( + &mut self, + parent_module: Interned, + block: Block, + conditions: Interned<[Cond]>, + trace_decls: &mut Vec, + ) { + let Block { memories, stmts } = block; + for memory in memories { + self.compile_memory(memory, *parent_module, conditions, trace_decls); + } + for stmt in stmts { + match stmt { + Stmt::Connect(StmtConnect { + lhs, + rhs, + source_location, + }) => self.compile_connect( + *parent_module, + conditions, + lhs, + *parent_module, + conditions, + rhs, + source_location, + ), + Stmt::Formal(StmtFormal { .. 
}) => todo!("implement simulating formal statements"), + Stmt::If(StmtIf { + cond, + source_location, + blocks: [then_block, else_block], + }) => { + let cond = self.compile_expr(*parent_module, Expr::canonical(cond)); + let cond = self.compiled_expr_to_value(cond, source_location); + let cond = cond.map_ty(Bool::from_canonical); + self.compile_block( + parent_module, + then_block, + Interned::from_iter(conditions.iter().copied().chain([Cond { + body: CondBody::IfTrue { cond }, + source_location, + }])), + trace_decls, + ); + self.compile_block( + parent_module, + else_block, + Interned::from_iter(conditions.iter().copied().chain([Cond { + body: CondBody::IfFalse { cond }, + source_location, + }])), + trace_decls, + ); + } + Stmt::Match(StmtMatch { + expr, + source_location, + blocks, + }) => { + let enum_expr = self.compile_expr(*parent_module, Expr::canonical(expr)); + let enum_expr = self.compiled_expr_to_value(enum_expr, source_location); + let enum_expr = enum_expr.map_ty(Enum::from_canonical); + let discriminant = self.compile_enum_discriminant(enum_expr, source_location); + for (variant_index, block) in blocks.into_iter().enumerate() { + self.compile_block( + parent_module, + block, + Interned::from_iter(conditions.iter().copied().chain([Cond { + body: CondBody::MatchArm { + discriminant, + variant_index, + }, + source_location, + }])), + trace_decls, + ); + } + } + Stmt::Declaration(declaration) => { + trace_decls.push(self.compile_declaration( + declaration, + parent_module, + conditions, + )); + } + } + } + } + fn compile_module(&mut self, module: Interned) -> &CompiledModule { + let mut trace_decls = Vec::new(); + let module_io = module + .leaf_module() + .module_io() + .iter() + .map( + |&AnnotatedModuleIO { + annotations: _, + module_io, + }| { + let target = TargetInInstantiatedModule { + instantiated_module: *module, + target: Target::from(module_io), + }; + self.decl_conditions.insert(target, Interned::default()); + trace_decls.push(self.make_trace_decl(*module, module_io.into())); + self.compile_value(target) + }, + ) + .collect(); + match module.leaf_module().body() { + ModuleBody::Normal(NormalModuleBody { body }) => { + self.compile_block(module, body, Interned::default(), &mut trace_decls); + } + ModuleBody::Extern(ExternModuleBody { + verilog_name: _, + parameters: _, + simulation, + }) => { + let Some(simulation) = simulation else { + panic!( + "can't simulate extern module without extern_module_simulation: {}", + module.leaf_module().source_location() + ); + }; + self.extern_modules.push(CompiledExternModule { + module_io_targets: module + .leaf_module() + .module_io() + .iter() + .map(|v| Target::from(v.module_io)) + .collect(), + module_io, + simulation, + }); + } + } + let hashbrown::hash_map::Entry::Vacant(entry) = self.modules.entry(*module) else { + unreachable!("compiled same instantiated module twice"); + }; + entry.insert(CompiledModule { + module_io, + trace_decls: TraceModule { + name: module.leaf_module().name(), + children: Intern::intern_owned(trace_decls), + }, + }) + } + fn process_assignments(&mut self) { + self.assignments + .finalize(self.insns.state_layout().ty.clone().into()); + if let Some(DebugOpaque(dump_assignments_dot)) = &self.dump_assignments_dot { + let graph = + petgraph::graph::DiGraph::<_, _, usize>::from_elements(self.assignments.elements()); + dump_assignments_dot(&petgraph::dot::Dot::new(&graph)); + } + let assignments_order: Vec<_> = match petgraph::algo::toposort(&self.assignments, None) { + Ok(nodes) => nodes + .into_iter() + 
.filter_map(|n| match n { + AssignmentOrSlotIndex::AssignmentIndex(v) => Some(v), + _ => None, + }) + .collect(), + Err(e) => match e.node_id() { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => panic!( + "combinatorial logic cycle detected at: {}", + self.assignments.assignments()[assignment_index].source_location, + ), + AssignmentOrSlotIndex::SmallSlot(slot) => panic!( + "combinatorial logic cycle detected through: {}", + self.insns.state_layout().ty.small_slots.debug_data[slot.as_usize()].name, + ), + AssignmentOrSlotIndex::BigSlot(slot) => panic!( + "combinatorial logic cycle detected through: {}", + self.insns.state_layout().ty.big_slots.debug_data[slot.as_usize()].name, + ), + }, + }; + struct CondStackEntry<'a> { + cond: &'a Cond, + end_label: Label, + } + let mut cond_stack = Vec::>::new(); + for assignment_index in assignments_order { + let Assignment { + inputs: _, + outputs: _, + conditions, + insns, + source_location, + } = &self.assignments.assignments()[assignment_index]; + let mut same_len = 0; + for (index, (entry, cond)) in cond_stack.iter().zip(conditions).enumerate() { + if entry.cond != cond { + break; + } + same_len = index + 1; + } + while cond_stack.len() > same_len { + let CondStackEntry { cond: _, end_label } = + cond_stack.pop().expect("just checked len"); + self.insns.define_label_at_next_insn(end_label); + } + for cond in &conditions[cond_stack.len()..] { + let end_label = self.insns.new_label(); + match cond.body { + CondBody::IfTrue { cond: cond_value } + | CondBody::IfFalse { cond: cond_value } => { + let (branch_if_zero, branch_if_non_zero) = match cond_value.range.len() { + TypeLen::A_SMALL_SLOT => ( + Insn::BranchIfSmallZero { + target: end_label.0, + value: cond_value.range.small_slots.start, + }, + Insn::BranchIfSmallNonZero { + target: end_label.0, + value: cond_value.range.small_slots.start, + }, + ), + TypeLen::A_BIG_SLOT => ( + Insn::BranchIfZero { + target: end_label.0, + value: cond_value.range.big_slots.start, + }, + Insn::BranchIfNonZero { + target: end_label.0, + value: cond_value.range.big_slots.start, + }, + ), + _ => unreachable!(), + }; + self.insns.push( + if let CondBody::IfTrue { .. 
} = cond.body { + branch_if_zero + } else { + branch_if_non_zero + }, + cond.source_location, + ); + } + CondBody::MatchArm { + discriminant, + variant_index, + } => { + self.insns.push( + Insn::BranchIfSmallNeImmediate { + target: end_label.0, + lhs: discriminant, + rhs: variant_index as _, + }, + cond.source_location, + ); + } + } + cond_stack.push(CondStackEntry { cond, end_label }); + } + self.insns.extend(insns.iter().copied(), *source_location); + } + for CondStackEntry { cond: _, end_label } in cond_stack { + self.insns.define_label_at_next_insn(end_label); + } + } + fn process_clocks(&mut self) -> Interned<[StatePartIndex]> { + mem::take(&mut self.clock_triggers) + .into_iter() + .map( + |ClockTrigger { + last_clk_was_low, + clk, + clk_triggered, + source_location, + }| { + self.insns.push( + Insn::XorSmallImmediate { + dest: last_clk_was_low, + lhs: clk, + rhs: 1, + }, + source_location, + ); + clk_triggered + }, + ) + .collect() + } + fn process_registers(&mut self) { + for Register { + value, + clk_triggered, + reset, + source_location, + } in mem::take(&mut self.registers) + { + match reset { + Some(RegisterReset { + is_async, + init, + rst, + }) => { + let reg_end = self.insns.new_label(); + let reg_reset = self.insns.new_label(); + let branch_if_reset = Insn::BranchIfSmallNonZero { + target: reg_reset.0, + value: rst, + }; + let branch_if_not_triggered = Insn::BranchIfSmallZero { + target: reg_end.0, + value: clk_triggered, + }; + if is_async { + self.insns.push(branch_if_reset, source_location); + self.insns.push(branch_if_not_triggered, source_location); + } else { + self.insns.push(branch_if_not_triggered, source_location); + self.insns.push(branch_if_reset, source_location); + } + self.insns.extend( + value.range.insns_for_copy_from(value.write_value().range), + source_location, + ); + self.insns + .push(Insn::Branch { target: reg_end.0 }, source_location); + self.insns.define_label_at_next_insn(reg_reset); + self.insns + .extend(value.range.insns_for_copy_from(init.range), source_location); + self.insns.define_label_at_next_insn(reg_end); + } + None => { + let reg_end = self.insns.new_label(); + self.insns.push( + Insn::BranchIfSmallZero { + target: reg_end.0, + value: clk_triggered, + }, + source_location, + ); + self.insns.extend( + value.range.insns_for_copy_from(value.write_value().range), + source_location, + ); + self.insns.define_label_at_next_insn(reg_end); + } + } + } + } + fn process_memories(&mut self) { + for memory_index in 0..self.memories.len() { + let Memory { + mem, + memory: _, + trace: _, + ref mut ports, + } = self.memories[memory_index]; + for MemoryPort { + clk_triggered, + addr_delayed, + en_delayed, + data_layout: _, + read_data_delayed, + write_data_delayed, + write_mask_delayed, + write_mode_delayed, + write_insns, + } in mem::take(ports) + { + let port_end = self.insns.new_label(); + let small_shift_reg = + |this: &mut Self, values: &[StatePartIndex]| { + for pair in values.windows(2).rev() { + this.insns.push( + Insn::CopySmall { + dest: pair[1], + src: pair[0], + }, + mem.source_location(), + ); + } + }; + let shift_reg = |this: &mut Self, values: &[TypeIndexRange]| { + for pair in values.windows(2).rev() { + this.insns + .extend(pair[0].insns_for_copy_to(pair[1]), mem.source_location()); + } + }; + self.insns.push( + Insn::BranchIfSmallZero { + target: port_end.0, + value: clk_triggered, + }, + mem.source_location(), + ); + small_shift_reg(self, &addr_delayed); + small_shift_reg(self, &en_delayed); + shift_reg(self, &write_data_delayed); + 
shift_reg(self, &write_mask_delayed); + small_shift_reg(self, &write_mode_delayed); + shift_reg(self, &read_data_delayed); + self.insns.extend(write_insns, mem.source_location()); + self.insns.define_label_at_next_insn(port_end); + } + } + } + pub fn compile(mut self) -> Compiled { + let base_module = + *self.compile_module(InstantiatedModule::Base(self.base_module).intern_sized()); + self.process_assignments(); + self.process_registers(); + self.process_memories(); + let clocks_triggered = self.process_clocks(); + self.insns + .push(Insn::Return, self.base_module.source_location()); + Compiled { + insns: Insns::from(self.insns).intern_sized(), + base_module, + extern_modules: Intern::intern_owned(self.extern_modules), + io: Instance::new_unchecked( + ScopedNameId( + NameId("".intern(), Id::new()), + self.original_base_module.name_id(), + ), + self.original_base_module, + self.original_base_module.source_location(), + ), + traces: SimTraces(Intern::intern_owned(self.traces.0)), + trace_memories: Interned::from_iter(self.memories.iter().map( + |&Memory { + mem: _, + memory, + trace, + ports: _, + }| (memory, trace), + )), + clocks_triggered, + } + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub(crate) struct CompiledModule { + pub(crate) module_io: Interned<[CompiledValue]>, + pub(crate) trace_decls: TraceModule, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct Compiled { + pub(crate) insns: Interned>, + pub(crate) base_module: CompiledModule, + pub(crate) extern_modules: Interned<[CompiledExternModule]>, + pub(crate) io: Instance, + pub(crate) traces: SimTraces]>>, + pub(crate) trace_memories: Interned<[(StatePartIndex, TraceMem)]>, + pub(crate) clocks_triggered: Interned<[StatePartIndex]>, +} + +impl Compiled { + pub fn new(module: Interned>) -> Self { + Self::from_canonical(Compiler::new(module.canonical().intern()).compile()) + } + pub fn canonical(self) -> Compiled { + let Self { + insns, + base_module, + extern_modules, + io, + traces, + trace_memories, + clocks_triggered, + } = self; + Compiled { + insns, + base_module, + extern_modules, + io: Instance::from_canonical(io.canonical()), + traces, + trace_memories, + clocks_triggered, + } + } + pub fn from_canonical(canonical: Compiled) -> Self { + let Compiled { + insns, + base_module, + extern_modules, + io, + traces, + trace_memories, + clocks_triggered, + } = canonical; + Self { + insns, + base_module, + extern_modules, + io: Instance::from_canonical(io.canonical()), + traces, + trace_memories, + clocks_triggered, + } + } +} From 6d36698adfd854f2c21508eef9ccdd575a6881ea Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Tue, 26 Aug 2025 19:23:21 -0700 Subject: [PATCH 2/3] move public paths of sim::{Compiled,Compiler} to sim::compiler --- crates/fayalite/src/sim.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/fayalite/src/sim.rs b/crates/fayalite/src/sim.rs index 596e323..d91427f 100644 --- a/crates/fayalite/src/sim.rs +++ b/crates/fayalite/src/sim.rs @@ -17,7 +17,8 @@ use crate::{ reset::ResetType, sim::{ compiler::{ - CompiledBundleField, CompiledExternModule, CompiledTypeLayoutBody, CompiledValue, + Compiled, CompiledBundleField, CompiledExternModule, CompiledTypeLayoutBody, + CompiledValue, }, interpreter::{ BreakAction, BreakpointsSet, RunResult, SmallUInt, State, StatePartIndex, @@ -47,14 +48,12 @@ use std::{ task::Poll, }; -mod compiler; +pub mod compiler; mod interpreter; pub mod time; pub mod value; pub mod vcd; -pub use compiler::{Compiled, Compiler}; - 
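// With `pub use compiler::{Compiled, Compiler}` removed above, callers reach the
// compiler through the now-public `sim::compiler` path instead. A minimal sketch of
// that spelling, assuming a hypothetical `my_top()` returning `Interned<Module<MyIo>>`:
//
//     use fayalite::sim::compiler::Compiled;
//
//     let compiled: Compiled<MyIo> = Compiled::new(my_top());
//     // `Compiled::new` runs `Compiler::new(module.canonical().intern()).compile()`
//     // and re-attaches the concrete io bundle type via `from_canonical`.
//     let _type_erased = compiled.canonical(); // drop the concrete io type again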
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TraceScalarId(usize); From ab21db4103224dce9ca98fa48bb2e5b5ffeef07d Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Mon, 1 Sep 2025 04:46:24 -0700 Subject: [PATCH 3/3] WIP switched to OpaqueSimValueWriter --- crates/fayalite/src/array.rs | 59 +- crates/fayalite/src/bundle.rs | 46 +- crates/fayalite/src/clock.rs | 20 +- crates/fayalite/src/enum_.rs | 20 +- crates/fayalite/src/int.rs | 40 +- crates/fayalite/src/int/uint_in_range.rs | 40 +- crates/fayalite/src/phantom_const.rs | 31 +- crates/fayalite/src/reset.rs | 20 +- crates/fayalite/src/sim/value.rs | 625 ++++++++++++++++-- .../src/sim/value/sim_only_value_unsafe.rs | 351 ++++++++++ crates/fayalite/src/ty.rs | 320 ++++++++- crates/fayalite/src/util.rs | 4 + crates/fayalite/src/util/type_id_serde.rs | 101 +++ 13 files changed, 1546 insertions(+), 131 deletions(-) create mode 100644 crates/fayalite/src/sim/value/sim_only_value_unsafe.rs create mode 100644 crates/fayalite/src/util/type_id_serde.rs diff --git a/crates/fayalite/src/array.rs b/crates/fayalite/src/array.rs index c953aea..7f24a9d 100644 --- a/crates/fayalite/src/array.rs +++ b/crates/fayalite/src/array.rs @@ -12,7 +12,8 @@ use crate::{ sim::value::{SimValue, SimValuePartialEq}, source_location::SourceLocation, ty::{ - CanonicalType, MatchVariantWithoutScope, StaticType, Type, TypeProperties, TypeWithDeref, + CanonicalType, MatchVariantWithoutScope, OpaqueSimValueSlice, OpaqueSimValueWriter, + OpaqueSimValueWritten, StaticType, Type, TypeProperties, TypeWithDeref, serde_impls::SerdeCanonicalType, }, util::ConstUsize, @@ -48,6 +49,7 @@ impl ArrayType { is_storable, is_castable_from_bits, bit_width, + sim_only_value_types, } = element; let Some(bit_width) = bit_width.checked_mul(len) else { panic!("array too big"); @@ -194,42 +196,71 @@ impl Type for ArrayType { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), self.type_properties.bit_width); let element = self.element(); - let element_bit_width = element.canonical().bit_width(); + let element_props = element.canonical().type_properties(); + let element_bit_width = element_props.bit_width; + let element_sim_only_types_count = element_props.sim_only_value_types_ref().len(); TryFrom::try_from(Vec::from_iter((0..self.len()).map(|i| { - SimValue::from_bitslice(element, &bits[i * element_bit_width..][..element_bit_width]) + SimValue::from_bitslice_and_sim_only_values( + element, + &bits[i * element_props.bit_width..][..element_props.bit_width], + &sim_only_values[i * element_sim_only_types_count][..element_sim_only_types_count], + ) }))) .ok() .expect("used correct length") } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), self.type_properties.bit_width); let element_ty = self.element(); - let element_bit_width = element_ty.canonical().bit_width(); + let element_props = element_ty.canonical().type_properties(); + let element_bit_width = element_props.bit_width; + let element_sim_only_types_count = element_props.sim_only_value_types_ref().len(); let value: &mut [SimValue] = value.as_mut(); assert_eq!(self.len(), value.len()); for (i, element_value) in value.iter_mut().enumerate() { 
assert_eq!(SimValue::ty(element_value), element_ty); - SimValue::bits_mut(element_value) + let (bits_mut, sim_only_values_mut) = SimValue::opaque_mut(element_value).parts_mut(); + bits_mut .bits_mut() .copy_from_bitslice(&bits[i * element_bit_width..][..element_bit_width]); + sim_only_values_mut.clone_from_slice( + &sim_only_values[i * element_sim_only_types_count..] + [..element_sim_only_types_count], + ); } } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), self.type_properties.bit_width); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + mut writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { let element_ty = self.element(); - let element_bit_width = element_ty.canonical().bit_width(); - let value: &[SimValue] = value.as_ref(); + let element_props = element_ty.canonical().type_properties(); + let element_bit_width = element_props.bit_width; + let element_sim_only_types_count = element_props.sim_only_value_types_ref().len(); + let value = AsRef::<[SimValue]>::as_ref(&value); assert_eq!(self.len(), value.len()); - for (i, element_value) in value.iter().enumerate() { + for element_value in value { assert_eq!(SimValue::ty(element_value), element_ty); - bits[i * element_bit_width..][..element_bit_width] - .copy_from_bitslice(SimValue::bits(element_value).bits()); + writer.fill_prefix_with(element_bit_width, element_sim_only_types_count, |writer| { + writer.fill_cloned_from_slice(SimValue::opaque(element_value).as_slice()) + }); } + writer.fill_cloned_from_slice(OpaqueSimValueSlice::empty()) } } diff --git a/crates/fayalite/src/bundle.rs b/crates/fayalite/src/bundle.rs index 30a70d5..cf08eb9 100644 --- a/crates/fayalite/src/bundle.rs +++ b/crates/fayalite/src/bundle.rs @@ -241,16 +241,30 @@ impl Type for Bundle { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), self.type_properties().bit_width); OpaqueSimValue::from_bitslice(bits) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), self.type_properties().bit_width); assert_eq!(value.bit_width(), self.type_properties().bit_width); value.bits_mut().bits_mut().copy_from_bitslice(bits); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { assert_eq!(bits.len(), self.type_properties().bit_width); assert_eq!(value.bit_width(), self.type_properties().bit_width); bits.copy_from_bitslice(value.bits().bits()); @@ -495,19 +509,33 @@ macro_rules! 
impl_tuples { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { #![allow(unused_mut, unused_variables)] let mut v = BundleSimValueFromBits::new(*self, bits); $(let $var = v.field_from_bits();)* ($($var,)*) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { #![allow(unused_mut, unused_variables)] let mut v = BundleSimValueFromBits::new(*self, bits); let ($($var,)*) = value; $(v.field_clone_from_bits($var);)* } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { #![allow(unused_mut, unused_variables)] let mut v = BundleSimValueToBits::new(*self, bits); let ($($var,)*) = value; @@ -723,7 +751,11 @@ impl Type for PhantomData { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert!(bits.is_empty()); *self } diff --git a/crates/fayalite/src/clock.rs b/crates/fayalite/src/clock.rs index 66b0e20..dee0891 100644 --- a/crates/fayalite/src/clock.rs +++ b/crates/fayalite/src/clock.rs @@ -39,17 +39,31 @@ impl Type for Clock { retval } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), 1); bits[0] } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), 1); *value = bits[0]; } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { assert_eq!(bits.len(), 1); bits.set(0, *value); } diff --git a/crates/fayalite/src/enum_.rs b/crates/fayalite/src/enum_.rs index 283e4ff..a402f6f 100644 --- a/crates/fayalite/src/enum_.rs +++ b/crates/fayalite/src/enum_.rs @@ -381,16 +381,30 @@ impl Type for Enum { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), self.type_properties().bit_width); OpaqueSimValue::from_bitslice(bits) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), self.type_properties().bit_width); assert_eq!(value.bit_width(), self.type_properties().bit_width); value.bits_mut().bits_mut().copy_from_bitslice(bits); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn 
sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { assert_eq!(bits.len(), self.type_properties().bit_width); assert_eq!(value.bit_width(), self.type_properties().bit_width); bits.copy_from_bitslice(value.bits().bits()); diff --git a/crates/fayalite/src/int.rs b/crates/fayalite/src/int.rs index c491cdc..694adcf 100644 --- a/crates/fayalite/src/int.rs +++ b/crates/fayalite/src/int.rs @@ -678,16 +678,30 @@ macro_rules! impl_int { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), self.width()); $value::new(Arc::new(bits.to_bitvec())) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), self.width()); assert_eq!(value.width(), self.width()); value.bits_mut().copy_from_bitslice(bits); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { assert_eq!(bits.len(), self.width()); assert_eq!(value.width(), self.width()); bits.copy_from_bitslice(value.bits()); @@ -1252,15 +1266,29 @@ impl Type for Bool { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), 1); bits[0] } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), 1); *value = bits[0]; } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { assert_eq!(bits.len(), 1); bits.set(0, *value); } diff --git a/crates/fayalite/src/int/uint_in_range.rs b/crates/fayalite/src/int/uint_in_range.rs index ae80a93..c9e92a9 100644 --- a/crates/fayalite/src/int/uint_in_range.rs +++ b/crates/fayalite/src/int/uint_in_range.rs @@ -70,15 +70,29 @@ impl Type for UIntInRangeMaskType { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { Bool.sim_value_from_bits(bits) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { Bool.sim_value_clone_from_bits(value, bits); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { 
Bool.sim_value_to_bits(value, bits); } } @@ -353,17 +367,31 @@ macro_rules! define_uint_in_range_type { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { let mut retval = 0usize; retval.view_bits_mut::()[..bits.len()].clone_from_bitslice(bits); retval } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { *value = self.sim_value_from_bits(bits); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { bits.clone_from_bitslice(&value.view_bits::()[..bits.len()]); } } diff --git a/crates/fayalite/src/phantom_const.rs b/crates/fayalite/src/phantom_const.rs index 44b36ca..e6be72d 100644 --- a/crates/fayalite/src/phantom_const.rs +++ b/crates/fayalite/src/phantom_const.rs @@ -8,7 +8,10 @@ use crate::{ }, int::Bool, intern::{Intern, Interned, InternedCompare, LazyInterned, LazyInternedTrait, Memoize}, - sim::value::{SimValue, SimValuePartialEq, ToSimValue, ToSimValueWithType}, + sim::value::{ + DynSimOnlyValue, DynSimOnlyValuesWriter, DynSimOnlyValuesWritten, SimValue, + SimValuePartialEq, ToSimValue, ToSimValueWithType, assert_matching_sim_values, + }, source_location::SourceLocation, ty::{ CanonicalType, StaticType, Type, TypeProperties, impl_match_variant_as_self, @@ -284,18 +287,32 @@ impl Type for PhantomConst { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert!(bits.is_empty()); + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { + assert_matching_sim_values(0, &[], bits, sim_only_values); *self } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert!(bits.is_empty()); + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { + assert_matching_sim_values(0, &[], bits, sim_only_values); assert_eq!(*value, *self); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert!(bits.is_empty()); + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { + assert_matching_sim_values(0, &[], bits, sim_only_values); assert_eq!(*value, *self); } } diff --git a/crates/fayalite/src/reset.rs b/crates/fayalite/src/reset.rs index f3392a2..ef6366d 100644 --- a/crates/fayalite/src/reset.rs +++ b/crates/fayalite/src/reset.rs @@ -69,17 +69,31 @@ macro_rules! 
reset_type { retval } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_bits( + &self, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) -> Self::SimValue { assert_eq!(bits.len(), 1); bits[0] } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_bits( + &self, + value: &mut Self::SimValue, + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], + ) { assert_eq!(bits.len(), 1); *value = bits[0]; } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_bits<'w>( + &self, + value: &Self::SimValue, + bits: &mut BitSlice, + sim_only_values: DynSimOnlyValuesWriter<'w>, + ) -> DynSimOnlyValuesWritten<'w> { assert_eq!(bits.len(), 1); bits.set(0, *value); } diff --git a/crates/fayalite/src/sim/value.rs b/crates/fayalite/src/sim/value.rs index 70cb943..02190d2 100644 --- a/crates/fayalite/src/sim/value.rs +++ b/crates/fayalite/src/sim/value.rs @@ -8,61 +8,81 @@ use crate::{ enum_::{Enum, EnumType}, expr::{CastBitsTo, Expr, ToExpr}, int::{Bool, IntType, KnownSize, SInt, SIntType, SIntValue, Size, UInt, UIntType, UIntValue}, + intern::Interned, reset::{AsyncReset, Reset, SyncReset}, - ty::{CanonicalType, StaticType, Type}, + source_location::SourceLocation, + ty::{ + CanonicalType, OpaqueSimValue, OpaqueSimValueSlice, OpaqueSimValueWriter, StaticType, Type, + TypeProperties, impl_match_variant_as_self, + }, util::{ ConstUsize, alternating_cell::{AlternatingCell, AlternatingCellMethods}, }, }; use bitvec::{slice::BitSlice, vec::BitVec}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use hashbrown::{HashMap, hash_map::Entry}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error as _, ser::Error as _}; use std::{ - fmt, + borrow::Cow, + fmt::{self, Write}, + hash::{BuildHasher, Hash, Hasher, RandomState}, + marker::PhantomData, ops::{Deref, DerefMut}, - sync::Arc, + sync::{Arc, Mutex}, +}; + +pub(crate) mod sim_only_value_unsafe; + +pub use sim_only_value_unsafe::{ + DynSimOnlyValue, DynSimOnlyValueType, SimOnlyValue, SimOnlyValueTrait, SimOnlyValueType, }; #[derive(Copy, Clone, Eq, PartialEq)] enum ValidFlags { BothValid = 0, OnlyValueValid = 1, - OnlyBitsValid = 2, + OnlyOpaqueValid = 2, } #[derive(Clone)] struct SimValueInner { value: T::SimValue, - bits: UIntValue, + opaque: OpaqueSimValue, valid_flags: ValidFlags, ty: T, + sim_only_values_len: usize, } impl SimValueInner { - fn fill_bits(&mut self) { + fn fill_opaque(&mut self) { match self.valid_flags { - ValidFlags::BothValid | ValidFlags::OnlyBitsValid => {} + ValidFlags::BothValid | ValidFlags::OnlyOpaqueValid => {} ValidFlags::OnlyValueValid => { - self.ty.sim_value_to_bits(&self.value, self.bits.bits_mut()); + OpaqueSimValueWriter::rewrite_with( + self.sim_only_values_len, + &mut self.opaque, + |writer| self.ty.sim_value_to_opaque(&self.value, writer), + ); self.valid_flags = ValidFlags::BothValid; } } } - fn into_bits(mut self) -> UIntValue { - self.fill_bits(); - self.bits + fn into_opaque(mut self) -> OpaqueSimValue { + self.fill_opaque(); + self.opaque } - fn bits_mut(&mut self) -> &mut UIntValue { - self.fill_bits(); - self.valid_flags = ValidFlags::OnlyBitsValid; - &mut self.bits + fn opaque_mut(&mut self) -> &mut OpaqueSimValue { + self.fill_opaque(); + self.valid_flags = ValidFlags::OnlyOpaqueValid; + &mut self.opaque } fn fill_value(&mut self) { match self.valid_flags { ValidFlags::BothValid | ValidFlags::OnlyValueValid => {} - 
ValidFlags::OnlyBitsValid => { + ValidFlags::OnlyOpaqueValid => { self.ty - .sim_value_clone_from_bits(&mut self.value, self.bits.bits()); + .sim_value_clone_from_opaque(&mut self.value, self.opaque.as_slice()); self.valid_flags = ValidFlags::BothValid; } } @@ -82,12 +102,14 @@ impl AlternatingCellMethods for SimValueInner { fn unique_to_shared(&mut self) { match self.valid_flags { ValidFlags::BothValid => return, - ValidFlags::OnlyValueValid => { - self.ty.sim_value_to_bits(&self.value, self.bits.bits_mut()) - } - ValidFlags::OnlyBitsValid => self + ValidFlags::OnlyValueValid => OpaqueSimValueWriter::rewrite_with( + self.sim_only_values_len, + &mut self.opaque, + |writer| self.ty.sim_value_to_opaque(&self.value, writer), + ), + ValidFlags::OnlyOpaqueValid => self .ty - .sim_value_clone_from_bits(&mut self.value, self.bits.bits()), + .sim_value_clone_from_opaque(&mut self.value, self.opaque.as_slice()), } self.valid_flags = ValidFlags::BothValid; } @@ -143,13 +165,15 @@ impl Clone for SimValue { impl SimValue { #[track_caller] - pub fn from_bits(ty: T, bits: UIntValue) -> Self { - assert_eq!(ty.canonical().bit_width(), bits.width()); + pub fn from_opaque(ty: T, opaque: OpaqueSimValue) -> Self { + let type_properties = ty.canonical().type_properties(); + type_properties.assert_matching_opaque_sim_value(opaque.as_slice()); let inner = SimValueInner { - value: ty.sim_value_from_bits(bits.bits()), - bits, + value: ty.sim_value_from_opaque(opaque.as_slice()), + opaque, valid_flags: ValidFlags::BothValid, ty, + sim_only_values_len: type_properties.sim_only_values_len, }; Self { inner: AlternatingCell::new_shared(inner), @@ -157,14 +181,30 @@ impl SimValue { } #[track_caller] pub fn from_bitslice(ty: T, bits: &BitSlice) -> Self { - Self::from_bits(ty, UIntValue::new(Arc::new(bits.to_bitvec()))) + Self::from_bitslice_and_sim_only_values(ty, bits, Vec::new()) + } + #[track_caller] + pub fn from_bitslice_and_sim_only_values( + ty: T, + bits: &BitSlice, + sim_only_values: Vec, + ) -> Self { + Self::from_opaque( + ty, + OpaqueSimValue::from_bitslice_and_sim_only_values(bits, sim_only_values), + ) } pub fn from_value(ty: T, value: T::SimValue) -> Self { + let type_properties = ty.canonical().type_properties(); let inner = SimValueInner { - bits: UIntValue::new_dyn(Arc::new(BitVec::repeat(false, ty.canonical().bit_width()))), + opaque: OpaqueSimValue::from_bits_and_sim_only_values( + UIntValue::new_dyn(Arc::new(BitVec::repeat(false, type_properties.bit_width))), + Vec::with_capacity(type_properties.sim_only_values_len), + ), value, valid_flags: ValidFlags::OnlyValueValid, ty, + sim_only_values_len: type_properties.sim_only_values_len, }; Self { inner: AlternatingCell::new_unique(inner), @@ -173,18 +213,24 @@ impl SimValue { pub fn ty(this: &Self) -> T { this.inner.share().ty } - pub fn into_bits(this: Self) -> UIntValue { - this.inner.into_inner().into_bits() + pub fn into_opaque(this: Self) -> OpaqueSimValue { + this.inner.into_inner().into_opaque() } - pub fn into_ty_and_bits(this: Self) -> (T, UIntValue) { + pub fn into_ty_and_opaque(this: Self) -> (T, OpaqueSimValue) { let inner = this.inner.into_inner(); - (inner.ty, inner.into_bits()) + (inner.ty, inner.into_opaque()) + } + pub fn opaque(this: &Self) -> &OpaqueSimValue { + &this.inner.share().opaque + } + pub fn opaque_mut(this: &mut Self) -> &mut OpaqueSimValue { + &mut this.inner.unique().opaque } pub fn bits(this: &Self) -> &UIntValue { - &this.inner.share().bits + Self::opaque(this).bits() } pub fn bits_mut(this: &mut Self) -> &mut UIntValue { 
- this.inner.unique().bits_mut() + Self::opaque_mut(this).bits_mut() } pub fn into_value(this: Self) -> T::SimValue { this.inner.into_inner().into_value() @@ -197,59 +243,59 @@ impl SimValue { } #[track_caller] pub fn from_canonical(v: SimValue) -> Self { - let (ty, bits) = SimValue::into_ty_and_bits(v); - Self::from_bits(T::from_canonical(ty), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + Self::from_opaque(T::from_canonical(ty), opaque) } pub fn into_canonical(this: Self) -> SimValue { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(ty.canonical(), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(ty.canonical(), opaque) } pub fn canonical(this: &Self) -> SimValue { - SimValue::from_bits(Self::ty(this).canonical(), Self::bits(this).clone()) + SimValue::from_opaque(Self::ty(this).canonical(), Self::opaque(this).clone()) } #[track_caller] pub fn from_dyn_int(v: SimValue) -> Self where T: IntType, { - let (ty, bits) = SimValue::into_ty_and_bits(v); - SimValue::from_bits(T::from_dyn_int(ty), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + SimValue::from_opaque(T::from_dyn_int(ty), opaque) } pub fn into_dyn_int(this: Self) -> SimValue where T: IntType, { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(ty.as_dyn_int(), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(ty.as_dyn_int(), opaque) } pub fn to_dyn_int(this: &Self) -> SimValue where T: IntType, { - SimValue::from_bits(Self::ty(this).as_dyn_int(), Self::bits(&this).clone()) + SimValue::from_opaque(Self::ty(this).as_dyn_int(), Self::opaque(&this).clone()) } #[track_caller] pub fn from_bundle(v: SimValue) -> Self where T: BundleType, { - let (ty, bits) = SimValue::into_ty_and_bits(v); - SimValue::from_bits(T::from_canonical(CanonicalType::Bundle(ty)), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + SimValue::from_opaque(T::from_canonical(CanonicalType::Bundle(ty)), opaque) } pub fn into_bundle(this: Self) -> SimValue where T: BundleType, { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(Bundle::from_canonical(ty.canonical()), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(Bundle::from_canonical(ty.canonical()), opaque) } pub fn to_bundle(this: &Self) -> SimValue where T: BundleType, { - SimValue::from_bits( + SimValue::from_opaque( Bundle::from_canonical(Self::ty(this).canonical()), - Self::bits(&this).clone(), + Self::opaque(&this).clone(), ) } #[track_caller] @@ -257,23 +303,23 @@ impl SimValue { where T: EnumType, { - let (ty, bits) = SimValue::into_ty_and_bits(v); - SimValue::from_bits(T::from_canonical(CanonicalType::Enum(ty)), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + SimValue::from_opaque(T::from_canonical(CanonicalType::Enum(ty)), opaque) } pub fn into_enum(this: Self) -> SimValue where T: EnumType, { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(Enum::from_canonical(ty.canonical()), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(Enum::from_canonical(ty.canonical()), opaque) } pub fn to_enum(this: &Self) -> SimValue where T: EnumType, { - SimValue::from_bits( + SimValue::from_opaque( Enum::from_canonical(Self::ty(this).canonical()), - Self::bits(&this).clone(), + Self::opaque(&this).clone(), ) } } @@ -308,7 +354,11 @@ impl ToExpr for SimValue { #[track_caller] fn to_expr(&self) -> Expr { let inner = self.inner.share(); - 
inner.bits.cast_bits_to(inner.ty) + assert!( + inner.sim_only_value_types.is_empty(), + "can't convert sim-only values to Expr" + ); + inner.opaque.bits().cast_bits_to(inner.ty) } } @@ -443,12 +493,15 @@ impl ToSimValueWithType for BitVec { #[track_caller] fn arc_into_sim_value_with_type(self: Arc, ty: T) -> SimValue { - SimValue::from_bits(ty, UIntValue::new_dyn(self)) + SimValue::from_opaque(ty, OpaqueSimValue::from_bits(UIntValue::new_dyn(self))) } #[track_caller] fn arc_to_sim_value_with_type(self: &Arc, ty: T) -> SimValue { - SimValue::from_bits(ty, UIntValue::new_dyn(self.clone())) + SimValue::from_opaque( + ty, + OpaqueSimValue::from_bits(UIntValue::new_dyn(self.clone())), + ) } } @@ -792,16 +845,18 @@ impl ToSimValueWithType for bool { | CanonicalType::Array(_) | CanonicalType::Enum(_) | CanonicalType::Bundle(_) - | CanonicalType::PhantomConst(_) => { + | CanonicalType::PhantomConst(_) + | CanonicalType::DynSimOnlyValueType(_) => { panic!("can't create SimValue from bool: expected value of type: {ty:?}"); } CanonicalType::Bool(_) | CanonicalType::AsyncReset(_) | CanonicalType::SyncReset(_) | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => { - SimValue::from_bits(ty, UIntValue::new(Arc::new(BitVec::repeat(*self, 1)))) - } + | CanonicalType::Clock(_) => SimValue::from_opaque( + ty, + OpaqueSimValue::from_bits(UIntValue::new(Arc::new(BitVec::repeat(*self, 1)))), + ), } } } @@ -911,3 +966,445 @@ macro_rules! impl_to_sim_value_for_int_value { impl_to_sim_value_for_int_value!(UIntValue, UInt, UIntType); impl_to_sim_value_for_int_value!(SIntValue, SInt, SIntType); + +#[derive(Default)] +struct DynSimOnlyValueTypeSerdeTableRest { + from_serde: HashMap, + serde_id_random_state: RandomState, + buffer: String, +} + +impl DynSimOnlyValueTypeSerdeTableRest { + #[cold] + fn add_new(&mut self, ty: DynSimOnlyValueType) -> DynSimOnlyValueTypeSerdeId { + let mut try_number = 0u64; + let mut hasher = self.serde_id_random_state.build_hasher(); + // extract more bits of randomness from TypeId -- its Hash impl only hashes 64-bits + write!(self.buffer, "{:?}", ty.type_id()).expect("shouldn't ever fail"); + self.buffer.hash(&mut hasher); + loop { + let mut hasher = hasher.clone(); + try_number.hash(&mut hasher); + try_number += 1; + let retval = DynSimOnlyValueTypeSerdeId(std::array::from_fn(|i| { + let mut hasher = hasher.clone(); + i.hash(&mut hasher); + hasher.finish() as u32 + })); + match self.from_serde.entry(retval) { + Entry::Occupied(_) => continue, + Entry::Vacant(e) => { + e.insert(ty); + return retval; + } + } + } + } +} + +#[derive(Default)] +struct DynSimOnlyValueTypeSerdeTable { + to_serde: HashMap, + rest: DynSimOnlyValueTypeSerdeTableRest, +} + +static DYN_SIM_ONLY_VALUE_TYPE_SERDE_TABLE: Mutex> = + Mutex::new(None); + +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] +#[serde(transparent)] +struct DynSimOnlyValueTypeSerdeId([u32; 4]); + +impl From for DynSimOnlyValueTypeSerdeId { + fn from(ty: DynSimOnlyValueType) -> Self { + let mut locked = DYN_SIM_ONLY_VALUE_TYPE_SERDE_TABLE + .lock() + .expect("shouldn't be poison"); + let DynSimOnlyValueTypeSerdeTable { to_serde, rest } = locked.get_or_insert_default(); + match to_serde.entry(ty) { + Entry::Occupied(occupied_entry) => *occupied_entry.get(), + Entry::Vacant(vacant_entry) => *vacant_entry.insert(rest.add_new(ty)), + } + } +} + +impl DynSimOnlyValueTypeSerdeId { + fn ty(self) -> Option { + let locked = DYN_SIM_ONLY_VALUE_TYPE_SERDE_TABLE + .lock() + .expect("shouldn't be poison"); + 
Some(*locked.as_ref()?.rest.from_serde.get(&self)?) + } +} + +#[derive(Clone, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] +struct DynSimOnlyValueTypeSerde<'a> { + random_id: DynSimOnlyValueTypeSerdeId, + #[serde(borrow)] + type_name: Cow<'a, str>, +} + +impl Serialize for DynSimOnlyValueType { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + DynSimOnlyValueTypeSerde { + random_id: (*self).into(), + type_name: Cow::Borrowed(self.type_name()), + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for DynSimOnlyValueType { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let deserialized = DynSimOnlyValueTypeSerde::deserialize(deserializer)?; + let retval = deserialized + .random_id + .ty() + .filter(|ty| ty.type_name() == deserialized.type_name); + retval.ok_or_else(|| D::Error::custom("doesn't match any DynSimOnlyValueType that was serialized this time this program was run")) + } +} + +impl DynSimOnlyValueType { + pub const fn type_properties(self) -> TypeProperties { + TypeProperties { + is_passive: true, + is_storable: true, + is_castable_from_bits: false, + bit_width: 0, + sim_only_values_len: 1, + } + } +} + +impl Type for DynSimOnlyValueType { + type BaseType = DynSimOnlyValueType; + type MaskType = Bool; + type SimValue = DynSimOnlyValue; + + impl_match_variant_as_self!(); + + fn mask_type(&self) -> Self::MaskType { + Bool + } + + fn canonical(&self) -> CanonicalType { + CanonicalType::DynSimOnlyValueType(*self) + } + + fn from_canonical(canonical_type: CanonicalType) -> Self { + let CanonicalType::DynSimOnlyValueType(v) = canonical_type else { + panic!("expected DynSimOnlyValueType"); + }; + v + } + + fn source_location() -> SourceLocation { + SourceLocation::builtin() + } + + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + self.type_properties() + .assert_matching_opaque_sim_value(opaque); + opaque.sim_only_values()[0].clone() + } + + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + self.type_properties() + .assert_matching_opaque_sim_value(opaque); + value.clone_from(&opaque.sim_only_values()[0]); + } + + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> crate::ty::OpaqueSimValueWritten<'w> { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_parts( + BitSlice::empty(), + std::array::from_ref(value), + )) + } +} + +impl Type for SimOnlyValueType { + type BaseType = DynSimOnlyValueType; + type MaskType = Bool; + type SimValue = SimOnlyValue; + + impl_match_variant_as_self!(); + + fn mask_type(&self) -> Self::MaskType { + Bool + } + + fn canonical(&self) -> CanonicalType { + DynSimOnlyValueType::from(*self).canonical() + } + + fn from_canonical(canonical_type: CanonicalType) -> Self { + DynSimOnlyValueType::from_canonical(canonical_type) + .downcast() + .expect("got wrong SimOnlyValueType") + } + + fn source_location() -> SourceLocation { + SourceLocation::builtin() + } + + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + DynSimOnlyValueType::from(*self) + .type_properties() + .assert_matching_opaque_sim_value(opaque); + SimOnlyValue::new( + opaque.sim_only_values()[0] + .downcast_ref::() + .expect("type mismatch") + .clone(), + ) + } + + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + DynSimOnlyValueType::from(*self) + 
.type_properties() + .assert_matching_opaque_sim_value(opaque); + (**value).clone_from( + &opaque.sim_only_values()[0] + .downcast_ref::() + .expect("already checked type"), + ) + } + + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> crate::ty::OpaqueSimValueWritten<'w> { + SimOnlyValue::with_dyn_ref(value, |value| { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_parts( + BitSlice::empty(), + std::array::from_ref(value), + )) + }) + } +} + +impl fmt::Debug for SimOnlyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Self::with_dyn_ref(self, |this| fmt::Debug::fmt(this, f)) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename = "SimOnlyValue")] +struct SerdeSimOnlyValue<'a> { + ty: DynSimOnlyValueType, + #[serde(borrow)] + value: Cow<'a, str>, +} + +impl Serialize for DynSimOnlyValue { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + SerdeSimOnlyValue { + ty: self.ty(), + value: Cow::Owned(self.serialize_to_json_string().map_err(S::Error::custom)?), + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for DynSimOnlyValue { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let SerdeSimOnlyValue { ty, value } = Deserialize::deserialize(deserializer)?; + ty.deserialize_from_json_string(&value) + .map_err(D::Error::custom) + } +} + +impl ToSimValueWithType for DynSimOnlyValue { + #[track_caller] + fn to_sim_value_with_type(&self, ty: DynSimOnlyValueType) -> SimValue { + assert_eq!(self.ty(), ty, "mismatched type"); + SimValue::from_value(ty, self.clone()) + } + #[track_caller] + fn into_sim_value_with_type(self, ty: DynSimOnlyValueType) -> SimValue { + assert_eq!(self.ty(), ty, "mismatched type"); + SimValue::from_value(ty, self) + } +} + +impl ToSimValueWithType> for SimOnlyValue { + fn to_sim_value_with_type(&self, ty: SimOnlyValueType) -> SimValue> { + SimValue::from_value(ty, self.clone()) + } + fn into_sim_value_with_type(self, ty: SimOnlyValueType) -> SimValue> { + SimValue::from_value(ty, self) + } +} + +impl ToSimValue for DynSimOnlyValue { + type Type = DynSimOnlyValueType; + + fn to_sim_value(&self) -> SimValue { + SimValue::from_value(self.ty(), self.clone()) + } + + fn into_sim_value(self) -> SimValue { + SimValue::from_value(self.ty(), self) + } +} + +impl ToSimValue for SimOnlyValue { + type Type = SimOnlyValueType; + + fn to_sim_value(&self) -> SimValue { + SimValue::from_value(Default::default(), self.clone()) + } + + fn into_sim_value(self) -> SimValue { + SimValue::from_value(Default::default(), self) + } +} + +#[derive(Debug)] +pub struct DynSimOnlyValuesWriter<'a> { + types: &'static [DynSimOnlyValueType], + values: &'a mut Vec, + values_offset: usize, +} + +#[derive(Debug)] +pub struct DynSimOnlyValuesWritten<'a> { + _phantom: PhantomData<&'a ()>, +} + +impl<'a> DynSimOnlyValuesWriter<'a> { + pub fn rewrite_with( + types: Interned<[DynSimOnlyValueType]>, + values: &mut Vec, + f: F, + ) where + F: for<'b> FnOnce(DynSimOnlyValuesWriter<'b>) -> DynSimOnlyValuesWritten<'b>, // 'b is used as a brand + { + Self::rewrite_with_static(Interned::into_inner(types), values, f); + } + pub fn rewrite_with_static( + types: &'static [DynSimOnlyValueType], + values: &mut Vec, + f: F, + ) where + F: for<'b> FnOnce(DynSimOnlyValuesWriter<'b>) -> DynSimOnlyValuesWritten<'b>, // 'b is used as a brand + { + let DynSimOnlyValuesWritten { + _phantom: PhantomData, + } = f(DynSimOnlyValuesWriter::rewrite_helper(types, values)); + } 
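// A sketch of the intended `DynSimOnlyValuesWriter` fill protocol for an aggregate
// with two fields, analogous to the per-element pattern in `array.rs` above.
// `ALL_TYPES`, `FIELD_A_TYPES`, `FIELD_B_TYPES`, `a_vals`, `b_vals`, and `out` are
// hypothetical names; the per-field type lists are assumed to concatenate to `ALL_TYPES`:
//
//     DynSimOnlyValuesWriter::rewrite_with_static(ALL_TYPES, &mut out, |mut w| {
//         w.fill_prefix_with(FIELD_A_TYPES.len(), |w| w.fill_cloned_from_slice(a_vals));
//         w.fill_prefix_with(FIELD_B_TYPES.len(), |w| w.fill_cloned_from_slice(b_vals));
//         // all fields written; consume the now-empty writer to produce the brand token
//         w.fill_cloned_from_slice(&[])
//     });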
+ pub(crate) fn rewrite_helper( + types: &'static [DynSimOnlyValueType], + values: &'a mut Vec, + ) -> Self { + assert!(values.len() <= types.len(), "too many values"); + values.reserve_exact(types.len() - values.len()); + Self { + types, + values, + values_offset: 0, + } + } + pub fn types(&self) -> &'static [DynSimOnlyValueType] { + self.types + } + pub fn len(&self) -> usize { + self.types.len() + } + pub fn is_empty(&self) -> bool { + self.types.is_empty() + } + pub fn fill_cloned_from_slice(self, values: &[DynSimOnlyValue]) -> DynSimOnlyValuesWritten<'a> { + assert_matching_sim_only_values(self.types, values); + let (clone_from_src, clone_src) = + values.split_at((self.values.len() - self.values_offset).min(values.len())); + self.values[self.values_offset..][..clone_from_src.len()].clone_from_slice(clone_from_src); + self.values.extend_from_slice(clone_src); + DynSimOnlyValuesWritten { + _phantom: PhantomData, + } + } + pub fn fill_prefix_with(&mut self, prefix_len: usize, f: F) + where + F: for<'b> FnOnce(DynSimOnlyValuesWriter<'b>) -> DynSimOnlyValuesWritten<'b>, // 'b is used as a brand + { + let DynSimOnlyValuesWritten { + _phantom: PhantomData, + } = f(self.fill_prefix_setup(prefix_len)); + self.fill_prefix_finish(prefix_len); + } + /// the first part of [`fill_prefix_with`] + pub(crate) fn fill_prefix_setup<'b>( + &'b mut self, + prefix_len: usize, + ) -> DynSimOnlyValuesWriter<'b> { + let prefix_types = &self.types[..prefix_len]; + DynSimOnlyValuesWriter::<'b> { + types: prefix_types, + values: self.values, + values_offset: self.values_offset, + } + } + /// the last part of [`fill_prefix_with`] + pub(crate) fn fill_prefix_finish(&mut self, prefix_len: usize) { + assert!(self.values.len() >= self.values_offset + prefix_len); + self.values_offset += prefix_len; + self.types = &self.types[prefix_len..]; + } +} + +#[track_caller] +#[inline] +pub(crate) fn assert_matching_sim_values( + bit_width: usize, + sim_only_value_types: &[DynSimOnlyValueType], + bits: &BitSlice, + sim_only_values: &[DynSimOnlyValue], +) { + assert_matching_sim_only_values(sim_only_value_types, sim_only_values); + assert_eq!(bit_width, bits.len()); +} + +#[track_caller] +#[inline] +pub(crate) fn assert_matching_sim_only_values( + sim_only_value_types: &[DynSimOnlyValueType], + sim_only_values: &[DynSimOnlyValue], +) { + assert!( + sim_only_value_types + .iter() + .copied() + .eq(sim_only_values.iter().map(DynSimOnlyValue::ty)), + "sim-only values don't match expected types: {:?} != {:?}", + sim_only_value_types, + Vec::from_iter(sim_only_values.iter().map(DynSimOnlyValue::ty)), + ); +} diff --git a/crates/fayalite/src/sim/value/sim_only_value_unsafe.rs b/crates/fayalite/src/sim/value/sim_only_value_unsafe.rs new file mode 100644 index 0000000..ea2ba35 --- /dev/null +++ b/crates/fayalite/src/sim/value/sim_only_value_unsafe.rs @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +// See Notices.txt for copyright information + +//! 
`unsafe` parts of [`DynSimOnlyValue`]
+
+use serde::{Serialize, de::DeserializeOwned};
+use std::{
+    alloc::{Layout, alloc, dealloc, handle_alloc_error},
+    any::TypeId,
+    fmt,
+    hash::{Hash, Hasher},
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ptr::{self, NonNull},
+};
+
+struct SimOnlyValueVTable {
+    layout: Layout,
+    // TODO: replace with TypeId once TypeId::of is const-stable
+    type_id: fn() -> TypeId,
+    type_name: fn() -> &'static str,
+    drop_in_place: unsafe fn(this: NonNull<()>),
+    eq: unsafe fn(this: NonNull<()>, other: NonNull<()>) -> bool,
+    hash: unsafe fn(this: NonNull<()>, hasher: &mut dyn Hasher),
+    debug_fmt: unsafe fn(this: NonNull<()>, f: &mut fmt::Formatter<'_>) -> fmt::Result,
+    serialize_to_json_string: unsafe fn(this: NonNull<()>) -> serde_json::Result<String>,
+    deserialize_into_uninit_from_json_string:
+        unsafe fn(this: NonNull<()>, json_str: &str) -> serde_json::Result<()>,
+    clone_into_uninit: unsafe fn(target: NonNull<()>, src: NonNull<()>),
+    clone_from: unsafe fn(this: NonNull<()>, src: NonNull<()>),
+}
+
+pub trait SimOnlyValueTrait:
+    'static + Eq + Hash + fmt::Debug + Serialize + DeserializeOwned + Clone
+{
+}
+
+impl<T: 'static + Eq + Hash + fmt::Debug + Serialize + DeserializeOwned + Clone> SimOnlyValueTrait
+    for T
+{
+}
+
+unsafe trait GetSimOnlyValueVTable: SimOnlyValueTrait {
+    const VTABLE: &'static SimOnlyValueVTable;
+}
+
+unsafe impl<T: SimOnlyValueTrait> GetSimOnlyValueVTable for T {
+    const VTABLE: &'static SimOnlyValueVTable = &SimOnlyValueVTable {
+        layout: Layout::new::<T>(),
+        type_id: TypeId::of::<T>,
+        type_name: std::any::type_name::<T>,
+        drop_in_place: |this| unsafe {
+            this.cast::<T>().drop_in_place();
+        },
+        eq: |this, other| unsafe { this.cast::<T>().as_ref() == other.cast::<T>().as_ref() },
+        hash: |this, mut hasher| unsafe { this.cast::<T>().as_ref().hash(&mut hasher) },
+        debug_fmt: |this, f| unsafe { fmt::Debug::fmt(this.cast::<T>().as_ref(), f) },
+        serialize_to_json_string: |this| unsafe {
+            serde_json::to_string(this.cast::<T>().as_ref())
+        },
+        deserialize_into_uninit_from_json_string: |this, json_str| unsafe {
+            serde_json::from_str(json_str).map(|v| this.cast::<T>().write(v))
+        },
+        clone_into_uninit: |target, src| unsafe {
+            target
+                .cast::<T>()
+                .write(Clone::clone(src.cast::<T>().as_ref()));
+        },
+        clone_from: |this, src| unsafe {
+            Clone::clone_from(this.cast::<T>().as_mut(), src.cast::<T>().as_ref());
+        },
+    };
+}
+
+#[derive(Copy, Clone)]
+pub struct DynSimOnlyValueType {
+    vtable: &'static SimOnlyValueVTable,
+}
+
+struct DynSimOnlyValueUninit {
+    ty: DynSimOnlyValueType,
+    value: NonNull<()>,
+}
+
+impl DynSimOnlyValueUninit {
+    fn new(ty: DynSimOnlyValueType) -> Self {
+        let layout = ty.vtable.layout;
+        let value = if layout.size() == 0 {
+            ptr::without_provenance_mut(layout.align())
+        } else {
+            unsafe { alloc(layout).cast() }
+        };
+        let Some(value) = NonNull::new(value) else {
+            handle_alloc_error(layout)
+        };
+        Self { ty, value }
+    }
+    unsafe fn assume_init(self) -> DynSimOnlyValue {
+        let this = ManuallyDrop::new(self);
+        DynSimOnlyValue {
+            ty: this.ty,
+            value: this.value,
+        }
+    }
+}
+
+impl Drop for DynSimOnlyValueUninit {
+    fn drop(&mut self) {
+        let layout = self.ty.vtable.layout;
+        if layout.size() != 0 {
+            unsafe {
+                dealloc(self.value.as_ptr().cast(), layout);
+            }
+        }
+    }
+}
+
+impl DynSimOnlyValueType {
+    pub const fn of<T: SimOnlyValueTrait>() -> Self {
+        Self {
+            vtable: <T as GetSimOnlyValueVTable>::VTABLE,
+        }
+    }
+    pub fn type_id(self) -> TypeId {
+        (self.vtable.type_id)()
+    }
+    pub fn type_name(self) -> &'static str {
+        (self.vtable.type_name)()
+    }
+    pub fn is<T: SimOnlyValueTrait>(self) -> bool {
+        self.type_id() == TypeId::of::<T>()
+    }
+    pub fn downcast<T: SimOnlyValueTrait>(self) -> Option<SimOnlyValueType<T>> {
+        self.is::<T>().then_some(SimOnlyValueType::default())
+    }
+    pub fn deserialize_from_json_string(
+        self,
+        json_str: &str,
+    ) -> serde_json::Result<DynSimOnlyValue> {
+        let retval = DynSimOnlyValueUninit::new(self);
+        unsafe {
+            (self.vtable.deserialize_into_uninit_from_json_string)(retval.value, json_str)?;
+            Ok(retval.assume_init())
+        }
+    }
+}
+
+impl PartialEq for DynSimOnlyValueType {
+    fn eq(&self, other: &Self) -> bool {
+        if ptr::eq(self.vtable, other.vtable) {
+            true
+        } else if self.vtable.layout != other.vtable.layout {
+            false
+        } else {
+            (self.vtable.type_id)() == (other.vtable.type_id)()
+        }
+    }
+}
+
+impl Eq for DynSimOnlyValueType {}
+
+impl Hash for DynSimOnlyValueType {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (self.vtable.type_id)().hash(state);
+    }
+}
+
+impl fmt::Debug for DynSimOnlyValueType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "SimOnlyValueType<{}>", (self.vtable.type_name)())
+    }
+}
+
+impl<T: SimOnlyValueTrait> From<SimOnlyValueType<T>> for DynSimOnlyValueType {
+    fn from(value: SimOnlyValueType<T>) -> Self {
+        let SimOnlyValueType(PhantomData) = value;
+        Self {
+            vtable: <T as GetSimOnlyValueVTable>::VTABLE,
+        }
+    }
+}
+
+#[derive(Clone, Eq, PartialEq, Hash, Debug)]
+pub struct SimOnlyValueType<T: SimOnlyValueTrait>(PhantomData<fn() -> T>);
+
+impl<T: SimOnlyValueTrait> Copy for SimOnlyValueType<T> {}
+
+impl<T: SimOnlyValueTrait> Default for SimOnlyValueType<T> {
+    fn default() -> Self {
+        Self(PhantomData)
+    }
+}
+
+#[derive(Clone, Eq, PartialEq, Hash, Default, PartialOrd, Ord)]
+pub struct SimOnlyValue<T: SimOnlyValueTrait>(Box<T>);
+
+impl<T: SimOnlyValueTrait> SimOnlyValue<T> {
+    pub fn with_dyn_ref<F: FnOnce(&DynSimOnlyValue) -> R, R>(&self, f: F) -> R {
+        let dyn_ref = ManuallyDrop::new(DynSimOnlyValue {
+            ty: SimOnlyValueType::<T>::default().into(),
+            value: NonNull::<T>::from_ref(&self.0).cast(),
+        });
+        f(&dyn_ref)
+    }
+    pub fn from_box(v: Box<T>) -> Self {
+        Self(v)
+    }
+    pub fn new(v: T) -> Self {
+        Self(Box::new(v))
+    }
+    pub fn into_inner(this: Self) -> T {
+        *this.0
+    }
+    pub fn into_inner_box(this: Self) -> Box<T> {
+        this.0
+    }
+    pub fn into_dyn(this: Self) -> DynSimOnlyValue {
+        DynSimOnlyValue::from(this)
+    }
+}
+
+impl<T: SimOnlyValueTrait> std::ops::Deref for SimOnlyValue<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<T: SimOnlyValueTrait> std::ops::DerefMut for SimOnlyValue<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+pub struct DynSimOnlyValue {
+    ty: DynSimOnlyValueType,
+    value: NonNull<()>,
+}
+
+struct DebugDynSimOnlyValueInner<'a>(&'a DynSimOnlyValue);
+
+impl<'a> fmt::Debug for DebugDynSimOnlyValueInner<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        unsafe { (self.0.ty.vtable.debug_fmt)(self.0.value, f) }
+    }
+}
+
+impl fmt::Debug for DynSimOnlyValue {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "SimOnlyValue<{}>", self.ty.type_name())?;
+        f.debug_tuple("")
+            .field(&DebugDynSimOnlyValueInner(self))
+            .finish()
+    }
+}
+
+impl PartialEq for DynSimOnlyValue {
+    fn eq(&self, other: &Self) -> bool {
+        self.ty == other.ty && unsafe { (self.ty.vtable.eq)(self.value, other.value) }
+    }
+}
+
+impl Eq for DynSimOnlyValue {}
+
+impl Hash for DynSimOnlyValue {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.ty.hash(state);
+        unsafe { (self.ty.vtable.hash)(self.value, state) };
+    }
+}
+
+impl Clone for DynSimOnlyValue {
+    fn clone(&self) -> Self {
+        let retval = DynSimOnlyValueUninit::new(self.ty);
+        unsafe {
+            (self.ty.vtable.clone_into_uninit)(retval.value, self.value);
+            retval.assume_init()
+        }
+    }
+    fn clone_from(&mut self, source: &Self) {
+        if self.ty == source.ty {
+            unsafe { (self.ty.vtable.clone_from)(self.value, source.value) };
+        } else {
+            *self = source.clone();
+        }
+    }
+}
+
+impl Drop for DynSimOnlyValue {
+    fn drop(&mut self) {
+        unsafe {
+            ptr::read(self).drop_in_place_and_keep_alloc();
+        }
+    }
+}
+
+impl<T: SimOnlyValueTrait> From<SimOnlyValue<T>> for DynSimOnlyValue {
+    fn from(value: SimOnlyValue<T>) -> Self {
+        unsafe {
+            Self {
+                ty: SimOnlyValueType::<T>::default().into(),
+                value: NonNull::new_unchecked(Box::into_raw(value.0)).cast::<()>(),
+            }
+        }
+    }
+}
+
+impl DynSimOnlyValue {
+    pub fn ty(&self) -> DynSimOnlyValueType {
+        self.ty
+    }
+    pub fn type_id(&self) -> TypeId {
+        self.ty.type_id()
+    }
+    pub fn is<T: SimOnlyValueTrait>(&self) -> bool {
+        self.ty.is::<T>()
+    }
+    pub fn downcast<T: SimOnlyValueTrait>(self) -> Result<SimOnlyValue<T>, DynSimOnlyValue> {
+        let Some(_) = self.ty.downcast::<T>() else {
+            return Err(self);
+        };
+        Ok(SimOnlyValue(unsafe {
+            Box::from_raw(ManuallyDrop::new(self).value.as_ptr().cast::<T>())
+        }))
+    }
+    pub fn downcast_ref<T: SimOnlyValueTrait>(&self) -> Option<&T> {
+        self.ty
+            .downcast::<T>()
+            .map(|_| unsafe { &*self.value.as_ptr().cast::<T>() })
+    }
+    pub fn downcast_mut<T: SimOnlyValueTrait>(&mut self) -> Option<&mut T> {
+        self.ty
+            .downcast::<T>()
+            .map(|_| unsafe { &mut *self.value.as_ptr().cast::<T>() })
+    }
+    pub fn serialize_to_json_string(&self) -> serde_json::Result<String> {
+        unsafe { (self.ty.vtable.serialize_to_json_string)(self.value) }
+    }
+    fn forget_and_keep_alloc(self) -> DynSimOnlyValueUninit {
+        let this = ManuallyDrop::new(self);
+        DynSimOnlyValueUninit {
+            ty: this.ty,
+            value: this.value,
+        }
+    }
+    fn drop_in_place_and_keep_alloc(self) -> DynSimOnlyValueUninit {
+        let retval = self.forget_and_keep_alloc();
+        unsafe { (retval.ty.vtable.drop_in_place)(retval.value) };
+        retval
+    }
+}
diff --git a/crates/fayalite/src/ty.rs b/crates/fayalite/src/ty.rs
index 787869d..02df46a 100644
--- a/crates/fayalite/src/ty.rs
+++ b/crates/fayalite/src/ty.rs
@@ -11,13 +11,16 @@ use crate::{
     intern::{Intern, Interned},
     phantom_const::PhantomConst,
     reset::{AsyncReset, Reset, SyncReset},
-    sim::value::{SimValue, ToSimValueWithType},
+    sim::value::{
+        DynSimOnlyValue, DynSimOnlyValueType, SimValue, ToSimValueWithType,
+        assert_matching_sim_values,
+    },
     source_location::SourceLocation,
     util::ConstUsize,
 };
 use bitvec::slice::BitSlice;
 use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned};
-use std::{fmt, hash::Hash, iter::FusedIterator, ops::Index, sync::Arc};
+use std::{fmt, hash::Hash, iter::FusedIterator, marker::PhantomData, mem, ops::Index, sync::Arc};
 
 pub(crate) mod serde_impls;
 
@@ -28,6 +31,20 @@ pub struct TypeProperties {
     pub is_storable: bool,
     pub is_castable_from_bits: bool,
     pub bit_width: usize,
+    pub sim_only_values_len: usize,
+}
+
+impl TypeProperties {
+    #[track_caller]
+    pub(crate) fn assert_matching_opaque_sim_value(self, opaque: OpaqueSimValueSlice<'_>) {
+        assert_eq!(self.bit_width, opaque.bit_width());
+        assert_eq!(self.sim_only_values_len, opaque.sim_only_values().len());
+    }
+    #[track_caller]
+    pub(crate) fn assert_matching_opaque_sim_value_writer(self, writer: &OpaqueSimValueWriter<'_>) {
+        assert_eq!(self.bit_width, writer.bit_width());
+        assert_eq!(self.sim_only_values_len, writer.sim_only_values_len());
+    }
 }
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq)]
@@ -43,6 +60,7 @@ pub enum CanonicalType {
     Reset(Reset),
     Clock(Clock),
     PhantomConst(PhantomConst),
+    DynSimOnlyValueType(DynSimOnlyValueType),
 }
 
 impl fmt::Debug for CanonicalType {
@@ -59,6 +77,7 @@ impl fmt::Debug for CanonicalType {
             Self::Reset(v) => v.fmt(f),
             Self::Clock(v) => v.fmt(f),
             Self::PhantomConst(v) => v.fmt(f),
+            Self::DynSimOnlyValueType(v) => v.fmt(f),
         }
     }
 }
@@ -95,6 +114,7 @@ impl CanonicalType {
             CanonicalType::Reset(v) => v.type_properties(),
             CanonicalType::Clock(v) =>
v.type_properties(), CanonicalType::PhantomConst(v) => v.type_properties(), + CanonicalType::DynSimOnlyValueType(v) => v.type_properties(), } } pub fn is_passive(self) -> bool { @@ -177,6 +197,12 @@ impl CanonicalType { }; lhs.can_connect(rhs) } + CanonicalType::DynSimOnlyValueType(lhs) => { + let CanonicalType::DynSimOnlyValueType(rhs) = rhs else { + return false; + }; + lhs.can_connect(rhs) + } } } pub(crate) fn as_serde_unexpected_str(self) -> &'static str { @@ -287,6 +313,7 @@ impl_base_type!(SyncReset); impl_base_type!(Reset); impl_base_type!(Clock); impl_base_type!(PhantomConst); +impl_base_type!(DynSimOnlyValueType); impl_base_type_serde!(Bool, "a Bool"); impl_base_type_serde!(Enum, "an Enum"); @@ -348,9 +375,17 @@ pub trait Type: fn canonical(&self) -> CanonicalType; fn from_canonical(canonical_type: CanonicalType) -> Self; fn source_location() -> SourceLocation; - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue; - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice); - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice); + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue; + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w>; } pub trait BaseType: @@ -405,6 +440,7 @@ impl Type for CanonicalType { CanonicalType::Reset(v) => v.mask_type().canonical(), CanonicalType::Clock(v) => v.mask_type().canonical(), CanonicalType::PhantomConst(v) => v.mask_type().canonical(), + CanonicalType::DynSimOnlyValueType(v) => v.mask_type().canonical(), } } fn canonical(&self) -> CanonicalType { @@ -416,28 +452,46 @@ impl Type for CanonicalType { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), self.bit_width()); - OpaqueSimValue::from_bitslice(bits) + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + self.type_properties() + .assert_matching_opaque_sim_value(opaque); + opaque.to_owned() } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), self.bit_width()); - assert_eq!(value.bit_width(), self.bit_width()); - value.bits_mut().bits_mut().copy_from_bitslice(bits); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + self.type_properties() + .assert_matching_opaque_sim_value(opaque); + value.clone_from_slice(opaque); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), self.bit_width()); - assert_eq!(value.bit_width(), self.bit_width()); - bits.copy_from_bitslice(value.bits().bits()); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + self.type_properties() + .assert_matching_opaque_sim_value_writer(&writer); + writer.fill_cloned_from_slice(value.as_slice()) } } #[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub struct OpaqueSimValue { bits: UIntValue, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + sim_only_values: Vec, } impl OpaqueSimValue { + pub fn is_empty(&self) -> bool { + let Self { + bits, + sim_only_values, + } = self; + bits.bits().is_empty() && 
sim_only_values.is_empty() + } pub fn bit_width(&self) -> usize { self.bits.width() } @@ -451,13 +505,86 @@ impl OpaqueSimValue { self.bits } pub fn from_bits(bits: UIntValue) -> Self { - Self { bits } + Self { + bits, + sim_only_values: Vec::new(), + } } pub fn from_bitslice(v: &BitSlice) -> Self { + Self::from_bitslice_and_sim_only_values(v, Vec::new()) + } + pub fn from_bitslice_and_sim_only_values( + bits: &BitSlice, + sim_only_values: Vec, + ) -> Self { Self { - bits: UIntValue::new(Arc::new(v.to_bitvec())), + bits: UIntValue::new(Arc::new(bits.to_bitvec())), + sim_only_values, } } + pub fn from_bits_and_sim_only_values( + bits: UIntValue, + sim_only_values: Vec, + ) -> Self { + Self { + bits, + sim_only_values, + } + } + pub fn into_parts(self) -> (UIntValue, Vec) { + let Self { + bits, + sim_only_values, + } = self; + (bits, sim_only_values) + } + pub fn parts_mut(&mut self) -> (&mut UIntValue, &mut Vec) { + let Self { + bits, + sim_only_values, + } = self; + (bits, sim_only_values) + } + pub fn sim_only_values(&self) -> &[DynSimOnlyValue] { + &self.sim_only_values + } + pub fn sim_only_values_mut(&mut self) -> &mut Vec { + &mut self.sim_only_values + } + pub fn as_slice(&self) -> OpaqueSimValueSlice<'_> { + OpaqueSimValueSlice { + bits: self.bits.bits(), + sim_only_values: &self.sim_only_values, + } + } + pub fn slice( + &self, + bits_range: BitsRange, + sim_only_values_range: SOVRange, + ) -> OpaqueSimValueSlice<'_> + where + BitSlice: Index, + [DynSimOnlyValue]: Index, + { + self.as_slice().slice(bits_range, sim_only_values_range) + } + pub fn rewrite_with(&mut self, types: Interned<[DynSimOnlyValueType]>, f: F) + where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + OpaqueSimValueWriter::rewrite_with(types, self, f); + } + pub fn rewrite_with_static(&mut self, types: &'static [DynSimOnlyValueType], f: F) + where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + OpaqueSimValueWriter::rewrite_with_static(types, self, f); + } + pub fn clone_from_slice(&mut self, slice: OpaqueSimValueSlice<'_>) { + self.bits.bits_mut().clone_from_bitslice(slice.bits()); + self.sim_only_values + .clone_from_slice(slice.sim_only_values()); + } } impl> ToSimValueWithType for OpaqueSimValue { @@ -469,6 +596,163 @@ impl> ToSimValueWithType for OpaqueSimValu } } +#[derive(Copy, Clone, Debug)] +pub struct OpaqueSimValueSlice<'a> { + bits: &'a BitSlice, + sim_only_values: &'a [DynSimOnlyValue], +} + +impl<'a> Default for OpaqueSimValueSlice<'a> { + fn default() -> Self { + Self::empty() + } +} + +impl<'a> OpaqueSimValueSlice<'a> { + pub fn from_parts(bits: &'a BitSlice, sim_only_values: &'a [DynSimOnlyValue]) -> Self { + Self { + bits, + sim_only_values, + } + } + pub fn empty() -> Self { + Self { + bits: BitSlice::empty(), + sim_only_values: &[], + } + } + pub fn is_empty(self) -> bool { + let Self { + bits, + sim_only_values, + } = self; + bits.is_empty() && sim_only_values.is_empty() + } + pub fn bit_width(self) -> usize { + self.bits.len() + } + pub fn bits(self) -> &'a BitSlice { + self.bits + } + pub fn sim_only_values(self) -> &'a [DynSimOnlyValue] { + self.sim_only_values + } + pub fn to_owned(self) -> OpaqueSimValue { + OpaqueSimValue::from_bitslice_and_sim_only_values(self.bits, self.sim_only_values.to_vec()) + } + pub fn slice( + self, + bits_range: BitsRange, + sim_only_values_range: SOVRange, + ) -> OpaqueSimValueSlice<'a> + where + BitSlice: Index, + [DynSimOnlyValue]: Index, + { 
+ Self { + bits: &self.bits[bits_range], + sim_only_values: &self.sim_only_values[sim_only_values_range], + } + } +} + +#[derive(Debug)] +pub struct OpaqueSimValueWriter<'a> { + bits: &'a mut BitSlice, + sim_only_values: &'a mut Vec, + sim_only_values_range: std::ops::Range, +} + +#[derive(Debug)] +pub struct OpaqueSimValueWritten<'a> { + _phantom: PhantomData<&'a ()>, +} + +impl<'a> OpaqueSimValueWriter<'a> { + pub fn rewrite_with(sim_only_values_len: usize, value: &mut OpaqueSimValue, f: F) + where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + let OpaqueSimValueWritten { + _phantom: PhantomData, + } = f(OpaqueSimValueWriter::rewrite_helper( + sim_only_values_len, + value, + )); + } + pub(crate) fn rewrite_helper( + sim_only_values_len: usize, + value: &'a mut OpaqueSimValue, + ) -> Self { + let (bits_mut, sim_only_values) = value.parts_mut(); + sim_only_values.truncate(sim_only_values_len); + sim_only_values.reserve_exact(sim_only_values_len - sim_only_values.len()); + Self { + bits: bits_mut.bits_mut(), + sim_only_values, + sim_only_values_range: 0..sim_only_values_len, + } + } + pub fn bit_width(&self) -> usize { + self.bits.len() + } + pub fn sim_only_values_len(&self) -> usize { + self.sim_only_values_range.len() + } + pub fn is_empty(&self) -> bool { + let Self { + bits, + sim_only_values: _, + sim_only_values_range, + } = self; + bits.is_empty() && sim_only_values_range.is_empty() + } + pub fn fill_cloned_from_slice( + self, + slice: OpaqueSimValueSlice<'_>, + ) -> OpaqueSimValueWritten<'a> { + let Self { + bits, + sim_only_values, + sim_only_values_range, + } = self; + assert_eq!(slice.bit_width(), bits.len()); + assert_eq!(slice.sim_only_values().len(), sim_only_values_range.len()); + bits.copy_from_bitslice(slice.bits); + let (clone_from_src, clone_src) = slice.sim_only_values.split_at( + (sim_only_values.len() - sim_only_values_range.start).min(slice.sim_only_values.len()), + ); + sim_only_values[sim_only_values_range][..clone_from_src.len()] + .clone_from_slice(clone_from_src); + sim_only_values.extend_from_slice(clone_src); + OpaqueSimValueWritten { + _phantom: PhantomData, + } + } + pub fn fill_prefix_with( + &mut self, + prefix_bit_width: usize, + prefix_sim_only_values_len: usize, + f: F, + ) where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + assert!(prefix_bit_width <= self.bit_width()); + assert!(prefix_sim_only_values_len <= self.sim_only_values_len()); + let next_start = self.sim_only_values_range.start + prefix_sim_only_values_len; + let OpaqueSimValueWritten { + _phantom: PhantomData, + } = f(OpaqueSimValueWriter { + bits: &mut self.bits[..prefix_bit_width], + sim_only_values: self.sim_only_values, + sim_only_values_range: self.sim_only_values_range.start..next_start, + }); + assert!(self.sim_only_values.len() >= next_start); + self.bits = &mut mem::take(&mut self.bits)[prefix_bit_width..]; + self.sim_only_values_range.start = next_start; + } +} + pub trait StaticType: Type + Default { const TYPE: Self; const MASK_TYPE: Self::MaskType; diff --git a/crates/fayalite/src/util.rs b/crates/fayalite/src/util.rs index 4670a1f..e550464 100644 --- a/crates/fayalite/src/util.rs +++ b/crates/fayalite/src/util.rs @@ -9,6 +9,7 @@ mod misc; mod scoped_ref; pub(crate) mod streaming_read_utf8; mod test_hasher; +mod type_id_serde; // allow easily switching the hasher crate-wide for testing #[cfg(feature = "unstable-test-hasher")] @@ -39,6 +40,9 @@ pub use misc::{ 
     iter_eq_by,
 };
 
+#[doc(inline)]
+pub use type_id_serde::TypeIdSerde;
+
 pub mod job_server;
 pub mod prefix_sum;
 pub mod ready_valid;
diff --git a/crates/fayalite/src/util/type_id_serde.rs b/crates/fayalite/src/util/type_id_serde.rs
new file mode 100644
index 0000000..35408fb
--- /dev/null
+++ b/crates/fayalite/src/util/type_id_serde.rs
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+// See Notices.txt for copyright information
+
+use hashbrown::{HashMap, hash_map::Entry};
+use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error};
+use std::{
+    any::TypeId,
+    fmt::Write,
+    hash::{BuildHasher, Hash, Hasher, RandomState},
+    sync::Mutex,
+};
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+/// [`TypeId`] that implements [`Serialize`] and [`Deserialize`] for use only within a single running instance of a
+/// single program. If you try to transfer the serialized value from one program to another, it should fail to
+/// deserialize with extremely high probability since we use 128-bit random numbers from [`RandomState`] (though
+/// they may be deterministic on some targets). Note that those 128-bit random numbers are *not* the same number as is
+/// stored in the inner [`TypeId`].
+pub struct TypeIdSerde(pub TypeId);
+
+#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
+#[serde(rename = "TypeIdSerde")]
+struct TypeIdSerdeId([u32; 4]);
+
+#[derive(Default)]
+struct TypeIdSerdeIdMapInner {
+    serde_id_to_type_id: HashMap<TypeIdSerdeId, TypeId>,
+    serde_id_random_state: RandomState,
+    buffer: String,
+}
+
+impl TypeIdSerdeIdMapInner {
+    #[cold]
+    fn new_serde_id(&mut self, type_id: TypeId) -> TypeIdSerdeId {
+        let mut try_number = 0u64;
+        let mut hasher = self.serde_id_random_state.build_hasher();
+        // extract more bits of randomness from TypeId -- its Hash impl only hashes 64-bits
+        self.buffer.clear();
+        write!(self.buffer, "{type_id:?}").expect("shouldn't ever fail");
+        self.buffer.hash(&mut hasher);
+        loop {
+            let mut hasher = hasher.clone();
+            try_number.hash(&mut hasher);
+            try_number += 1;
+            let retval = TypeIdSerdeId(std::array::from_fn(|i| {
+                let mut hasher = hasher.clone();
+                i.hash(&mut hasher);
+                hasher.finish() as u32
+            }));
+            match self.serde_id_to_type_id.entry(retval) {
+                Entry::Occupied(_) => continue,
+                Entry::Vacant(e) => {
+                    e.insert(type_id);
+                    return retval;
+                }
+            }
+        }
+    }
+}
+
+#[derive(Default)]
+struct TypeIdSerdeIdMap {
+    type_id_to_serde_id: HashMap<TypeId, TypeIdSerdeId>,
+    inner: TypeIdSerdeIdMapInner,
+}
+
+static TYPE_ID_SERDE_ID_MAP: Mutex<Option<TypeIdSerdeIdMap>> = Mutex::new(None);
+
+impl Serialize for TypeIdSerde {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut type_id_serde_id_map = TYPE_ID_SERDE_ID_MAP.lock().unwrap();
+        let map = type_id_serde_id_map.get_or_insert_default();
+        let serde_id = match map.type_id_to_serde_id.entry(self.0) {
+            Entry::Occupied(e) => *e.get(),
+            Entry::Vacant(e) => *e.insert(map.inner.new_serde_id(self.0)),
+        };
+        drop(type_id_serde_id_map);
+        serde_id.serialize(serializer)
+    }
+}
+
+impl<'de> Deserialize<'de> for TypeIdSerde {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let serde_id = TypeIdSerdeId::deserialize(deserializer)?;
+        let mut type_id_serde_id_map = TYPE_ID_SERDE_ID_MAP.lock().unwrap();
+        Ok(TypeIdSerde(*type_id_serde_id_map
+            .as_mut()
+            .and_then(|map| map.inner.serde_id_to_type_id.get(&serde_id))
+            .ok_or_else(|| {
+                D::Error::custom(
+                    "TypeIdSerde value from a different invocation of this program which is not allowed",
+                )
+            })?))
+    }
+}
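
A quick orientation for reviewers of the sim-only value API added in this patch: any `'static + Eq + Hash + Debug + Clone + Serialize + DeserializeOwned` type picks up `SimOnlyValueTrait` through the blanket impl, and the typed and type-erased wrappers round-trip as sketched below. The sketch is illustrative only and not part of the patch; it assumes `SimOnlyValue`, `DynSimOnlyValue`, and `DynSimOnlyValueType` end up re-exported from `fayalite::sim::value` (as the `ty.rs` imports suggest), that the generic bounds lost to this message's formatting are the usual `T: SimOnlyValueTrait`, and that `HostHandle` is a made-up example payload.

    use fayalite::sim::value::{DynSimOnlyValue, SimOnlyValue};
    use serde::{Deserialize, Serialize};

    // Hypothetical user payload; the derives cover every bound that
    // SimOnlyValueTrait requires, so the blanket impl applies automatically.
    #[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
    struct HostHandle {
        name: String,
        id: u64,
    }

    fn round_trip() -> serde_json::Result<()> {
        // Strongly typed wrapper first, then erase to the dynamically typed form.
        let typed = SimOnlyValue::new(HostHandle { name: "uart0".into(), id: 3 });
        let dynamic: DynSimOnlyValue = SimOnlyValue::into_dyn(typed);

        // Downcasting checks the erased type and fails cleanly on a mismatch.
        assert_eq!(dynamic.downcast_ref::<HostHandle>().map(|h| h.id), Some(3));
        assert!(dynamic.downcast_ref::<u32>().is_none());

        // Serde goes through the vtable's JSON hooks; deserializing needs the
        // matching DynSimOnlyValueType to recover the right vtable.
        let json = dynamic.serialize_to_json_string()?;
        let restored = dynamic.ty().deserialize_from_json_string(&json)?;
        assert_eq!(restored, dynamic);
        Ok(())
    }

This is also why `SerdeSimOnlyValue` in `sim/value.rs` serializes the `DynSimOnlyValueType` alongside the JSON payload, and why `TypeIdSerde` only promises round-trips within a single running process.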