From e7e831cf00c17a6a840c5aab5c54b1d62c5a4ee1 Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Tue, 26 Aug 2025 19:17:21 -0700 Subject: [PATCH 1/3] split out simulator compiler into a separate module --- crates/fayalite/src/sim.rs | 5075 +------------------------- crates/fayalite/src/sim/compiler.rs | 5087 +++++++++++++++++++++++++++ 2 files changed, 5102 insertions(+), 5060 deletions(-) create mode 100644 crates/fayalite/src/sim/compiler.rs diff --git a/crates/fayalite/src/sim.rs b/crates/fayalite/src/sim.rs index d0daf34..596e323 100644 --- a/crates/fayalite/src/sim.rs +++ b/crates/fayalite/src/sim.rs @@ -5,64 +5,41 @@ use crate::{ bundle::{BundleField, BundleType}, - enum_::{EnumType, EnumVariant}, expr::{ - ExprEnum, Flow, ToLiteralBits, ops, + Flow, ToLiteralBits, target::{ - GetTarget, Target, TargetBase, TargetPathArrayElement, TargetPathBundleField, - TargetPathElement, + GetTarget, Target, TargetPathArrayElement, TargetPathBundleField, TargetPathElement, }, }, int::{BoolOrIntType, UIntValue}, - intern::{ - Intern, Interned, InternedCompare, Memoize, PtrEqWithTypeId, SupportsPtrEqWithTypeId, - }, - memory::PortKind, - module::{ - AnnotatedModuleIO, Block, ExternModuleBody, Id, InstantiatedModule, ModuleBody, NameId, - NormalModuleBody, ScopedNameId, Stmt, StmtConnect, StmtDeclaration, StmtFormal, StmtIf, - StmtInstance, StmtMatch, StmtReg, StmtWire, TargetInInstantiatedModule, - transform::deduce_resets::deduce_resets, - }, + intern::{Intern, Interned, InternedCompare, PtrEqWithTypeId, SupportsPtrEqWithTypeId}, prelude::*, - reset::{ResetType, ResetTypeDispatch}, + reset::ResetType, sim::{ + compiler::{ + CompiledBundleField, CompiledExternModule, CompiledTypeLayoutBody, CompiledValue, + }, interpreter::{ - BreakAction, BreakpointsSet, Insn, InsnField, InsnFieldKind, InsnFieldType, - InsnOrLabel, Insns, InsnsBuilding, InsnsBuildingDone, InsnsBuildingKind, Label, - MemoryData, RunResult, SlotDebugData, SmallUInt, State, StatePartArrayIndex, - StatePartArrayIndexed, StatePartIndex, StatePartIndexRange, StatePartKind, - StatePartKindBigSlots, StatePartKindMemories, StatePartKindSmallSlots, StatePartLayout, - StatePartLen, StatePartsValue, TypeArrayIndex, TypeArrayIndexes, TypeIndex, - TypeIndexRange, TypeLayout, TypeLen, TypeParts, + BreakAction, BreakpointsSet, RunResult, SmallUInt, State, StatePartIndex, + StatePartKindBigSlots, StatePartKindMemories, StatePartKindSmallSlots, TypeIndexRange, + TypeLen, }, time::{SimDuration, SimInstant}, value::SimValue, }, - ty::StaticType, util::{BitSliceWriteWithBase, DebugAsDisplay, HashMap, HashSet}, }; use bitvec::{bits, order::Lsb0, slice::BitSlice, vec::BitVec, view::BitView}; use num_bigint::BigInt; use num_traits::{Signed, Zero}; -use petgraph::{ - data::FromElements, - visit::{ - EdgeRef, GraphBase, IntoEdgeReferences, IntoNeighbors, IntoNeighborsDirected, - IntoNodeIdentifiers, IntoNodeReferences, NodeRef, VisitMap, Visitable, - }, -}; use std::{ any::Any, borrow::Cow, cell::RefCell, - collections::BTreeSet, fmt, future::{Future, IntoFuture}, hash::Hash, - marker::PhantomData, mem, - ops::IndexMut, pin::Pin, ptr, rc::Rc, @@ -70,5035 +47,13 @@ use std::{ task::Poll, }; +mod compiler; mod interpreter; pub mod time; pub mod value; pub mod vcd; -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -enum CondBody { - IfTrue { - cond: CompiledValue, - }, - IfFalse { - cond: CompiledValue, - }, - MatchArm { - discriminant: StatePartIndex, - variant_index: usize, - }, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct Cond { - 
body: CondBody, - source_location: SourceLocation, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct CompiledBundleField { - offset: TypeIndex, - ty: CompiledTypeLayout, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -enum CompiledTypeLayoutBody { - Scalar, - Array { - /// debug names are ignored, use parent's layout instead - element: Interned>, - }, - Bundle { - /// debug names are ignored, use parent's layout instead - fields: Interned<[CompiledBundleField]>, - }, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct CompiledTypeLayout { - ty: T, - layout: TypeLayout, - body: CompiledTypeLayoutBody, -} - -impl CompiledTypeLayout { - fn with_prefixed_debug_names(self, prefix: &str) -> Self { - let Self { ty, layout, body } = self; - Self { - ty, - layout: layout.with_prefixed_debug_names(prefix), - body, - } - } - fn with_anonymized_debug_info(self) -> Self { - let Self { ty, layout, body } = self; - Self { - ty, - layout: layout.with_anonymized_debug_info(), - body, - } - } - fn get(ty: T) -> Self { - #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] - struct MyMemoize; - impl Memoize for MyMemoize { - type Input = CanonicalType; - type InputOwned = CanonicalType; - type Output = CompiledTypeLayout; - - fn inner(self, input: &Self::Input) -> Self::Output { - match input { - CanonicalType::UInt(_) - | CanonicalType::SInt(_) - | CanonicalType::Bool(_) - | CanonicalType::Enum(_) - | CanonicalType::AsyncReset(_) - | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => { - let mut layout = TypeLayout::empty(); - let debug_data = SlotDebugData { - name: Interned::default(), - ty: *input, - }; - layout.big_slots = StatePartLayout::scalar(debug_data, ()); - CompiledTypeLayout { - ty: *input, - layout: layout.into(), - body: CompiledTypeLayoutBody::Scalar, - } - } - CanonicalType::Array(array) => { - let mut layout = TypeLayout::empty(); - let element = CompiledTypeLayout::get(array.element()).intern_sized(); - for index in 0..array.len() { - layout.allocate( - &element - .layout - .with_prefixed_debug_names(&format!("[{index}]")), - ); - } - CompiledTypeLayout { - ty: *input, - layout: layout.into(), - body: CompiledTypeLayoutBody::Array { element }, - } - } - CanonicalType::PhantomConst(_) => { - let unit_layout = CompiledTypeLayout::get(()); - CompiledTypeLayout { - ty: *input, - layout: unit_layout.layout, - body: unit_layout.body, - } - } - CanonicalType::Bundle(bundle) => { - let mut layout = TypeLayout::empty(); - let fields = bundle - .fields() - .iter() - .map( - |BundleField { - name, - flipped: _, - ty, - }| { - let ty = CompiledTypeLayout::get(*ty); - let offset = layout - .allocate( - &ty.layout - .with_prefixed_debug_names(&format!(".{name}")), - ) - .start(); - CompiledBundleField { offset, ty } - }, - ) - .collect(); - CompiledTypeLayout { - ty: *input, - layout: layout.into(), - body: CompiledTypeLayoutBody::Bundle { fields }, - } - } - } - } - } - let CompiledTypeLayout { - ty: _, - layout, - body, - } = MyMemoize.get_owned(ty.canonical()); - Self { ty, layout, body } - } -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -struct CompiledValue { - layout: CompiledTypeLayout, - range: TypeIndexRange, - write: Option<(CompiledTypeLayout, TypeIndexRange)>, -} - -impl CompiledValue { - fn write(self) -> (CompiledTypeLayout, TypeIndexRange) { - self.write.unwrap_or((self.layout, self.range)) - } - fn write_value(self) -> Self { - let (layout, range) = self.write(); - Self { - layout, - range, - write: None, 
- } - } - fn map( - self, - mut f: impl FnMut( - CompiledTypeLayout, - TypeIndexRange, - ) -> (CompiledTypeLayout, TypeIndexRange), - ) -> CompiledValue { - let (layout, range) = f(self.layout, self.range); - CompiledValue { - layout, - range, - write: self.write.map(|(layout, range)| f(layout, range)), - } - } - fn map_ty(self, mut f: impl FnMut(T) -> U) -> CompiledValue { - self.map(|CompiledTypeLayout { ty, layout, body }, range| { - ( - CompiledTypeLayout { - ty: f(ty), - layout, - body, - }, - range, - ) - }) - } -} - -impl CompiledValue { - fn field_by_index(self, field_index: usize) -> CompiledValue { - self.map(|layout, range| { - let CompiledTypeLayout { - ty: _, - layout: _, - body: CompiledTypeLayoutBody::Bundle { fields }, - } = layout - else { - unreachable!(); - }; - ( - fields[field_index].ty, - range.slice(TypeIndexRange::new( - fields[field_index].offset, - fields[field_index].ty.layout.len(), - )), - ) - }) - } - fn field_by_name(self, name: Interned) -> CompiledValue { - self.field_by_index(self.layout.ty.name_indexes()[&name]) - } -} - -impl CompiledValue { - fn element(self, index: usize) -> CompiledValue { - self.map(|layout, range| { - let CompiledTypeLayoutBody::Array { element } = layout.body else { - unreachable!(); - }; - (*element, range.index_array(element.layout.len(), index)) - }) - } - fn element_dyn( - self, - index_slot: StatePartIndex, - ) -> CompiledExpr { - CompiledExpr::from(self).element_dyn(index_slot) - } -} - -#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] -struct CompiledExpr { - static_part: CompiledValue, - indexes: TypeArrayIndexes, -} - -impl From> for CompiledExpr { - fn from(static_part: CompiledValue) -> Self { - Self { - static_part, - indexes: TypeArrayIndexes::default(), - } - } -} - -impl CompiledExpr { - fn map_ty(self, f: impl FnMut(T) -> U) -> CompiledExpr { - let Self { - static_part, - indexes, - } = self; - CompiledExpr { - static_part: static_part.map_ty(f), - indexes, - } - } - fn add_target_without_indexes_to_set(self, inputs: &mut SlotSet) { - let Self { - static_part, - indexes, - } = self; - indexes.as_ref().for_each_offset(|offset| { - inputs.extend([static_part.range.offset(offset)]); - }); - } - fn add_target_and_indexes_to_set(self, inputs: &mut SlotSet) { - let Self { - static_part: _, - indexes, - } = self; - self.add_target_without_indexes_to_set(inputs); - inputs.extend(indexes.as_ref().iter()); - } -} - -impl CompiledExpr { - fn field_by_index(self, field_index: usize) -> CompiledExpr { - CompiledExpr { - static_part: self.static_part.field_by_index(field_index), - indexes: self.indexes, - } - } - fn field_by_name(self, name: Interned) -> CompiledExpr { - CompiledExpr { - static_part: self.static_part.field_by_name(name), - indexes: self.indexes, - } - } -} - -impl CompiledExpr { - fn element(self, index: usize) -> CompiledExpr { - CompiledExpr { - static_part: self.static_part.element(index), - indexes: self.indexes, - } - } - fn element_dyn( - self, - index_slot: StatePartIndex, - ) -> CompiledExpr { - let CompiledTypeLayoutBody::Array { element } = self.static_part.layout.body else { - unreachable!(); - }; - let stride = element.layout.len(); - let indexes = self.indexes.join(TypeArrayIndex::from_parts( - index_slot, - self.static_part.layout.ty.len(), - stride, - )); - CompiledExpr { - static_part: self.static_part.map(|layout, range| { - let CompiledTypeLayoutBody::Array { element } = layout.body else { - unreachable!(); - }; - (*element, range.index_array(stride, 0)) - }), - indexes, - } - } -} - 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AssignmentOrSlotIndex { - AssignmentIndex(usize), - SmallSlot(StatePartIndex), - BigSlot(StatePartIndex), -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AssignmentIO { - BigInput { - assignment_index: usize, - slot: StatePartIndex, - }, - SmallInput { - assignment_index: usize, - slot: StatePartIndex, - }, - BigOutput { - assignment_index: usize, - slot: StatePartIndex, - }, - SmallOutput { - assignment_index: usize, - slot: StatePartIndex, - }, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AssignmentsEdge { - IO(AssignmentIO), - AssignmentImmediatePredecessor { - predecessor_assignment_index: usize, - assignment_index: usize, - }, -} - -#[derive(Debug)] -enum Assignments { - Accumulating { - assignments: Vec, - }, - Finalized { - assignments: Box<[Assignment]>, - slots_layout: TypeLayout, - slot_readers: SlotToAssignmentIndexFullMap, - slot_writers: SlotToAssignmentIndexFullMap, - assignment_immediate_predecessors: Box<[Box<[usize]>]>, - assignment_immediate_successors: Box<[Box<[usize]>]>, - }, -} - -impl Default for Assignments { - fn default() -> Self { - Self::Accumulating { - assignments: Vec::new(), - } - } -} - -impl Assignments { - fn finalize(&mut self, slots_layout: TypeLayout) { - let Self::Accumulating { assignments } = self else { - unreachable!("already finalized"); - }; - let assignments = mem::take(assignments).into_boxed_slice(); - let mut slot_readers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); - let mut slot_writers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); - let mut assignment_immediate_predecessors = vec![BTreeSet::new(); assignments.len()]; - let mut assignment_immediate_successors = vec![BTreeSet::new(); assignments.len()]; - for (assignment_index, assignment) in assignments.iter().enumerate() { - slot_readers - .keys_for_assignment(assignment_index) - .extend([&assignment.inputs]); - slot_readers - .keys_for_assignment(assignment_index) - .extend(&assignment.conditions); - let SlotSet(TypeParts { - small_slots, - big_slots, - }) = &assignment.outputs; - for &slot in small_slots { - if let Some(&pred) = slot_writers[slot].last() { - assignment_immediate_predecessors[assignment_index].insert(pred); - assignment_immediate_successors[pred].insert(assignment_index); - } - slot_writers[slot].push(assignment_index); - } - for &slot in big_slots { - if let Some(&pred) = slot_writers[slot].last() { - assignment_immediate_predecessors[assignment_index].insert(pred); - assignment_immediate_successors[pred].insert(assignment_index); - } - slot_writers[slot].push(assignment_index); - } - } - *self = Self::Finalized { - assignments, - slots_layout, - slot_readers, - slot_writers, - assignment_immediate_predecessors: assignment_immediate_predecessors - .into_iter() - .map(Box::from_iter) - .collect(), - assignment_immediate_successors: assignment_immediate_successors - .into_iter() - .map(Box::from_iter) - .collect(), - }; - } - fn push(&mut self, v: Assignment) { - let Self::Accumulating { assignments } = self else { - unreachable!("already finalized"); - }; - assignments.push(v); - } - fn assignments(&self) -> &[Assignment] { - let Self::Finalized { assignments, .. } = self else { - unreachable!("Assignments::finalize should have been called"); - }; - assignments - } - fn slots_layout(&self) -> TypeLayout { - let Self::Finalized { slots_layout, .. 
} = self else { - unreachable!("Assignments::finalize should have been called"); - }; - *slots_layout - } - fn slot_readers(&self) -> &SlotToAssignmentIndexFullMap { - let Self::Finalized { slot_readers, .. } = self else { - unreachable!("Assignments::finalize should have been called"); - }; - slot_readers - } - fn slot_writers(&self) -> &SlotToAssignmentIndexFullMap { - let Self::Finalized { slot_writers, .. } = self else { - unreachable!("Assignments::finalize should have been called"); - }; - slot_writers - } - fn assignment_immediate_predecessors(&self) -> &[Box<[usize]>] { - let Self::Finalized { - assignment_immediate_predecessors, - .. - } = self - else { - unreachable!("Assignments::finalize should have been called"); - }; - assignment_immediate_predecessors - } - fn assignment_immediate_successors(&self) -> &[Box<[usize]>] { - let Self::Finalized { - assignment_immediate_successors, - .. - } = self - else { - unreachable!("Assignments::finalize should have been called"); - }; - assignment_immediate_successors - } - fn elements(&self) -> AssignmentsElements<'_> { - let SlotToAssignmentIndexFullMap(TypeParts { - small_slots, - big_slots, - }) = self.slot_readers(); - AssignmentsElements { - node_indexes: HashMap::with_capacity_and_hasher( - self.assignments().len() + small_slots.len() + big_slots.len(), - Default::default(), - ), - nodes: self.node_references(), - edges: self.edge_references(), - } - } -} - -impl GraphBase for Assignments { - type EdgeId = AssignmentsEdge; - type NodeId = AssignmentOrSlotIndex; -} - -#[derive(Debug, Clone, Copy)] -enum AssignmentsNodeRef<'a> { - Assignment { - index: usize, - assignment: &'a Assignment, - }, - SmallSlot(StatePartIndex, SlotDebugData), - BigSlot(StatePartIndex, SlotDebugData), -} - -impl<'a> NodeRef for AssignmentsNodeRef<'a> { - type NodeId = AssignmentOrSlotIndex; - type Weight = AssignmentsNodeRef<'a>; - - fn id(&self) -> Self::NodeId { - match *self { - AssignmentsNodeRef::Assignment { - index, - assignment: _, - } => AssignmentOrSlotIndex::AssignmentIndex(index), - AssignmentsNodeRef::SmallSlot(slot, _) => AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentsNodeRef::BigSlot(slot, _) => AssignmentOrSlotIndex::BigSlot(slot), - } - } - - fn weight(&self) -> &Self::Weight { - self - } -} - -impl<'a> petgraph::visit::Data for &'a Assignments { - type NodeWeight = AssignmentsNodeRef<'a>; - type EdgeWeight = AssignmentsEdge; -} - -struct AssignmentsElements<'a> { - node_indexes: HashMap, - nodes: AssignmentsNodes<'a>, - edges: AssignmentsEdges<'a>, -} - -impl<'a> Iterator for AssignmentsElements<'a> { - type Item = petgraph::data::Element< - <&'a Assignments as petgraph::visit::Data>::NodeWeight, - <&'a Assignments as petgraph::visit::Data>::EdgeWeight, - >; - - fn next(&mut self) -> Option { - let Self { - node_indexes, - nodes, - edges, - } = self; - if let Some(node) = nodes.next() { - node_indexes.insert(node.id(), node_indexes.len()); - return Some(petgraph::data::Element::Node { weight: node }); - } - let edge = edges.next()?; - Some(petgraph::data::Element::Edge { - source: node_indexes[&edge.source()], - target: node_indexes[&edge.target()], - weight: *edge.weight(), - }) - } -} - -#[derive(Clone)] -struct AssignmentsNodeIdentifiers { - assignment_indexes: std::ops::Range, - small_slots: std::ops::Range, - big_slots: std::ops::Range, -} - -impl AssignmentsNodeIdentifiers { - fn internal_iter<'a>(&'a mut self) -> impl Iterator + 'a { - let Self { - assignment_indexes, - small_slots, - big_slots, - } = self; - assignment_indexes 
- .map(AssignmentOrSlotIndex::AssignmentIndex) - .chain(small_slots.map(|value| { - AssignmentOrSlotIndex::SmallSlot(StatePartIndex { - value, - _phantom: PhantomData, - }) - })) - .chain(big_slots.map(|value| { - AssignmentOrSlotIndex::BigSlot(StatePartIndex { - value, - _phantom: PhantomData, - }) - })) - } -} - -impl Iterator for AssignmentsNodeIdentifiers { - type Item = AssignmentOrSlotIndex; - fn next(&mut self) -> Option { - self.internal_iter().next() - } - - fn nth(&mut self, n: usize) -> Option { - self.internal_iter().nth(n) - } -} - -impl<'a> IntoNodeIdentifiers for &'a Assignments { - type NodeIdentifiers = AssignmentsNodeIdentifiers; - - fn node_identifiers(self) -> Self::NodeIdentifiers { - let TypeLen { - small_slots, - big_slots, - } = self.slot_readers().len(); - AssignmentsNodeIdentifiers { - assignment_indexes: 0..self.assignments().len(), - small_slots: 0..small_slots.value, - big_slots: 0..big_slots.value, - } - } -} - -struct AssignmentsNodes<'a> { - assignments: &'a Assignments, - nodes: AssignmentsNodeIdentifiers, -} - -impl<'a> Iterator for AssignmentsNodes<'a> { - type Item = AssignmentsNodeRef<'a>; - - fn next(&mut self) -> Option { - self.nodes.next().map(|node| match node { - AssignmentOrSlotIndex::AssignmentIndex(index) => AssignmentsNodeRef::Assignment { - index, - assignment: &self.assignments.assignments()[index], - }, - AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNodeRef::SmallSlot( - slot, - *self.assignments.slots_layout().small_slots.debug_data(slot), - ), - AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNodeRef::BigSlot( - slot, - *self.assignments.slots_layout().big_slots.debug_data(slot), - ), - }) - } -} - -impl<'a> IntoNodeReferences for &'a Assignments { - type NodeRef = AssignmentsNodeRef<'a>; - type NodeReferences = AssignmentsNodes<'a>; - - fn node_references(self) -> Self::NodeReferences { - AssignmentsNodes { - assignments: self, - nodes: self.node_identifiers(), - } - } -} - -struct AssignmentsNeighborsDirected<'a> { - assignment_indexes: std::slice::Iter<'a, usize>, - small_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, - big_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, -} - -impl Iterator for AssignmentsNeighborsDirected<'_> { - type Item = AssignmentOrSlotIndex; - fn next(&mut self) -> Option { - let Self { - assignment_indexes, - small_slots, - big_slots, - } = self; - if let retval @ Some(_) = assignment_indexes - .next() - .copied() - .map(AssignmentOrSlotIndex::AssignmentIndex) - { - retval - } else if let retval @ Some(_) = small_slots - .next() - .copied() - .map(AssignmentOrSlotIndex::SmallSlot) - { - retval - } else if let retval @ Some(_) = big_slots - .next() - .copied() - .map(AssignmentOrSlotIndex::BigSlot) - { - retval - } else { - None - } - } -} - -impl<'a> IntoNeighbors for &'a Assignments { - type Neighbors = AssignmentsNeighborsDirected<'a>; - - fn neighbors(self, n: Self::NodeId) -> Self::Neighbors { - self.neighbors_directed(n, petgraph::Direction::Outgoing) - } -} - -impl<'a> IntoNeighborsDirected for &'a Assignments { - type NeighborsDirected = AssignmentsNeighborsDirected<'a>; - - fn neighbors_directed( - self, - n: Self::NodeId, - d: petgraph::Direction, - ) -> Self::NeighborsDirected { - use petgraph::Direction::*; - let slot_map = match d { - Outgoing => self.slot_readers(), - Incoming => self.slot_writers(), - }; - match n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - let assignment = &self.assignments()[assignment_index]; - let ( - 
assignment_indexes, - SlotSet(TypeParts { - small_slots, - big_slots, - }), - ) = match d { - Outgoing => ( - &self.assignment_immediate_successors()[assignment_index], - &assignment.outputs, - ), - Incoming => ( - &self.assignment_immediate_predecessors()[assignment_index], - &assignment.inputs, - ), - }; - AssignmentsNeighborsDirected { - assignment_indexes: assignment_indexes.iter(), - small_slots: small_slots.iter(), - big_slots: big_slots.iter(), - } - } - AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNeighborsDirected { - assignment_indexes: slot_map[slot].iter(), - small_slots: Default::default(), - big_slots: Default::default(), - }, - AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNeighborsDirected { - assignment_indexes: slot_map[slot].iter(), - small_slots: Default::default(), - big_slots: Default::default(), - }, - } - } -} - -impl EdgeRef for AssignmentsEdge { - type NodeId = AssignmentOrSlotIndex; - type EdgeId = AssignmentsEdge; - type Weight = AssignmentsEdge; - - fn source(&self) -> Self::NodeId { - match *self { - AssignmentsEdge::IO(AssignmentIO::BigInput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::BigSlot(slot), - AssignmentsEdge::IO(AssignmentIO::SmallInput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentsEdge::IO(AssignmentIO::BigOutput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::IO(AssignmentIO::SmallOutput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::AssignmentImmediatePredecessor { - predecessor_assignment_index, - assignment_index: _, - } => AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), - } - } - - fn target(&self) -> Self::NodeId { - match *self { - AssignmentsEdge::IO(AssignmentIO::BigInput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::IO(AssignmentIO::SmallInput { - assignment_index, - slot: _, - }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentsEdge::IO(AssignmentIO::BigOutput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::BigSlot(slot), - AssignmentsEdge::IO(AssignmentIO::SmallOutput { - assignment_index: _, - slot, - }) => AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentsEdge::AssignmentImmediatePredecessor { - predecessor_assignment_index: _, - assignment_index, - } => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - } - } - - fn weight(&self) -> &Self::Weight { - self - } - - fn id(&self) -> Self::EdgeId { - *self - } -} - -struct AssignmentsEdges<'a> { - assignments: &'a Assignments, - nodes: AssignmentsNodeIdentifiers, - outgoing_neighbors: Option<(AssignmentOrSlotIndex, AssignmentsNeighborsDirected<'a>)>, -} - -impl Iterator for AssignmentsEdges<'_> { - type Item = AssignmentsEdge; - - fn next(&mut self) -> Option { - loop { - if let Some((node, outgoing_neighbors)) = &mut self.outgoing_neighbors { - if let Some(outgoing_neighbor) = outgoing_neighbors.next() { - return Some(match (*node, outgoing_neighbor) { - ( - AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), - AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), - ) => unreachable!(), - ( - AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - ) => AssignmentsEdge::AssignmentImmediatePredecessor { - 
predecessor_assignment_index, - assignment_index, - }, - ( - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentOrSlotIndex::SmallSlot(slot), - ) => AssignmentsEdge::IO(AssignmentIO::SmallOutput { - assignment_index, - slot, - }), - ( - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - AssignmentOrSlotIndex::BigSlot(slot), - ) => AssignmentsEdge::IO(AssignmentIO::BigOutput { - assignment_index, - slot, - }), - ( - AssignmentOrSlotIndex::SmallSlot(slot), - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - ) => AssignmentsEdge::IO(AssignmentIO::SmallInput { - assignment_index, - slot, - }), - ( - AssignmentOrSlotIndex::BigSlot(slot), - AssignmentOrSlotIndex::AssignmentIndex(assignment_index), - ) => AssignmentsEdge::IO(AssignmentIO::BigInput { - assignment_index, - slot, - }), - }); - } - } - let node = self.nodes.next()?; - self.outgoing_neighbors = Some(( - node, - self.assignments - .neighbors_directed(node, petgraph::Direction::Outgoing), - )); - } - } -} - -impl<'a> IntoEdgeReferences for &'a Assignments { - type EdgeRef = AssignmentsEdge; - type EdgeReferences = AssignmentsEdges<'a>; - - fn edge_references(self) -> Self::EdgeReferences { - AssignmentsEdges { - assignments: self, - nodes: self.node_identifiers(), - outgoing_neighbors: None, - } - } -} - -struct AssignmentsVisitMap { - assignments: Vec, - slots: DenseSlotSet, -} - -impl VisitMap for AssignmentsVisitMap { - fn visit(&mut self, n: AssignmentOrSlotIndex) -> bool { - match n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - !mem::replace(&mut self.assignments[assignment_index], true) - } - AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.insert(slot), - AssignmentOrSlotIndex::BigSlot(slot) => self.slots.insert(slot), - } - } - - fn is_visited(&self, n: &AssignmentOrSlotIndex) -> bool { - match *n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - self.assignments[assignment_index] - } - AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.contains(slot), - AssignmentOrSlotIndex::BigSlot(slot) => self.slots.contains(slot), - } - } - - fn unvisit(&mut self, n: AssignmentOrSlotIndex) -> bool { - match n { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { - mem::replace(&mut self.assignments[assignment_index], false) - } - AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.remove(slot), - AssignmentOrSlotIndex::BigSlot(slot) => self.slots.remove(slot), - } - } -} - -impl Visitable for Assignments { - type Map = AssignmentsVisitMap; - - fn visit_map(self: &Self) -> Self::Map { - AssignmentsVisitMap { - assignments: vec![false; self.assignments().len()], - slots: DenseSlotSet::new(self.slot_readers().len()), - } - } - - fn reset_map(self: &Self, map: &mut Self::Map) { - let AssignmentsVisitMap { assignments, slots } = map; - assignments.clear(); - assignments.resize(self.assignments().len(), false); - if slots.len() != self.slot_readers().len() { - *slots = DenseSlotSet::new(self.slot_readers().len()); - } else { - slots.clear(); - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct DenseSlotSet(TypeParts); - -impl DenseSlotSet { - fn new(len: TypeLen) -> Self { - let TypeLen { - small_slots, - big_slots, - } = len; - Self(TypeParts { - small_slots: vec![false; small_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - big_slots: vec![false; big_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - }) - } - fn len(&self) -> TypeLen { - TypeLen { - small_slots: StatePartLen { - value: 
self.0.small_slots.len() as _, - _phantom: PhantomData, - }, - big_slots: StatePartLen { - value: self.0.big_slots.len() as _, - _phantom: PhantomData, - }, - } - } - fn clear(&mut self) { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.fill(false); - big_slots.fill(false); - } -} - -impl StatePartsValue for DenseSlotSet { - type Value = Box<[bool]>; -} - -trait DenseSlotSetMethods: Extend> { - fn contains(&self, k: StatePartIndex) -> bool; - fn remove(&mut self, k: StatePartIndex) -> bool { - self.take(k).is_some() - } - fn take(&mut self, k: StatePartIndex) -> Option>; - fn replace(&mut self, k: StatePartIndex) -> Option>; - fn insert(&mut self, k: StatePartIndex) -> bool { - self.replace(k).is_none() - } -} - -impl Extend> for DenseSlotSet -where - Self: DenseSlotSetMethods, -{ - fn extend>>(&mut self, iter: T) { - iter.into_iter().for_each(|v| { - self.insert(v); - }); - } -} - -impl DenseSlotSetMethods for DenseSlotSet { - fn contains(&self, k: StatePartIndex) -> bool { - self.0.small_slots[k.as_usize()] - } - - fn take( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(self.0.small_slots.get_mut(k.as_usize())?, false).then_some(k) - } - - fn replace( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(&mut self.0.small_slots[k.as_usize()], true).then_some(k) - } -} - -impl DenseSlotSetMethods for DenseSlotSet { - fn contains(&self, k: StatePartIndex) -> bool { - self.0.big_slots[k.as_usize()] - } - - fn take( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(self.0.big_slots.get_mut(k.as_usize())?, false).then_some(k) - } - - fn replace( - &mut self, - k: StatePartIndex, - ) -> Option> { - mem::replace(&mut self.0.big_slots[k.as_usize()], true).then_some(k) - } -} - -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] -struct SlotVec(TypeParts); - -impl SlotVec { - fn is_empty(&self) -> bool { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.is_empty() && big_slots.is_empty() - } -} - -impl StatePartsValue for SlotVec { - type Value = Vec>; -} - -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] -struct SlotSet(TypeParts); - -impl SlotSet { - fn is_empty(&self) -> bool { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.is_empty() && big_slots.is_empty() - } - fn for_each( - &self, - small_slots_fn: impl FnMut(StatePartIndex), - big_slots_fn: impl FnMut(StatePartIndex), - ) { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.iter().copied().for_each(small_slots_fn); - big_slots.iter().copied().for_each(big_slots_fn); - } - fn all( - &self, - small_slots_fn: impl FnMut(StatePartIndex) -> bool, - big_slots_fn: impl FnMut(StatePartIndex) -> bool, - ) -> bool { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.iter().copied().all(small_slots_fn) - && big_slots.iter().copied().all(big_slots_fn) - } -} - -impl StatePartsValue for SlotSet { - type Value = BTreeSet>; -} - -impl Extend> for SlotSet { - fn extend>>(&mut self, iter: T) { - self.0.small_slots.extend(iter); - } -} - -impl Extend> for SlotSet { - fn extend>>(&mut self, iter: T) { - self.0.big_slots.extend(iter); - } -} - -impl Extend> for SlotSet -where - Self: Extend>, -{ - fn extend>>(&mut self, iter: T) { - self.extend(iter.into_iter().flat_map(|v| v.iter())); - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each( - |TypeIndexRange { - small_slots, - big_slots, - }| { - 
self.extend(small_slots.iter()); - self.extend(big_slots.iter()); - }, - ) - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each( - |TypeArrayIndex { - small_slots, - big_slots, - }| { - self.extend([small_slots]); - self.extend([big_slots]); - }, - ) - } -} - -impl Extend> for SlotSet { - fn extend>>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|v| v.index)); - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each(|cond_body| match cond_body { - CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { - self.extend([cond.range]); - } - CondBody::MatchArm { - discriminant, - variant_index: _, - } => self.extend([discriminant]), - }) - } -} - -impl Extend for SlotSet { - fn extend>(&mut self, iter: T) { - self.extend(iter.into_iter().map(|v| v.body)) - } -} - -#[derive(Debug)] -struct Assignment { - inputs: SlotSet, - outputs: SlotSet, - conditions: Interned<[Cond]>, - insns: Vec, - source_location: SourceLocation, -} - -#[derive(Debug)] -struct SlotToAssignmentIndexFullMap(TypeParts); - -impl StatePartsValue for SlotToAssignmentIndexFullMap { - type Value = Box<[Vec]>; -} - -impl SlotToAssignmentIndexFullMap { - fn new(len: TypeLen) -> Self { - let TypeLen { - small_slots, - big_slots, - } = len; - Self(TypeParts { - small_slots: vec![Vec::new(); small_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - big_slots: vec![Vec::new(); big_slots.value.try_into().expect("length too big")] - .into_boxed_slice(), - }) - } - fn len(&self) -> TypeLen { - TypeLen { - small_slots: StatePartLen { - value: self.0.small_slots.len() as _, - _phantom: PhantomData, - }, - big_slots: StatePartLen { - value: self.0.big_slots.len() as _, - _phantom: PhantomData, - }, - } - } - fn keys_for_assignment( - &mut self, - assignment_index: usize, - ) -> SlotToAssignmentIndexFullMapKeysForAssignment<'_> { - SlotToAssignmentIndexFullMapKeysForAssignment { - map: self, - assignment_index, - } - } - fn for_each( - &self, - mut small_slots_fn: impl FnMut(StatePartIndex, &[usize]), - mut big_slots_fn: impl FnMut(StatePartIndex, &[usize]), - ) { - let Self(TypeParts { - small_slots, - big_slots, - }) = self; - small_slots.iter().enumerate().for_each(|(k, v)| { - small_slots_fn( - StatePartIndex { - value: k as _, - _phantom: PhantomData, - }, - v, - ) - }); - big_slots.iter().enumerate().for_each(|(k, v)| { - big_slots_fn( - StatePartIndex { - value: k as _, - _phantom: PhantomData, - }, - v, - ) - }); - } -} - -impl std::ops::Index> for SlotToAssignmentIndexFullMap { - type Output = Vec; - - fn index(&self, index: StatePartIndex) -> &Self::Output { - &self.0.small_slots[index.as_usize()] - } -} - -impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { - fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { - &mut self.0.small_slots[index.as_usize()] - } -} - -impl std::ops::Index> for SlotToAssignmentIndexFullMap { - type Output = Vec; - - fn index(&self, index: StatePartIndex) -> &Self::Output { - &self.0.big_slots[index.as_usize()] - } -} - -impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { - fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { - &mut self.0.big_slots[index.as_usize()] - } -} - -struct SlotToAssignmentIndexFullMapKeysForAssignment<'a> { - map: &'a mut SlotToAssignmentIndexFullMap, - assignment_index: usize, -} - -impl<'a, K: StatePartKind> Extend<&'a StatePartIndex> - for SlotToAssignmentIndexFullMapKeysForAssignment<'_> -where - 
Self: Extend>, -{ - fn extend>>(&mut self, iter: T) { - self.extend(iter.into_iter().copied()); - } -} - -impl Extend> - for SlotToAssignmentIndexFullMapKeysForAssignment<'_> -where - SlotToAssignmentIndexFullMap: IndexMut, Output = Vec>, -{ - fn extend>>(&mut self, iter: T) { - iter.into_iter() - .for_each(|slot| self.map[slot].push(self.assignment_index)); - } -} - -impl<'a> Extend<&'a SlotSet> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each( - |SlotSet(TypeParts { - small_slots, - big_slots, - })| { - self.extend(small_slots); - self.extend(big_slots); - }, - ); - } -} - -impl<'a> Extend<&'a Cond> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each(|cond| match cond.body { - CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { - let CompiledValue { - range: - TypeIndexRange { - small_slots, - big_slots, - }, - layout: _, - write: _, - } = cond; - self.extend(small_slots.iter()); - self.extend(big_slots.iter()); - } - CondBody::MatchArm { - discriminant, - variant_index: _, - } => self.extend([discriminant]), - }); - } -} - -impl Assignment { - fn new( - conditions: Interned<[Cond]>, - insns: Vec, - source_location: SourceLocation, - ) -> Self { - let mut inputs = SlotSet::default(); - let mut outputs = SlotSet::default(); - for insn in &insns { - let insn = match insn { - InsnOrLabel::Insn(insn) => insn, - InsnOrLabel::Label(_) => continue, - }; - for InsnField { ty, kind } in insn.fields() { - match (kind, ty) { - (InsnFieldKind::Input, InsnFieldType::SmallSlot(&slot)) => { - inputs.extend([slot]); - } - (InsnFieldKind::Input, InsnFieldType::BigSlot(&slot)) => { - inputs.extend([slot]); - } - ( - InsnFieldKind::Input, - InsnFieldType::SmallSlotArrayIndexed(&array_indexed), - ) => { - array_indexed.for_each_target(|slot| inputs.extend([slot])); - inputs.extend(array_indexed.indexes); - } - (InsnFieldKind::Input, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { - array_indexed.for_each_target(|slot| inputs.extend([slot])); - inputs.extend(array_indexed.indexes); - } - (InsnFieldKind::Output, InsnFieldType::SmallSlot(&slot)) => { - outputs.extend([slot]); - } - (InsnFieldKind::Output, InsnFieldType::BigSlot(&slot)) => { - outputs.extend([slot]); - } - ( - InsnFieldKind::Output, - InsnFieldType::SmallSlotArrayIndexed(&array_indexed), - ) => { - array_indexed.for_each_target(|slot| { - outputs.extend([slot]); - }); - inputs.extend(array_indexed.indexes); - } - (InsnFieldKind::Output, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { - array_indexed.for_each_target(|slot| { - outputs.extend([slot]); - }); - inputs.extend(array_indexed.indexes); - } - ( - _, - InsnFieldType::Memory(_) - | InsnFieldType::SmallUInt(_) - | InsnFieldType::SmallSInt(_) - | InsnFieldType::InternedBigInt(_) - | InsnFieldType::U8(_) - | InsnFieldType::USize(_) - | InsnFieldType::Empty(_), - ) - | ( - InsnFieldKind::Immediate - | InsnFieldKind::Memory - | InsnFieldKind::BranchTarget, - _, - ) => {} - } - } - } - Self { - inputs, - outputs, - conditions, - insns, - source_location, - } - } -} - -#[derive(Debug)] -struct RegisterReset { - is_async: bool, - init: CompiledValue, - rst: StatePartIndex, -} - -#[derive(Debug, Clone, Copy)] -struct ClockTrigger { - last_clk_was_low: StatePartIndex, - clk: StatePartIndex, - clk_triggered: StatePartIndex, - source_location: SourceLocation, -} - -#[derive(Debug)] -struct Register { - value: CompiledValue, - clk_triggered: 
StatePartIndex, - reset: Option, - source_location: SourceLocation, -} - -#[derive(Debug)] - -struct MemoryPort { - clk_triggered: StatePartIndex, - addr_delayed: Vec>, - en_delayed: Vec>, - data_layout: CompiledTypeLayout, - read_data_delayed: Vec, - write_data_delayed: Vec, - write_mask_delayed: Vec, - write_mode_delayed: Vec>, - write_insns: Vec, -} - -struct MemoryPortReadInsns<'a> { - addr: StatePartIndex, - en: StatePartIndex, - write_mode: Option>, - data: TypeIndexRange, - insns: &'a mut Vec, -} - -struct MemoryPortWriteInsns<'a> { - addr: StatePartIndex, - en: StatePartIndex, - write_mode: Option>, - data: TypeIndexRange, - mask: TypeIndexRange, - insns: &'a mut Vec, -} - -#[derive(Debug)] -struct Memory { - mem: Mem, - memory: StatePartIndex, - trace: TraceMem, - ports: Vec, -} - -#[derive(Copy, Clone)] -enum MakeTraceDeclTarget { - Expr(Expr), - Memory { - id: TraceMemoryId, - depth: usize, - stride: usize, - start: usize, - ty: CanonicalType, - }, -} - -impl MakeTraceDeclTarget { - fn flow(self) -> Flow { - match self { - MakeTraceDeclTarget::Expr(expr) => Expr::flow(expr), - MakeTraceDeclTarget::Memory { .. } => Flow::Duplex, - } - } - fn ty(self) -> CanonicalType { - match self { - MakeTraceDeclTarget::Expr(expr) => Expr::ty(expr), - MakeTraceDeclTarget::Memory { ty, .. } => ty, - } - } -} - -struct DebugOpaque(T); - -impl fmt::Debug for DebugOpaque { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("<...>") - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -struct CompiledExternModule { - module_io_targets: Interned<[Target]>, - module_io: Interned<[CompiledValue]>, - simulation: ExternModuleSimulation, -} - -#[derive(Debug)] -pub struct Compiler { - insns: Insns, - original_base_module: Interned>, - base_module: Interned>, - modules: HashMap, - extern_modules: Vec, - compiled_values: HashMap>, - compiled_exprs: HashMap, CompiledExpr>, - compiled_exprs_to_values: HashMap, CompiledValue>, - decl_conditions: HashMap>, - compiled_values_to_dyn_array_indexes: - HashMap, StatePartIndex>, - compiled_value_bool_dest_is_small_map: - HashMap, StatePartIndex>, - assignments: Assignments, - clock_triggers: Vec, - compiled_value_to_clock_trigger_map: HashMap, ClockTrigger>, - enum_discriminants: HashMap, StatePartIndex>, - registers: Vec, - traces: SimTraces>>, - memories: Vec, - dump_assignments_dot: Option>>, -} - -impl Compiler { - pub fn new(base_module: Interned>) -> Self { - let original_base_module = base_module; - let base_module = deduce_resets(base_module, true) - .unwrap_or_else(|e| panic!("failed to deduce reset types: {e}")); - Self { - insns: Insns::new(), - original_base_module, - base_module, - modules: HashMap::default(), - extern_modules: Vec::new(), - compiled_values: HashMap::default(), - compiled_exprs: HashMap::default(), - compiled_exprs_to_values: HashMap::default(), - decl_conditions: HashMap::default(), - compiled_values_to_dyn_array_indexes: HashMap::default(), - compiled_value_bool_dest_is_small_map: HashMap::default(), - assignments: Assignments::default(), - clock_triggers: Vec::new(), - compiled_value_to_clock_trigger_map: HashMap::default(), - enum_discriminants: HashMap::default(), - registers: Vec::new(), - traces: SimTraces(Vec::new()), - memories: Vec::new(), - dump_assignments_dot: None, - } - } - #[doc(hidden)] - /// This is explicitly unstable and may be changed/removed at any time - pub fn dump_assignments_dot(&mut self, callback: Box) { - self.dump_assignments_dot = Some(DebugOpaque(callback)); - } - fn 
new_sim_trace(&mut self, kind: SimTraceKind) -> TraceScalarId { - let id = TraceScalarId(self.traces.0.len()); - self.traces.0.push(SimTrace { - kind, - state: (), - last_state: (), - }); - id - } - fn make_trace_scalar_helper( - &mut self, - instantiated_module: InstantiatedModule, - target: MakeTraceDeclTarget, - source_location: SourceLocation, - small_kind: impl FnOnce(StatePartIndex) -> SimTraceKind, - big_kind: impl FnOnce(StatePartIndex) -> SimTraceKind, - ) -> TraceLocation { - match target { - MakeTraceDeclTarget::Expr(target) => { - let compiled_value = self.compile_expr(instantiated_module, target); - let compiled_value = self.compiled_expr_to_value(compiled_value, source_location); - TraceLocation::Scalar(self.new_sim_trace(match compiled_value.range.len() { - TypeLen::A_SMALL_SLOT => small_kind(compiled_value.range.small_slots.start), - TypeLen::A_BIG_SLOT => big_kind(compiled_value.range.big_slots.start), - _ => unreachable!(), - })) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty, - } => TraceLocation::Memory(TraceMemoryLocation { - id, - depth, - stride, - start, - len: ty.bit_width(), - }), - } - } - fn make_trace_scalar( - &mut self, - instantiated_module: InstantiatedModule, - target: MakeTraceDeclTarget, - name: Interned, - source_location: SourceLocation, - ) -> TraceDecl { - let flow = target.flow(); - match target.ty() { - CanonicalType::UInt(ty) => TraceUInt { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallUInt { index, ty }, - |index| SimTraceKind::BigUInt { index, ty }, - ), - name, - ty, - flow, - } - .into(), - CanonicalType::SInt(ty) => TraceSInt { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallSInt { index, ty }, - |index| SimTraceKind::BigSInt { index, ty }, - ), - name, - ty, - flow, - } - .into(), - CanonicalType::Bool(_) => TraceBool { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallBool { index }, - |index| SimTraceKind::BigBool { index }, - ), - name, - flow, - } - .into(), - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(ty) => { - assert_eq!(ty.discriminant_bit_width(), ty.type_properties().bit_width); - let location = match target { - MakeTraceDeclTarget::Expr(target) => { - let compiled_value = self.compile_expr(instantiated_module, target); - let compiled_value = - self.compiled_expr_to_value(compiled_value, source_location); - let discriminant = self.compile_enum_discriminant( - compiled_value.map_ty(Enum::from_canonical), - source_location, - ); - TraceLocation::Scalar(self.new_sim_trace(SimTraceKind::EnumDiscriminant { - index: discriminant, - ty, - })) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => TraceLocation::Memory(TraceMemoryLocation { - id, - depth, - stride, - start, - len: ty.type_properties().bit_width, - }), - }; - TraceFieldlessEnum { - location, - name, - ty, - flow, - } - .into() - } - CanonicalType::Bundle(_) | CanonicalType::PhantomConst(_) => unreachable!(), - CanonicalType::AsyncReset(_) => TraceAsyncReset { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallAsyncReset { index }, - |index| SimTraceKind::BigAsyncReset { index }, - ), - name, - flow, - } - .into(), - CanonicalType::SyncReset(_) => TraceSyncReset { - location: 
self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallSyncReset { index }, - |index| SimTraceKind::BigSyncReset { index }, - ), - name, - flow, - } - .into(), - CanonicalType::Reset(_) => unreachable!(), - CanonicalType::Clock(_) => TraceClock { - location: self.make_trace_scalar_helper( - instantiated_module, - target, - source_location, - |index| SimTraceKind::SmallClock { index }, - |index| SimTraceKind::BigClock { index }, - ), - name, - flow, - } - .into(), - } - } - fn make_trace_decl_child( - &mut self, - instantiated_module: InstantiatedModule, - target: MakeTraceDeclTarget, - name: Interned, - source_location: SourceLocation, - ) -> TraceDecl { - match target.ty() { - CanonicalType::Array(ty) => { - let elements = Interned::from_iter((0..ty.len()).map(|index| { - self.make_trace_decl_child( - instantiated_module, - match target { - MakeTraceDeclTarget::Expr(target) => MakeTraceDeclTarget::Expr( - Expr::::from_canonical(target)[index], - ), - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start: start + ty.element().bit_width() * index, - ty: ty.element(), - }, - }, - Intern::intern_owned(format!("[{index}]")), - source_location, - ) - })); - TraceArray { - name, - elements, - ty, - flow: target.flow(), - } - .into() - } - CanonicalType::Enum(ty) => { - if ty.variants().iter().all(|v| v.ty.is_none()) { - self.make_trace_scalar(instantiated_module, target, name, source_location) - } else { - let flow = target.flow(); - let location = match target { - MakeTraceDeclTarget::Expr(target) => { - let compiled_value = self.compile_expr(instantiated_module, target); - let compiled_value = - self.compiled_expr_to_value(compiled_value, source_location); - let discriminant = self.compile_enum_discriminant( - compiled_value.map_ty(Enum::from_canonical), - source_location, - ); - TraceLocation::Scalar(self.new_sim_trace( - SimTraceKind::EnumDiscriminant { - index: discriminant, - ty, - }, - )) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => TraceLocation::Memory(TraceMemoryLocation { - id, - depth, - stride, - start, - len: ty.discriminant_bit_width(), - }), - }; - let discriminant = TraceEnumDiscriminant { - location, - name: "$tag".intern(), - ty, - flow, - }; - let non_empty_fields = - Interned::from_iter(ty.variants().into_iter().enumerate().flat_map( - |(variant_index, variant)| { - variant.ty.map(|variant_ty| { - self.make_trace_decl_child( - instantiated_module, - match target { - MakeTraceDeclTarget::Expr(target) => { - MakeTraceDeclTarget::Expr( - ops::VariantAccess::new_by_index( - Expr::::from_canonical(target), - variant_index, - ) - .to_expr(), - ) - } - MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start, - ty: _, - } => MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start: start + ty.discriminant_bit_width(), - ty: variant_ty, - }, - }, - variant.name, - source_location, - ) - }) - }, - )); - TraceEnumWithFields { - name, - discriminant, - non_empty_fields, - ty, - flow, - } - .into() - } - } - CanonicalType::Bundle(ty) => { - let fields = Interned::from_iter(ty.fields().iter().zip(ty.field_offsets()).map( - |(field, field_offset)| { - self.make_trace_decl_child( - instantiated_module, - match target { - MakeTraceDeclTarget::Expr(target) => { - MakeTraceDeclTarget::Expr(Expr::field( - Expr::::from_canonical(target), - &field.name, - )) - } - MakeTraceDeclTarget::Memory { - id, 
- depth, - stride, - start, - ty: _, - } => MakeTraceDeclTarget::Memory { - id, - depth, - stride, - start: start + field_offset, - ty: field.ty, - }, - }, - field.name, - source_location, - ) - }, - )); - TraceBundle { - name, - fields, - ty, - flow: target.flow(), - } - .into() - } - CanonicalType::UInt(_) - | CanonicalType::SInt(_) - | CanonicalType::Bool(_) - | CanonicalType::AsyncReset(_) - | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => { - self.make_trace_scalar(instantiated_module, target, name, source_location) - } - CanonicalType::PhantomConst(_) => TraceBundle { - name, - fields: Interned::default(), - ty: Bundle::new(Interned::default()), - flow: target.flow(), - } - .into(), - } - } - fn make_trace_decl( - &mut self, - instantiated_module: InstantiatedModule, - target_base: TargetBase, - ) -> TraceDecl { - let target = MakeTraceDeclTarget::Expr(target_base.to_expr()); - match target_base { - TargetBase::ModuleIO(module_io) => TraceModuleIO { - name: module_io.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - module_io.name(), - module_io.source_location(), - ) - .intern(), - ty: module_io.ty(), - flow: module_io.flow(), - } - .into(), - TargetBase::MemPort(mem_port) => { - let name = Intern::intern_owned(mem_port.port_name().to_string()); - let TraceDecl::Scope(TraceScope::Bundle(bundle)) = self.make_trace_decl_child( - instantiated_module, - target, - name, - mem_port.source_location(), - ) else { - unreachable!() - }; - TraceMemPort { - name, - bundle, - ty: mem_port.ty(), - } - .into() - } - TargetBase::Reg(reg) => TraceReg { - name: reg.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - reg.name(), - reg.source_location(), - ) - .intern(), - ty: reg.ty(), - } - .into(), - TargetBase::RegSync(reg) => TraceReg { - name: reg.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - reg.name(), - reg.source_location(), - ) - .intern(), - ty: reg.ty(), - } - .into(), - TargetBase::RegAsync(reg) => TraceReg { - name: reg.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - reg.name(), - reg.source_location(), - ) - .intern(), - ty: reg.ty(), - } - .into(), - TargetBase::Wire(wire) => TraceWire { - name: wire.name(), - child: self - .make_trace_decl_child( - instantiated_module, - target, - wire.name(), - wire.source_location(), - ) - .intern(), - ty: wire.ty(), - } - .into(), - TargetBase::Instance(instance) => { - let TraceDecl::Scope(TraceScope::Bundle(instance_io)) = self.make_trace_decl_child( - instantiated_module, - target, - instance.name(), - instance.source_location(), - ) else { - unreachable!() - }; - let compiled_module = &self.modules[&InstantiatedModule::Child { - parent: instantiated_module.intern(), - instance: instance.intern(), - }]; - TraceInstance { - name: instance.name(), - instance_io, - module: compiled_module.trace_decls, - ty: instance.ty(), - } - .into() - } - } - } - fn compile_value( - &mut self, - target: TargetInInstantiatedModule, - ) -> CompiledValue { - if let Some(&retval) = self.compiled_values.get(&target) { - return retval; - } - let retval = match target.target { - Target::Base(base) => { - let unprefixed_layout = CompiledTypeLayout::get(base.canonical_ty()); - let layout = unprefixed_layout.with_prefixed_debug_names(&format!( - "{:?}.{:?}", - target.instantiated_module, - base.target_name() - )); - let range = self.insns.allocate_variable(&layout.layout); - let write = match *base 
{ - TargetBase::ModuleIO(_) - | TargetBase::MemPort(_) - | TargetBase::Wire(_) - | TargetBase::Instance(_) => None, - TargetBase::Reg(_) | TargetBase::RegSync(_) | TargetBase::RegAsync(_) => { - let write_layout = unprefixed_layout.with_prefixed_debug_names(&format!( - "{:?}.{:?}$next", - target.instantiated_module, - base.target_name() - )); - Some(( - write_layout, - self.insns.allocate_variable(&write_layout.layout), - )) - } - }; - CompiledValue { - range, - layout, - write, - } - } - Target::Child(target_child) => { - let parent = self.compile_value(TargetInInstantiatedModule { - instantiated_module: target.instantiated_module, - target: *target_child.parent(), - }); - match *target_child.path_element() { - TargetPathElement::BundleField(TargetPathBundleField { name }) => { - parent.map_ty(Bundle::from_canonical).field_by_name(name) - } - TargetPathElement::ArrayElement(TargetPathArrayElement { index }) => { - parent.map_ty(Array::from_canonical).element(index) - } - TargetPathElement::DynArrayElement(_) => unreachable!(), - } - } - }; - self.compiled_values.insert(target, retval); - retval - } - fn compiled_expr_to_value( - &mut self, - expr: CompiledExpr, - source_location: SourceLocation, - ) -> CompiledValue { - if let Some(&retval) = self.compiled_exprs_to_values.get(&expr) { - return retval; - } - assert!( - expr.static_part.layout.ty.is_passive(), - "invalid expression passed to compiled_expr_to_value -- type must be passive", - ); - let CompiledExpr { - static_part, - indexes, - } = expr; - let retval = if indexes.as_ref().is_empty() { - CompiledValue { - layout: static_part.layout, - range: static_part.range, - write: None, - } - } else { - let layout = static_part.layout.with_anonymized_debug_info(); - let retval = CompiledValue { - layout, - range: self.insns.allocate_variable(&layout.layout), - write: None, - }; - let TypeIndexRange { - small_slots, - big_slots, - } = retval.range; - self.add_assignment( - Interned::default(), - small_slots - .iter() - .zip(static_part.range.small_slots.iter()) - .map(|(dest, base)| Insn::ReadSmallIndexed { - dest, - src: StatePartArrayIndexed { - base, - indexes: indexes.small_slots, - }, - }) - .chain( - big_slots - .iter() - .zip(static_part.range.big_slots.iter()) - .map(|(dest, base)| Insn::ReadIndexed { - dest, - src: StatePartArrayIndexed { - base, - indexes: indexes.big_slots, - }, - }), - ), - source_location, - ); - retval - }; - self.compiled_exprs_to_values.insert(expr, retval); - retval - } - fn add_assignment>( - &mut self, - conditions: Interned<[Cond]>, - insns: impl IntoIterator, - source_location: SourceLocation, - ) { - let insns = Vec::from_iter(insns.into_iter().map(Into::into)); - self.assignments - .push(Assignment::new(conditions, insns, source_location)); - } - fn simple_big_expr_input( - &mut self, - instantiated_module: InstantiatedModule, - input: Expr, - ) -> StatePartIndex { - let input = self.compile_expr(instantiated_module, input); - let input = - self.compiled_expr_to_value(input, instantiated_module.leaf_module().source_location()); - assert_eq!(input.range.len(), TypeLen::A_BIG_SLOT); - input.range.big_slots.start - } - fn compile_expr_helper( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - make_insns: impl FnOnce(&mut Self, TypeIndexRange) -> Vec, - ) -> CompiledValue { - let layout = CompiledTypeLayout::get(dest_ty); - let range = self.insns.allocate_variable(&layout.layout); - let retval = CompiledValue { - layout, - range, - write: None, - }; - let insns = 
make_insns(self, range); - self.add_assignment( - Interned::default(), - insns, - instantiated_module.leaf_module().source_location(), - ); - retval - } - fn simple_nary_big_expr_helper( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - make_insns: impl FnOnce(StatePartIndex) -> Vec, - ) -> CompiledValue { - self.compile_expr_helper(instantiated_module, dest_ty, |_, dest| { - assert_eq!(dest.len(), TypeLen::A_BIG_SLOT); - make_insns(dest.big_slots.start) - }) - } - fn simple_nary_big_expr( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - inputs: [Expr; N], - make_insns: impl FnOnce( - StatePartIndex, - [StatePartIndex; N], - ) -> Vec, - ) -> CompiledValue { - let inputs = inputs.map(|input| self.simple_big_expr_input(instantiated_module, input)); - self.simple_nary_big_expr_helper(instantiated_module, dest_ty, |dest| { - make_insns(dest, inputs) - }) - } - fn compiled_value_to_dyn_array_index( - &mut self, - compiled_value: CompiledValue, - source_location: SourceLocation, - ) -> StatePartIndex { - if let Some(&retval) = self - .compiled_values_to_dyn_array_indexes - .get(&compiled_value) - { - return retval; - } - let mut ty = compiled_value.layout.ty; - ty.width = ty.width.min(SmallUInt::BITS as usize); - let retval = match compiled_value.range.len() { - TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, - TypeLen::A_BIG_SLOT => { - let debug_data = SlotDebugData { - name: Interned::default(), - ty: ty.canonical(), - }; - let dest = self - .insns - .allocate_variable(&TypeLayout { - small_slots: StatePartLayout::scalar(debug_data, ()), - big_slots: StatePartLayout::empty(), - }) - .small_slots - .start; - self.add_assignment( - Interned::default(), - vec![Insn::CastBigToArrayIndex { - dest, - src: compiled_value.range.big_slots.start, - }], - source_location, - ); - dest - } - _ => unreachable!(), - }; - self.compiled_values_to_dyn_array_indexes - .insert(compiled_value, retval); - retval - } - fn compiled_value_bool_dest_is_small( - &mut self, - compiled_value: CompiledValue, - source_location: SourceLocation, - ) -> StatePartIndex { - if let Some(&retval) = self - .compiled_value_bool_dest_is_small_map - .get(&compiled_value) - { - return retval; - } - let retval = match compiled_value.range.len() { - TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, - TypeLen::A_BIG_SLOT => { - let debug_data = SlotDebugData { - name: Interned::default(), - ty: Bool.canonical(), - }; - let dest = self - .insns - .allocate_variable(&TypeLayout { - small_slots: StatePartLayout::scalar(debug_data, ()), - big_slots: StatePartLayout::empty(), - }) - .small_slots - .start; - self.add_assignment( - Interned::default(), - vec![Insn::IsNonZeroDestIsSmall { - dest, - src: compiled_value.range.big_slots.start, - }], - source_location, - ); - dest - } - _ => unreachable!(), - }; - self.compiled_value_bool_dest_is_small_map - .insert(compiled_value, retval); - retval - } - fn compile_cast_scalar_to_bits( - &mut self, - instantiated_module: InstantiatedModule, - arg: Expr, - cast_fn: impl FnOnce(Expr) -> Expr, - ) -> CompiledValue { - let arg = Expr::::from_canonical(arg); - let retval = cast_fn(arg); - let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); - let retval = self - .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); - retval.map_ty(UInt::from_canonical) - } - fn compile_cast_aggregate_to_bits( - &mut self, - instantiated_module: InstantiatedModule, 
- parts: impl IntoIterator>, - ) -> CompiledValue { - let retval = parts - .into_iter() - .map(|part| part.cast_to_bits()) - .reduce(|accumulator, part| accumulator | (part << Expr::ty(accumulator).width)) - .unwrap_or_else(|| UInt[0].zero().to_expr()); - let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); - let retval = self - .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); - retval.map_ty(UInt::from_canonical) - } - fn compile_cast_to_bits( - &mut self, - instantiated_module: InstantiatedModule, - expr: ops::CastToBits, - ) -> CompiledValue { - match Expr::ty(expr.arg()) { - CanonicalType::UInt(_) => { - self.compile_cast_scalar_to_bits(instantiated_module, expr.arg(), |arg| arg) - } - CanonicalType::SInt(ty) => self.compile_cast_scalar_to_bits( - instantiated_module, - expr.arg(), - |arg: Expr| arg.cast_to(ty.as_same_width_uint()), - ), - CanonicalType::Bool(_) - | CanonicalType::AsyncReset(_) - | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => self.compile_cast_scalar_to_bits( - instantiated_module, - expr.arg(), - |arg: Expr| arg.cast_to(UInt[1]), - ), - CanonicalType::Array(ty) => self.compile_cast_aggregate_to_bits( - instantiated_module, - (0..ty.len()).map(|index| Expr::::from_canonical(expr.arg())[index]), - ), - CanonicalType::Enum(ty) => self - .simple_nary_big_expr( - instantiated_module, - UInt[ty.type_properties().bit_width].canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::Copy { dest, src }], - ) - .map_ty(UInt::from_canonical), - CanonicalType::Bundle(ty) => self.compile_cast_aggregate_to_bits( - instantiated_module, - ty.fields().iter().map(|field| { - Expr::field(Expr::::from_canonical(expr.arg()), &field.name) - }), - ), - CanonicalType::PhantomConst(_) => { - self.compile_cast_aggregate_to_bits(instantiated_module, []) - } - } - } - fn compile_cast_bits_to( - &mut self, - instantiated_module: InstantiatedModule, - expr: ops::CastBitsTo, - ) -> CompiledValue { - let retval = match expr.ty() { - CanonicalType::UInt(_) => Expr::canonical(expr.arg()), - CanonicalType::SInt(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::Bool(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::Array(ty) => { - let stride = ty.element().bit_width(); - Expr::::canonical( - ops::ArrayLiteral::new( - ty.element(), - Interned::from_iter((0..ty.len()).map(|index| { - let start = stride * index; - let end = start + stride; - expr.arg()[start..end].cast_bits_to(ty.element()) - })), - ) - .to_expr(), - ) - } - ty @ CanonicalType::Enum(_) => { - return self.simple_nary_big_expr( - instantiated_module, - ty, - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::Copy { dest, src }], - ); - } - CanonicalType::Bundle(ty) => Expr::canonical( - ops::BundleLiteral::new( - ty, - Interned::from_iter(ty.field_offsets().iter().zip(&ty.fields()).map( - |(&offset, &field)| { - let end = offset + field.ty.bit_width(); - expr.arg()[offset..end].cast_bits_to(field.ty) - }, - )), - ) - .to_expr(), - ), - CanonicalType::AsyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::SyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::Reset(_) => unreachable!(), - CanonicalType::Clock(ty) => Expr::canonical(expr.arg().cast_to(ty)), - CanonicalType::PhantomConst(ty) => { - let _ = self.compile_expr(instantiated_module, Expr::canonical(expr.arg())); - Expr::canonical(ty.to_expr()) - } - }; - let retval = 
self.compile_expr(instantiated_module, Expr::canonical(retval)); - self.compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()) - } - fn compile_aggregate_literal( - &mut self, - instantiated_module: InstantiatedModule, - dest_ty: CanonicalType, - inputs: Interned<[Expr]>, - ) -> CompiledValue { - self.compile_expr_helper(instantiated_module, dest_ty, |this, dest| { - let mut insns = Vec::new(); - let mut offset = TypeIndex::ZERO; - for input in inputs { - let input = this.compile_expr(instantiated_module, input); - let input = this - .compiled_expr_to_value( - input, - instantiated_module.leaf_module().source_location(), - ) - .range; - insns.extend( - input.insns_for_copy_to(dest.slice(TypeIndexRange::new(offset, input.len()))), - ); - offset = offset.offset(input.len().as_index()); - } - insns - }) - } - fn compile_expr( - &mut self, - instantiated_module: InstantiatedModule, - expr: Expr, - ) -> CompiledExpr { - if let Some(&retval) = self.compiled_exprs.get(&expr) { - return retval; - } - let mut cast_bit = |arg: Expr| { - let src_signed = match Expr::ty(arg) { - CanonicalType::UInt(_) => false, - CanonicalType::SInt(_) => true, - CanonicalType::Bool(_) => false, - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(_) => unreachable!(), - CanonicalType::Bundle(_) => unreachable!(), - CanonicalType::AsyncReset(_) => false, - CanonicalType::SyncReset(_) => false, - CanonicalType::Reset(_) => false, - CanonicalType::Clock(_) => false, - CanonicalType::PhantomConst(_) => unreachable!(), - }; - let dest_signed = match Expr::ty(expr) { - CanonicalType::UInt(_) => false, - CanonicalType::SInt(_) => true, - CanonicalType::Bool(_) => false, - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(_) => unreachable!(), - CanonicalType::Bundle(_) => unreachable!(), - CanonicalType::AsyncReset(_) => false, - CanonicalType::SyncReset(_) => false, - CanonicalType::Reset(_) => false, - CanonicalType::Clock(_) => false, - CanonicalType::PhantomConst(_) => unreachable!(), - }; - self.simple_nary_big_expr(instantiated_module, Expr::ty(expr), [arg], |dest, [src]| { - match (src_signed, dest_signed) { - (false, false) | (true, true) => { - vec![Insn::Copy { dest, src }] - } - (false, true) => vec![Insn::CastToSInt { - dest, - src, - dest_width: 1, - }], - (true, false) => vec![Insn::CastToUInt { - dest, - src, - dest_width: 1, - }], - } - }) - .into() - }; - let retval: CompiledExpr<_> = match *Expr::expr_enum(expr) { - ExprEnum::UIntLiteral(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [], - |dest, []| { - vec![Insn::Const { - dest, - value: expr.to_bigint().intern_sized(), - }] - }, - ) - .into(), - ExprEnum::SIntLiteral(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [], - |dest, []| { - vec![Insn::Const { - dest, - value: expr.to_bigint().intern_sized(), - }] - }, - ) - .into(), - ExprEnum::BoolLiteral(expr) => self - .simple_nary_big_expr(instantiated_module, Bool.canonical(), [], |dest, []| { - vec![Insn::Const { - dest, - value: BigInt::from(expr).intern_sized(), - }] - }) - .into(), - ExprEnum::PhantomConst(_) => self - .compile_aggregate_literal(instantiated_module, Expr::ty(expr), Interned::default()) - .into(), - ExprEnum::BundleLiteral(literal) => self - .compile_aggregate_literal( - instantiated_module, - Expr::ty(expr), - literal.field_values(), - ) - .into(), - ExprEnum::ArrayLiteral(literal) => self - .compile_aggregate_literal( - instantiated_module, - 
Expr::ty(expr), - literal.element_values(), - ) - .into(), - ExprEnum::EnumLiteral(expr) => { - let enum_bits_ty = UInt[expr.ty().type_properties().bit_width]; - let enum_bits = if let Some(variant_value) = expr.variant_value() { - ( - UInt[expr.ty().discriminant_bit_width()] - .from_int_wrapping(expr.variant_index()), - variant_value, - ) - .cast_to_bits() - .cast_to(enum_bits_ty) - } else { - enum_bits_ty - .from_int_wrapping(expr.variant_index()) - .to_expr() - }; - self.compile_expr( - instantiated_module, - enum_bits.cast_bits_to(expr.ty().canonical()), - ) - } - ExprEnum::Uninit(expr) => self.compile_expr( - instantiated_module, - UInt[expr.ty().bit_width()].zero().cast_bits_to(expr.ty()), - ), - ExprEnum::NotU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Expr::ty(expr.arg()).canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::NotU { - dest, - src, - width: Expr::ty(expr.arg()).width(), - }] - }, - ) - .into(), - ExprEnum::NotS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Expr::ty(expr.arg()).canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::NotS { dest, src }], - ) - .into(), - ExprEnum::NotB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Expr::ty(expr.arg()).canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::NotU { - dest, - src, - width: 1, - }] - }, - ) - .into(), - ExprEnum::Neg(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| vec![Insn::Neg { dest, src }], - ) - .into(), - ExprEnum::BitAndU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitAndS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitAndB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitOrU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitOrS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitOrB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitXorU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], - ) - .into(), - ExprEnum::BitXorS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], - ) - .into(), - 
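A sketch of the bit packing the EnumLiteral lowering above performs before `cast_bits_to` reinterprets the result as the enum type: the discriminant occupies the low bits and the variant payload, if any, is packed directly above it (the VariantAccess lowering later slices the payload back out starting at `discriminant_bit_width()`). This is an illustrative stand-in using plain Rust integers, not code from the patch; `pack_enum_bits` and its parameters are hypothetical names.

// Sketch only: mirrors `(discriminant, variant_value).cast_to_bits()` in the lowering above.
fn pack_enum_bits(variant_index: u64, discriminant_bits: u32, payload_bits: u64) -> u64 {
    // assumes discriminant_bits < 64 so the shifts below stay in range;
    // the real compiler works on arbitrary-width UInt values
    let discriminant_mask = (1u64 << discriminant_bits) - 1;
    (payload_bits << discriminant_bits) | (variant_index & discriminant_mask)
}

fn main() {
    // variant 2 of an enum with a 2-bit discriminant and payload 0b101
    // packs to 0b101_10: payload above, discriminant in the low bits
    assert_eq!(pack_enum_bits(2, 2, 0b101), 0b10110);
}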
ExprEnum::BitXorB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], - ) - .into(), - ExprEnum::AddU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Add { dest, lhs, rhs }], - ) - .into(), - ExprEnum::AddS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Add { dest, lhs, rhs }], - ) - .into(), - ExprEnum::SubU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| { - vec![Insn::SubU { - dest, - lhs, - rhs, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::SubS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::SubS { dest, lhs, rhs }], - ) - .into(), - ExprEnum::MulU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], - ) - .into(), - ExprEnum::MulS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DivU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DivS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], - ) - .into(), - ExprEnum::RemU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], - ) - .into(), - ExprEnum::RemS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShlU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShlS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShrU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], - ) - .into(), - ExprEnum::DynShrS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs()), 
Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], - ) - .into(), - ExprEnum::FixedShlU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shl { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::FixedShlS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shl { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::FixedShrU(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shr { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::FixedShrS(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.lhs())], - |dest, [lhs]| { - vec![Insn::Shr { - dest, - lhs, - rhs: expr.rhs(), - }] - }, - ) - .into(), - ExprEnum::CmpLtB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLeB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGtB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGeB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpEqB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpNeB(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLtU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLeU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGtU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGeU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, 
[lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpEqU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpNeU(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLtS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpLeS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGtS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpGeS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - // swap both comparison direction and lhs/rhs - [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], - |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpEqS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CmpNeS(expr) => self - .simple_nary_big_expr( - instantiated_module, - Bool.canonical(), - [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], - |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], - ) - .into(), - ExprEnum::CastUIntToUInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToUInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastUIntToSInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToSInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastSIntToUInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToUInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastSIntToSInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - expr.ty().canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::CastToSInt { - dest, - src, - dest_width: expr.ty().width(), - }] - }, - ) - .into(), - ExprEnum::CastBoolToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToSyncReset(expr) => 
cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSyncResetToReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastAsyncResetToReset(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastBoolToClock(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastUIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastSIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastClockToBool(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastClockToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::CastClockToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), - ExprEnum::FieldAccess(expr) => self - .compile_expr(instantiated_module, Expr::canonical(expr.base())) - .map_ty(Bundle::from_canonical) - .field_by_index(expr.field_index()), - ExprEnum::VariantAccess(variant_access) => { - let start = Expr::ty(variant_access.base()).discriminant_bit_width(); - let len = Expr::ty(expr).bit_width(); - self.compile_expr( - instantiated_module, - variant_access.base().cast_to_bits()[start..start + len] - .cast_bits_to(Expr::ty(expr)), - ) - } - ExprEnum::ArrayIndex(expr) => self - .compile_expr(instantiated_module, Expr::canonical(expr.base())) - .map_ty(Array::from_canonical) - .element(expr.element_index()), - ExprEnum::DynArrayIndex(expr) => { - let element_index = - self.compile_expr(instantiated_module, Expr::canonical(expr.element_index())); - let element_index = self.compiled_expr_to_value( - element_index, - instantiated_module.leaf_module().source_location(), - ); - let index_slot = self.compiled_value_to_dyn_array_index( - element_index.map_ty(UInt::from_canonical), - instantiated_module.leaf_module().source_location(), - ); - self.compile_expr(instantiated_module, Expr::canonical(expr.base())) - .map_ty(Array::from_canonical) - .element_dyn(index_slot) - } - ExprEnum::ReduceBitAndU(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical( - expr.arg() - .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), - ), - ) - } - .into(), - ExprEnum::ReduceBitAndS(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical( - expr.arg() - .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), - ), - ) - } - .into(), - 
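The ReduceBitAnd arms above, and the ReduceBitOr arms that follow, rewrite bit reductions as whole-value comparisons: AND-reducing a zero-width value is vacuously true and otherwise compares the argument against the all-ones pattern (`from_int_wrapping(-1)`), while OR-reduction is false for zero width and otherwise a compare against zero. Below is a small stand-in for the same identities over plain Rust integers; `reduce_and`/`reduce_or` and the `width <= 64` limit are assumptions of this sketch, not part of the patch.

// Sketch only: `width` plays the role of the UInt/SInt bit width.
fn reduce_and(value: u64, width: u32) -> bool {
    let mask = if width == 64 { u64::MAX } else { (1u64 << width) - 1 };
    // empty AND-reduction is true; otherwise all `width` low bits must be set
    width == 0 || (value & mask) == mask
}

fn reduce_or(value: u64, width: u32) -> bool {
    let mask = if width == 64 { u64::MAX } else { (1u64 << width) - 1 };
    // empty OR-reduction is false; otherwise any set bit suffices
    width != 0 && (value & mask) != 0
}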
ExprEnum::ReduceBitOrU(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), - ) - } - .into(), - ExprEnum::ReduceBitOrS(expr) => if Expr::ty(expr.arg()).width() == 0 { - self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) - } else { - self.compile_expr( - instantiated_module, - Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), - ) - } - .into(), - ExprEnum::ReduceBitXorU(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::<1>::TYPE.canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::ReduceBitXor { - dest, - src, - input_width: Expr::ty(expr.arg()).width(), - }] - }, - ) - .into(), - ExprEnum::ReduceBitXorS(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::<1>::TYPE.canonical(), - [Expr::canonical(expr.arg())], - |dest, [src]| { - vec![Insn::ReduceBitXor { - dest, - src, - input_width: Expr::ty(expr.arg()).width(), - }] - }, - ) - .into(), - ExprEnum::SliceUInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::new_dyn(expr.range().len()).canonical(), - [Expr::canonical(expr.base())], - |dest, [src]| { - vec![Insn::SliceInt { - dest, - src, - start: expr.range().start, - len: expr.range().len(), - }] - }, - ) - .into(), - ExprEnum::SliceSInt(expr) => self - .simple_nary_big_expr( - instantiated_module, - UInt::new_dyn(expr.range().len()).canonical(), - [Expr::canonical(expr.base())], - |dest, [src]| { - vec![Insn::SliceInt { - dest, - src, - start: expr.range().start, - len: expr.range().len(), - }] - }, - ) - .into(), - ExprEnum::CastToBits(expr) => self - .compile_cast_to_bits(instantiated_module, expr) - .map_ty(CanonicalType::UInt) - .into(), - ExprEnum::CastBitsTo(expr) => { - self.compile_cast_bits_to(instantiated_module, expr).into() - } - ExprEnum::ModuleIO(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::Instance(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::Wire(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::Reg(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::RegSync(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::RegAsync(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - ExprEnum::MemPort(expr) => self - .compile_value(TargetInInstantiatedModule { - instantiated_module, - target: expr.into(), - }) - .into(), - }; - self.compiled_exprs.insert(expr, retval); - retval - } - fn compile_simple_connect( - &mut self, - conditions: Interned<[Cond]>, - lhs: CompiledExpr, - rhs: CompiledValue, - source_location: SourceLocation, - ) { - let CompiledExpr { - static_part: lhs_static_part, - indexes, - } = lhs; - let (lhs_layout, lhs_range) = lhs_static_part.write(); - assert!( - lhs_layout.ty.is_passive(), - "invalid expression passed to compile_simple_connect -- type must be passive", - ); - let TypeIndexRange { - small_slots, - big_slots, - } = lhs_range; - 
self.add_assignment( - conditions, - small_slots - .iter() - .zip(rhs.range.small_slots.iter()) - .map(|(base, src)| { - if indexes.small_slots.is_empty() { - Insn::CopySmall { dest: base, src } - } else { - Insn::WriteSmallIndexed { - dest: StatePartArrayIndexed { - base, - indexes: indexes.small_slots, - }, - src, - } - } - }) - .chain( - big_slots - .iter() - .zip(rhs.range.big_slots.iter()) - .map(|(base, src)| { - if indexes.big_slots.is_empty() { - Insn::Copy { dest: base, src } - } else { - Insn::WriteIndexed { - dest: StatePartArrayIndexed { - base, - indexes: indexes.big_slots, - }, - src, - } - } - }), - ), - source_location, - ); - } - fn compile_connect( - &mut self, - lhs_instantiated_module: InstantiatedModule, - lhs_conditions: Interned<[Cond]>, - lhs: Expr, - rhs_instantiated_module: InstantiatedModule, - rhs_conditions: Interned<[Cond]>, - mut rhs: Expr, - source_location: SourceLocation, - ) { - if Expr::ty(lhs) != Expr::ty(rhs) || !Expr::ty(lhs).is_passive() { - match Expr::ty(lhs) { - CanonicalType::UInt(lhs_ty) => { - rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); - } - CanonicalType::SInt(lhs_ty) => { - rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); - } - CanonicalType::Bool(_) => unreachable!(), - CanonicalType::Array(lhs_ty) => { - let CanonicalType::Array(rhs_ty) = Expr::ty(rhs) else { - unreachable!(); - }; - assert_eq!(lhs_ty.len(), rhs_ty.len()); - let lhs = Expr::::from_canonical(lhs); - let rhs = Expr::::from_canonical(rhs); - for index in 0..lhs_ty.len() { - self.compile_connect( - lhs_instantiated_module, - lhs_conditions, - lhs[index], - rhs_instantiated_module, - rhs_conditions, - rhs[index], - source_location, - ); - } - return; - } - CanonicalType::Enum(lhs_ty) => { - let CanonicalType::Enum(rhs_ty) = Expr::ty(rhs) else { - unreachable!(); - }; - todo!("handle connect with different enum types"); - } - CanonicalType::Bundle(lhs_ty) => { - let CanonicalType::Bundle(rhs_ty) = Expr::ty(rhs) else { - unreachable!(); - }; - assert_eq!(lhs_ty.fields().len(), rhs_ty.fields().len()); - let lhs = Expr::::from_canonical(lhs); - let rhs = Expr::::from_canonical(rhs); - for ( - field_index, - ( - BundleField { - name, - flipped, - ty: _, - }, - rhs_field, - ), - ) in lhs_ty.fields().into_iter().zip(rhs_ty.fields()).enumerate() - { - assert_eq!(name, rhs_field.name); - assert_eq!(flipped, rhs_field.flipped); - let lhs_expr = ops::FieldAccess::new_by_index(lhs, field_index).to_expr(); - let rhs_expr = ops::FieldAccess::new_by_index(rhs, field_index).to_expr(); - if flipped { - // swap lhs/rhs - self.compile_connect( - rhs_instantiated_module, - rhs_conditions, - rhs_expr, - lhs_instantiated_module, - lhs_conditions, - lhs_expr, - source_location, - ); - } else { - self.compile_connect( - lhs_instantiated_module, - lhs_conditions, - lhs_expr, - rhs_instantiated_module, - rhs_conditions, - rhs_expr, - source_location, - ); - } - } - return; - } - CanonicalType::AsyncReset(_) => unreachable!(), - CanonicalType::SyncReset(_) => unreachable!(), - CanonicalType::Reset(_) => unreachable!(), - CanonicalType::Clock(_) => unreachable!(), - CanonicalType::PhantomConst(_) => unreachable!("PhantomConst mismatch"), - } - } - let Some(target) = lhs.target() else { - unreachable!("connect lhs must have target"); - }; - let lhs_decl_conditions = self.decl_conditions[&TargetInInstantiatedModule { - instantiated_module: lhs_instantiated_module, - target: target.base().into(), - }]; - let lhs = self.compile_expr(lhs_instantiated_module, lhs); - 
let rhs = self.compile_expr(rhs_instantiated_module, rhs); - let rhs = self.compiled_expr_to_value(rhs, source_location); - self.compile_simple_connect( - lhs_conditions[lhs_decl_conditions.len()..].intern(), - lhs, - rhs, - source_location, - ); - } - fn compile_clock( - &mut self, - clk: CompiledValue, - source_location: SourceLocation, - ) -> ClockTrigger { - if let Some(&retval) = self.compiled_value_to_clock_trigger_map.get(&clk) { - return retval; - } - let mut alloc_small_slot = |part_name: &str| { - self.insns - .state_layout - .ty - .small_slots - .allocate(&StatePartLayout::scalar( - SlotDebugData { - name: Interned::default(), - ty: Bool.canonical(), - }, - (), - )) - .start - }; - let last_clk_was_low = alloc_small_slot("last_clk_was_low"); - let clk_triggered = alloc_small_slot("clk_triggered"); - let retval = ClockTrigger { - last_clk_was_low, - clk: self.compiled_value_bool_dest_is_small( - clk.map_ty(CanonicalType::Clock), - source_location, - ), - clk_triggered, - source_location, - }; - self.add_assignment( - Interned::default(), - [Insn::AndSmall { - dest: clk_triggered, - lhs: retval.clk, - rhs: last_clk_was_low, - }], - source_location, - ); - self.clock_triggers.push(retval); - self.compiled_value_to_clock_trigger_map.insert(clk, retval); - retval - } - fn compile_enum_discriminant( - &mut self, - enum_value: CompiledValue, - source_location: SourceLocation, - ) -> StatePartIndex { - if let Some(&retval) = self.enum_discriminants.get(&enum_value) { - return retval; - } - let retval_ty = Enum::new( - enum_value - .layout - .ty - .variants() - .iter() - .map(|variant| EnumVariant { - name: variant.name, - ty: None, - }) - .collect(), - ); - let retval = if retval_ty == enum_value.layout.ty - && enum_value.range.len() == TypeLen::A_SMALL_SLOT - { - enum_value.range.small_slots.start - } else { - let retval = self - .insns - .state_layout - .ty - .small_slots - .allocate(&StatePartLayout::scalar( - SlotDebugData { - name: Interned::default(), - ty: retval_ty.canonical(), - }, - (), - )) - .start; - let discriminant_bit_width = enum_value.layout.ty.discriminant_bit_width(); - let discriminant_mask = !(!0u64 << discriminant_bit_width); - let insn = match enum_value.range.len() { - TypeLen::A_BIG_SLOT => Insn::AndBigWithSmallImmediate { - dest: retval, - lhs: enum_value.range.big_slots.start, - rhs: discriminant_mask, - }, - TypeLen::A_SMALL_SLOT => { - if discriminant_bit_width == enum_value.layout.ty.type_properties().bit_width { - Insn::CopySmall { - dest: retval, - src: enum_value.range.small_slots.start, - } - } else { - Insn::AndSmallImmediate { - dest: retval, - lhs: enum_value.range.small_slots.start, - rhs: discriminant_mask, - } - } - } - _ => unreachable!(), - }; - self.add_assignment(Interned::default(), [insn], source_location); - retval - }; - self.enum_discriminants.insert(enum_value, retval); - retval - } - fn compile_stmt_reg( - &mut self, - stmt_reg: StmtReg, - instantiated_module: InstantiatedModule, - value: CompiledValue, - ) { - let StmtReg { annotations, reg } = stmt_reg; - let clk = self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().clk)); - let clk = self - .compiled_expr_to_value(clk, reg.source_location()) - .map_ty(Clock::from_canonical); - let clk = self.compile_clock(clk, reg.source_location()); - struct Dispatch; - impl ResetTypeDispatch for Dispatch { - type Input = (); - - type Output = bool; - - fn reset(self, _input: Self::Input) -> Self::Output { - unreachable!() - } - - fn sync_reset(self, _input: Self::Input) -> 
Self::Output { - false - } - - fn async_reset(self, _input: Self::Input) -> Self::Output { - true - } - } - let reset = if let Some(init) = reg.init() { - let init = self.compile_expr(instantiated_module, init); - let init = self.compiled_expr_to_value(init, reg.source_location()); - let rst = - self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().rst)); - let rst = self.compiled_expr_to_value(rst, reg.source_location()); - let rst = self.compiled_value_bool_dest_is_small(rst, reg.source_location()); - let is_async = R::dispatch((), Dispatch); - if is_async { - let cond = Expr::canonical(reg.clock_domain().rst.cast_to(Bool)); - let cond = self.compile_expr(instantiated_module, cond); - let cond = self.compiled_expr_to_value(cond, reg.source_location()); - let cond = cond.map_ty(Bool::from_canonical); - // write to the register's current value since asynchronous reset is combinational - let lhs = CompiledValue { - layout: value.layout, - range: value.range, - write: None, - } - .into(); - self.compile_simple_connect( - [Cond { - body: CondBody::IfTrue { cond }, - source_location: reg.source_location(), - }][..] - .intern(), - lhs, - init, - reg.source_location(), - ); - } - Some(RegisterReset { - is_async, - init, - rst, - }) - } else { - None - }; - self.registers.push(Register { - value, - clk_triggered: clk.clk_triggered, - reset, - source_location: reg.source_location(), - }); - } - fn compile_declaration( - &mut self, - declaration: StmtDeclaration, - parent_module: Interned, - conditions: Interned<[Cond]>, - ) -> TraceDecl { - let target_base: TargetBase = match &declaration { - StmtDeclaration::Wire(v) => v.wire.into(), - StmtDeclaration::Reg(v) => v.reg.into(), - StmtDeclaration::RegSync(v) => v.reg.into(), - StmtDeclaration::RegAsync(v) => v.reg.into(), - StmtDeclaration::Instance(v) => v.instance.into(), - }; - let target = TargetInInstantiatedModule { - instantiated_module: *parent_module, - target: target_base.into(), - }; - self.decl_conditions.insert(target, conditions); - let compiled_value = self.compile_value(target); - match declaration { - StmtDeclaration::Wire(StmtWire { annotations, wire }) => {} - StmtDeclaration::Reg(_) => { - unreachable!("Reset types were already replaced by SyncReset or AsyncReset"); - } - StmtDeclaration::RegSync(stmt_reg) => { - self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) - } - StmtDeclaration::RegAsync(stmt_reg) => { - self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) - } - StmtDeclaration::Instance(StmtInstance { - annotations, - instance, - }) => { - let inner_instantiated_module = InstantiatedModule::Child { - parent: parent_module, - instance: instance.intern_sized(), - } - .intern_sized(); - let instance_expr = instance.to_expr(); - self.compile_module(inner_instantiated_module); - for (field_index, module_io) in - instance.instantiated().module_io().into_iter().enumerate() - { - let instance_field = - ops::FieldAccess::new_by_index(instance_expr, field_index).to_expr(); - match Expr::flow(instance_field) { - Flow::Source => { - // we need to supply the value to the instance since the - // parent module expects to read from the instance - self.compile_connect( - *parent_module, - conditions, - instance_field, - *inner_instantiated_module, - Interned::default(), - module_io.module_io.to_expr(), - instance.source_location(), - ); - } - Flow::Sink => { - // we need to take the value from the instance since the - // parent module expects to write to the instance - self.compile_connect( - 
*inner_instantiated_module, - Interned::default(), - module_io.module_io.to_expr(), - *parent_module, - conditions, - instance_field, - instance.source_location(), - ); - } - Flow::Duplex => unreachable!(), - } - } - } - } - self.make_trace_decl(*parent_module, target_base) - } - fn allocate_delay_chain( - &mut self, - len: usize, - layout: &TypeLayout, - first: Option, - last: Option, - mut from_allocation: impl FnMut(TypeIndexRange) -> T, - ) -> Vec { - match (len, first, last) { - (0, _, _) => Vec::new(), - (1, Some(v), _) | (1, None, Some(v)) => vec![v], - (2, Some(first), Some(last)) => vec![first, last], - (len, first, last) => { - let inner_len = len - first.is_some() as usize - last.is_some() as usize; - first - .into_iter() - .chain( - (0..inner_len) - .map(|_| from_allocation(self.insns.allocate_variable(layout))), - ) - .chain(last) - .collect() - } - } - } - fn allocate_delay_chain_small( - &mut self, - len: usize, - ty: CanonicalType, - first: Option>, - last: Option>, - ) -> Vec> { - self.allocate_delay_chain( - len, - &TypeLayout { - small_slots: StatePartLayout::scalar( - SlotDebugData { - name: Interned::default(), - ty, - }, - (), - ), - big_slots: StatePartLayout::empty(), - }, - first, - last, - |range| range.small_slots.start, - ) - } - fn compile_memory_port_rw_helper( - &mut self, - memory: StatePartIndex, - stride: usize, - mut start: usize, - data_layout: CompiledTypeLayout, - mask_layout: CompiledTypeLayout, - mut read: Option>, - mut write: Option>, - ) { - match data_layout.body { - CompiledTypeLayoutBody::Scalar => { - let CompiledTypeLayoutBody::Scalar = mask_layout.body else { - unreachable!(); - }; - let signed = match data_layout.ty { - CanonicalType::UInt(_) => false, - CanonicalType::SInt(_) => true, - CanonicalType::Bool(_) => false, - CanonicalType::Array(_) => unreachable!(), - CanonicalType::Enum(_) => false, - CanonicalType::Bundle(_) => unreachable!(), - CanonicalType::AsyncReset(_) => false, - CanonicalType::SyncReset(_) => false, - CanonicalType::Reset(_) => false, - CanonicalType::Clock(_) => false, - CanonicalType::PhantomConst(_) => unreachable!(), - }; - let width = data_layout.ty.bit_width(); - if let Some(MemoryPortReadInsns { - addr, - en: _, - write_mode: _, - data, - insns, - }) = read - { - insns.push( - match data.len() { - TypeLen::A_BIG_SLOT => { - let dest = data.big_slots.start; - if signed { - Insn::MemoryReadSInt { - dest, - memory, - addr, - stride, - start, - width, - } - } else { - Insn::MemoryReadUInt { - dest, - memory, - addr, - stride, - start, - width, - } - } - } - TypeLen::A_SMALL_SLOT => { - let _dest = data.small_slots.start; - todo!("memory ports' data are always big for now"); - } - _ => unreachable!(), - } - .into(), - ); - } - if let Some(MemoryPortWriteInsns { - addr, - en: _, - write_mode: _, - data, - mask, - insns, - }) = write - { - let end_label = self.insns.new_label(); - insns.push( - match mask.len() { - TypeLen::A_BIG_SLOT => Insn::BranchIfZero { - target: end_label.0, - value: mask.big_slots.start, - }, - TypeLen::A_SMALL_SLOT => Insn::BranchIfSmallZero { - target: end_label.0, - value: mask.small_slots.start, - }, - _ => unreachable!(), - } - .into(), - ); - insns.push( - match data.len() { - TypeLen::A_BIG_SLOT => { - let value = data.big_slots.start; - if signed { - Insn::MemoryWriteSInt { - value, - memory, - addr, - stride, - start, - width, - } - } else { - Insn::MemoryWriteUInt { - value, - memory, - addr, - stride, - start, - width, - } - } - } - TypeLen::A_SMALL_SLOT => { - let _value = 
data.small_slots.start; - todo!("memory ports' data are always big for now"); - } - _ => unreachable!(), - } - .into(), - ); - insns.push(end_label.into()); - } - } - CompiledTypeLayoutBody::Array { element } => { - let CompiledTypeLayoutBody::Array { - element: mask_element, - } = mask_layout.body - else { - unreachable!(); - }; - let ty = ::from_canonical(data_layout.ty); - let element_bit_width = ty.element().bit_width(); - let element_size = element.layout.len(); - let mask_element_size = mask_element.layout.len(); - for element_index in 0..ty.len() { - self.compile_memory_port_rw_helper( - memory, - stride, - start, - *element, - *mask_element, - read.as_mut().map( - |MemoryPortReadInsns { - addr, - en, - write_mode, - data, - insns, - }| MemoryPortReadInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.index_array(element_size, element_index), - insns, - }, - ), - write.as_mut().map( - |MemoryPortWriteInsns { - addr, - en, - write_mode, - data, - mask, - insns, - }| { - MemoryPortWriteInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.index_array(element_size, element_index), - mask: mask.index_array(mask_element_size, element_index), - insns, - } - }, - ), - ); - start += element_bit_width; - } - } - CompiledTypeLayoutBody::Bundle { fields } => { - let CompiledTypeLayoutBody::Bundle { - fields: mask_fields, - } = mask_layout.body - else { - unreachable!(); - }; - assert_eq!(fields.len(), mask_fields.len()); - for (field, mask_field) in fields.into_iter().zip(mask_fields) { - let field_index_range = - TypeIndexRange::new(field.offset, field.ty.layout.len()); - let mask_field_index_range = - TypeIndexRange::new(mask_field.offset, mask_field.ty.layout.len()); - self.compile_memory_port_rw_helper( - memory, - stride, - start, - field.ty, - mask_field.ty, - read.as_mut().map( - |MemoryPortReadInsns { - addr, - en, - write_mode, - data, - insns, - }| MemoryPortReadInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.slice(field_index_range), - insns, - }, - ), - write.as_mut().map( - |MemoryPortWriteInsns { - addr, - en, - write_mode, - data, - mask, - insns, - }| { - MemoryPortWriteInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: data.slice(field_index_range), - mask: mask.slice(mask_field_index_range), - insns, - } - }, - ), - ); - start = start + field.ty.ty.bit_width(); - } - } - } - } - fn compile_memory_port_rw( - &mut self, - memory: StatePartIndex, - data_layout: CompiledTypeLayout, - mask_layout: CompiledTypeLayout, - mut read: Option>, - mut write: Option>, - ) { - let read_else_label = read.as_mut().map( - |MemoryPortReadInsns { - addr: _, - en, - write_mode, - data: _, - insns, - }| { - let else_label = self.insns.new_label(); - insns.push( - Insn::BranchIfSmallZero { - target: else_label.0, - value: *en, - } - .into(), - ); - if let Some(write_mode) = *write_mode { - insns.push( - Insn::BranchIfSmallNonZero { - target: else_label.0, - value: write_mode, - } - .into(), - ); - } - else_label - }, - ); - let write_end_label = write.as_mut().map( - |MemoryPortWriteInsns { - addr: _, - en, - write_mode, - data: _, - mask: _, - insns, - }| { - let end_label = self.insns.new_label(); - insns.push( - Insn::BranchIfSmallZero { - target: end_label.0, - value: *en, - } - .into(), - ); - if let Some(write_mode) = *write_mode { - insns.push( - Insn::BranchIfSmallZero { - target: end_label.0, - value: write_mode, - } - .into(), - ); - } - end_label - }, - ); - self.compile_memory_port_rw_helper( - memory, 
- data_layout.ty.bit_width(), - 0, - data_layout, - mask_layout, - read.as_mut().map( - |MemoryPortReadInsns { - addr, - en, - write_mode, - data, - insns, - }| MemoryPortReadInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: *data, - insns: *insns, - }, - ), - write.as_mut().map( - |MemoryPortWriteInsns { - addr, - en, - write_mode, - data, - mask, - insns, - }| MemoryPortWriteInsns { - addr: *addr, - en: *en, - write_mode: *write_mode, - data: *data, - mask: *mask, - insns: *insns, - }, - ), - ); - if let ( - Some(else_label), - Some(MemoryPortReadInsns { - addr: _, - en: _, - write_mode: _, - data, - insns, - }), - ) = (read_else_label, read) - { - let end_label = self.insns.new_label(); - insns.push( - Insn::Branch { - target: end_label.0, - } - .into(), - ); - insns.push(else_label.into()); - let TypeIndexRange { - small_slots, - big_slots, - } = data; - for dest in small_slots.iter() { - insns.push(Insn::ConstSmall { dest, value: 0 }.into()); - } - for dest in big_slots.iter() { - insns.push( - Insn::Const { - dest, - value: BigInt::ZERO.intern_sized(), - } - .into(), - ); - } - insns.push(end_label.into()); - } - if let (Some(end_label), Some(write)) = (write_end_label, write) { - write.insns.push(end_label.into()); - } - } - fn compile_memory( - &mut self, - mem: Mem, - instantiated_module: InstantiatedModule, - conditions: Interned<[Cond]>, - trace_decls: &mut Vec, - ) { - let data_layout = CompiledTypeLayout::get(mem.array_type().element()); - let mask_layout = CompiledTypeLayout::get(mem.array_type().element().mask_type()); - let read_latency_plus_1 = mem - .read_latency() - .checked_add(1) - .expect("read latency too big"); - let write_latency_plus_1 = mem - .write_latency() - .get() - .checked_add(1) - .expect("write latency too big"); - let read_cycle = match mem.read_under_write() { - ReadUnderWrite::Old => 0, - ReadUnderWrite::New => mem.read_latency(), - ReadUnderWrite::Undefined => mem.read_latency() / 2, // something other than Old or New - }; - let memory = self - .insns - .state_layout - .memories - .allocate(&StatePartLayout::scalar( - (), - MemoryData { - array_type: mem.array_type(), - data: mem.initial_value().unwrap_or_else(|| { - Intern::intern_owned(BitVec::repeat( - false, - mem.array_type().type_properties().bit_width, - )) - }), - }, - )) - .start; - let (ports, trace_ports) = mem - .ports() - .iter() - .map(|&port| { - let target_base = TargetBase::MemPort(port); - let target = TargetInInstantiatedModule { - instantiated_module, - target: target_base.into(), - }; - self.decl_conditions.insert(target, conditions); - let TraceDecl::Scope(TraceScope::MemPort(trace_port)) = - self.make_trace_decl(instantiated_module, target_base) - else { - unreachable!(); - }; - let clk = Expr::field(port.to_expr(), "clk"); - let clk = self.compile_expr(instantiated_module, clk); - let clk = self.compiled_expr_to_value(clk, mem.source_location()); - let clk_triggered = self - .compile_clock(clk.map_ty(Clock::from_canonical), mem.source_location()) - .clk_triggered; - let en = Expr::field(port.to_expr(), "en"); - let en = self.compile_expr(instantiated_module, en); - let en = self.compiled_expr_to_value(en, mem.source_location()); - let en = self.compiled_value_bool_dest_is_small(en, mem.source_location()); - let addr = Expr::field(port.to_expr(), "addr"); - let addr = self.compile_expr(instantiated_module, addr); - let addr = self.compiled_expr_to_value(addr, mem.source_location()); - let addr_ty = addr.layout.ty; - let addr = 
self.compiled_value_to_dyn_array_index( - addr.map_ty(UInt::from_canonical), - mem.source_location(), - ); - let read_data = port.port_kind().rdata_name().map(|name| { - let read_data = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let read_data = self.compiled_expr_to_value(read_data, mem.source_location()); - read_data.range - }); - let write_data = port.port_kind().wdata_name().map(|name| { - let write_data = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let write_data = self.compiled_expr_to_value(write_data, mem.source_location()); - write_data.range - }); - let write_mask = port.port_kind().wmask_name().map(|name| { - let write_mask = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let write_mask = self.compiled_expr_to_value(write_mask, mem.source_location()); - write_mask.range - }); - let write_mode = port.port_kind().wmode_name().map(|name| { - let write_mode = - self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); - let write_mode = self.compiled_expr_to_value(write_mode, mem.source_location()); - self.compiled_value_bool_dest_is_small(write_mode, mem.source_location()) - }); - struct PortParts { - en_delayed_len: usize, - addr_delayed_len: usize, - read_data_delayed_len: usize, - write_data_delayed_len: usize, - write_mask_delayed_len: usize, - write_mode_delayed_len: usize, - read_cycle: Option, - write_cycle: Option, - } - let PortParts { - en_delayed_len, - addr_delayed_len, - read_data_delayed_len, - write_data_delayed_len, - write_mask_delayed_len, - write_mode_delayed_len, - read_cycle, - write_cycle, - } = match port.port_kind() { - PortKind::ReadOnly => PortParts { - en_delayed_len: read_cycle + 1, - addr_delayed_len: read_cycle + 1, - read_data_delayed_len: read_latency_plus_1 - read_cycle, - write_data_delayed_len: 0, - write_mask_delayed_len: 0, - write_mode_delayed_len: 0, - read_cycle: Some(read_cycle), - write_cycle: None, - }, - PortKind::WriteOnly => PortParts { - en_delayed_len: write_latency_plus_1, - addr_delayed_len: write_latency_plus_1, - read_data_delayed_len: 0, - write_data_delayed_len: write_latency_plus_1, - write_mask_delayed_len: write_latency_plus_1, - write_mode_delayed_len: 0, - read_cycle: None, - write_cycle: Some(mem.write_latency().get()), - }, - PortKind::ReadWrite => { - let can_rw_at_end = match mem.read_under_write() { - ReadUnderWrite::Old => false, - ReadUnderWrite::New | ReadUnderWrite::Undefined => true, - }; - let latency_plus_1 = read_latency_plus_1; - if latency_plus_1 != write_latency_plus_1 || !can_rw_at_end { - todo!( - "not sure what to do, issue: \ - https://github.com/chipsalliance/firrtl-spec/issues/263" - ); - } - PortParts { - en_delayed_len: latency_plus_1, - addr_delayed_len: latency_plus_1, - read_data_delayed_len: 1, - write_data_delayed_len: latency_plus_1, - write_mask_delayed_len: latency_plus_1, - write_mode_delayed_len: latency_plus_1, - read_cycle: Some(latency_plus_1 - 1), - write_cycle: Some(latency_plus_1 - 1), - } - } - }; - let addr_delayed = self.allocate_delay_chain_small( - addr_delayed_len, - addr_ty.canonical(), - Some(addr), - None, - ); - let en_delayed = self.allocate_delay_chain_small( - en_delayed_len, - Bool.canonical(), - Some(en), - None, - ); - let read_data_delayed = self.allocate_delay_chain( - read_data_delayed_len, - &data_layout.layout, - None, - read_data, - |v| v, - ); - let write_data_delayed = self.allocate_delay_chain( - write_data_delayed_len, - &data_layout.layout, 
- write_data, - None, - |v| v, - ); - let write_mask_delayed = self.allocate_delay_chain( - write_mask_delayed_len, - &mask_layout.layout, - write_mask, - None, - |v| v, - ); - let write_mode_delayed = self.allocate_delay_chain_small( - write_mode_delayed_len, - Bool.canonical(), - write_mode, - None, - ); - let mut read_insns = Vec::new(); - let mut write_insns = Vec::new(); - self.compile_memory_port_rw( - memory, - data_layout, - mask_layout, - read_cycle.map(|read_cycle| MemoryPortReadInsns { - addr: addr_delayed[read_cycle], - en: en_delayed[read_cycle], - write_mode: write_mode_delayed.get(read_cycle).copied(), - data: read_data_delayed[0], - insns: &mut read_insns, - }), - write_cycle.map(|write_cycle| MemoryPortWriteInsns { - addr: addr_delayed[write_cycle], - en: en_delayed[write_cycle], - write_mode: write_mode_delayed.get(write_cycle).copied(), - data: write_data_delayed[write_cycle], - mask: write_mask_delayed[write_cycle], - insns: &mut write_insns, - }), - ); - self.add_assignment(Interned::default(), read_insns, mem.source_location()); - ( - MemoryPort { - clk_triggered, - addr_delayed, - en_delayed, - data_layout, - read_data_delayed, - write_data_delayed, - write_mask_delayed, - write_mode_delayed, - write_insns, - }, - trace_port, - ) - }) - .unzip(); - let name = mem.scoped_name().1.0; - let id = TraceMemoryId(self.memories.len()); - let stride = mem.array_type().element().bit_width(); - let trace = TraceMem { - id, - name, - stride, - element_type: self - .make_trace_decl_child( - instantiated_module, - MakeTraceDeclTarget::Memory { - id, - depth: mem.array_type().len(), - stride, - start: 0, - ty: mem.array_type().element(), - }, - name, - mem.source_location(), - ) - .intern_sized(), - ports: Intern::intern_owned(trace_ports), - array_type: mem.array_type(), - }; - trace_decls.push(trace.into()); - self.memories.push(Memory { - mem, - memory, - trace, - ports, - }); - } - fn compile_block( - &mut self, - parent_module: Interned, - block: Block, - conditions: Interned<[Cond]>, - trace_decls: &mut Vec, - ) { - let Block { memories, stmts } = block; - for memory in memories { - self.compile_memory(memory, *parent_module, conditions, trace_decls); - } - for stmt in stmts { - match stmt { - Stmt::Connect(StmtConnect { - lhs, - rhs, - source_location, - }) => self.compile_connect( - *parent_module, - conditions, - lhs, - *parent_module, - conditions, - rhs, - source_location, - ), - Stmt::Formal(StmtFormal { .. 
}) => todo!("implement simulating formal statements"), - Stmt::If(StmtIf { - cond, - source_location, - blocks: [then_block, else_block], - }) => { - let cond = self.compile_expr(*parent_module, Expr::canonical(cond)); - let cond = self.compiled_expr_to_value(cond, source_location); - let cond = cond.map_ty(Bool::from_canonical); - self.compile_block( - parent_module, - then_block, - Interned::from_iter(conditions.iter().copied().chain([Cond { - body: CondBody::IfTrue { cond }, - source_location, - }])), - trace_decls, - ); - self.compile_block( - parent_module, - else_block, - Interned::from_iter(conditions.iter().copied().chain([Cond { - body: CondBody::IfFalse { cond }, - source_location, - }])), - trace_decls, - ); - } - Stmt::Match(StmtMatch { - expr, - source_location, - blocks, - }) => { - let enum_expr = self.compile_expr(*parent_module, Expr::canonical(expr)); - let enum_expr = self.compiled_expr_to_value(enum_expr, source_location); - let enum_expr = enum_expr.map_ty(Enum::from_canonical); - let discriminant = self.compile_enum_discriminant(enum_expr, source_location); - for (variant_index, block) in blocks.into_iter().enumerate() { - self.compile_block( - parent_module, - block, - Interned::from_iter(conditions.iter().copied().chain([Cond { - body: CondBody::MatchArm { - discriminant, - variant_index, - }, - source_location, - }])), - trace_decls, - ); - } - } - Stmt::Declaration(declaration) => { - trace_decls.push(self.compile_declaration( - declaration, - parent_module, - conditions, - )); - } - } - } - } - fn compile_module(&mut self, module: Interned) -> &CompiledModule { - let mut trace_decls = Vec::new(); - let module_io = module - .leaf_module() - .module_io() - .iter() - .map( - |&AnnotatedModuleIO { - annotations: _, - module_io, - }| { - let target = TargetInInstantiatedModule { - instantiated_module: *module, - target: Target::from(module_io), - }; - self.decl_conditions.insert(target, Interned::default()); - trace_decls.push(self.make_trace_decl(*module, module_io.into())); - self.compile_value(target) - }, - ) - .collect(); - match module.leaf_module().body() { - ModuleBody::Normal(NormalModuleBody { body }) => { - self.compile_block(module, body, Interned::default(), &mut trace_decls); - } - ModuleBody::Extern(ExternModuleBody { - verilog_name: _, - parameters: _, - simulation, - }) => { - let Some(simulation) = simulation else { - panic!( - "can't simulate extern module without extern_module_simulation: {}", - module.leaf_module().source_location() - ); - }; - self.extern_modules.push(CompiledExternModule { - module_io_targets: module - .leaf_module() - .module_io() - .iter() - .map(|v| Target::from(v.module_io)) - .collect(), - module_io, - simulation, - }); - } - } - let hashbrown::hash_map::Entry::Vacant(entry) = self.modules.entry(*module) else { - unreachable!("compiled same instantiated module twice"); - }; - entry.insert(CompiledModule { - module_io, - trace_decls: TraceModule { - name: module.leaf_module().name(), - children: Intern::intern_owned(trace_decls), - }, - }) - } - fn process_assignments(&mut self) { - self.assignments - .finalize(self.insns.state_layout().ty.clone().into()); - if let Some(DebugOpaque(dump_assignments_dot)) = &self.dump_assignments_dot { - let graph = - petgraph::graph::DiGraph::<_, _, usize>::from_elements(self.assignments.elements()); - dump_assignments_dot(&petgraph::dot::Dot::new(&graph)); - } - let assignments_order: Vec<_> = match petgraph::algo::toposort(&self.assignments, None) { - Ok(nodes) => nodes - .into_iter() - 
.filter_map(|n| match n { - AssignmentOrSlotIndex::AssignmentIndex(v) => Some(v), - _ => None, - }) - .collect(), - Err(e) => match e.node_id() { - AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => panic!( - "combinatorial logic cycle detected at: {}", - self.assignments.assignments()[assignment_index].source_location, - ), - AssignmentOrSlotIndex::SmallSlot(slot) => panic!( - "combinatorial logic cycle detected through: {}", - self.insns.state_layout().ty.small_slots.debug_data[slot.as_usize()].name, - ), - AssignmentOrSlotIndex::BigSlot(slot) => panic!( - "combinatorial logic cycle detected through: {}", - self.insns.state_layout().ty.big_slots.debug_data[slot.as_usize()].name, - ), - }, - }; - struct CondStackEntry<'a> { - cond: &'a Cond, - end_label: Label, - } - let mut cond_stack = Vec::>::new(); - for assignment_index in assignments_order { - let Assignment { - inputs: _, - outputs: _, - conditions, - insns, - source_location, - } = &self.assignments.assignments()[assignment_index]; - let mut same_len = 0; - for (index, (entry, cond)) in cond_stack.iter().zip(conditions).enumerate() { - if entry.cond != cond { - break; - } - same_len = index + 1; - } - while cond_stack.len() > same_len { - let CondStackEntry { cond: _, end_label } = - cond_stack.pop().expect("just checked len"); - self.insns.define_label_at_next_insn(end_label); - } - for cond in &conditions[cond_stack.len()..] { - let end_label = self.insns.new_label(); - match cond.body { - CondBody::IfTrue { cond: cond_value } - | CondBody::IfFalse { cond: cond_value } => { - let (branch_if_zero, branch_if_non_zero) = match cond_value.range.len() { - TypeLen::A_SMALL_SLOT => ( - Insn::BranchIfSmallZero { - target: end_label.0, - value: cond_value.range.small_slots.start, - }, - Insn::BranchIfSmallNonZero { - target: end_label.0, - value: cond_value.range.small_slots.start, - }, - ), - TypeLen::A_BIG_SLOT => ( - Insn::BranchIfZero { - target: end_label.0, - value: cond_value.range.big_slots.start, - }, - Insn::BranchIfNonZero { - target: end_label.0, - value: cond_value.range.big_slots.start, - }, - ), - _ => unreachable!(), - }; - self.insns.push( - if let CondBody::IfTrue { .. 
} = cond.body { - branch_if_zero - } else { - branch_if_non_zero - }, - cond.source_location, - ); - } - CondBody::MatchArm { - discriminant, - variant_index, - } => { - self.insns.push( - Insn::BranchIfSmallNeImmediate { - target: end_label.0, - lhs: discriminant, - rhs: variant_index as _, - }, - cond.source_location, - ); - } - } - cond_stack.push(CondStackEntry { cond, end_label }); - } - self.insns.extend(insns.iter().copied(), *source_location); - } - for CondStackEntry { cond: _, end_label } in cond_stack { - self.insns.define_label_at_next_insn(end_label); - } - } - fn process_clocks(&mut self) -> Interned<[StatePartIndex]> { - mem::take(&mut self.clock_triggers) - .into_iter() - .map( - |ClockTrigger { - last_clk_was_low, - clk, - clk_triggered, - source_location, - }| { - self.insns.push( - Insn::XorSmallImmediate { - dest: last_clk_was_low, - lhs: clk, - rhs: 1, - }, - source_location, - ); - clk_triggered - }, - ) - .collect() - } - fn process_registers(&mut self) { - for Register { - value, - clk_triggered, - reset, - source_location, - } in mem::take(&mut self.registers) - { - match reset { - Some(RegisterReset { - is_async, - init, - rst, - }) => { - let reg_end = self.insns.new_label(); - let reg_reset = self.insns.new_label(); - let branch_if_reset = Insn::BranchIfSmallNonZero { - target: reg_reset.0, - value: rst, - }; - let branch_if_not_triggered = Insn::BranchIfSmallZero { - target: reg_end.0, - value: clk_triggered, - }; - if is_async { - self.insns.push(branch_if_reset, source_location); - self.insns.push(branch_if_not_triggered, source_location); - } else { - self.insns.push(branch_if_not_triggered, source_location); - self.insns.push(branch_if_reset, source_location); - } - self.insns.extend( - value.range.insns_for_copy_from(value.write_value().range), - source_location, - ); - self.insns - .push(Insn::Branch { target: reg_end.0 }, source_location); - self.insns.define_label_at_next_insn(reg_reset); - self.insns - .extend(value.range.insns_for_copy_from(init.range), source_location); - self.insns.define_label_at_next_insn(reg_end); - } - None => { - let reg_end = self.insns.new_label(); - self.insns.push( - Insn::BranchIfSmallZero { - target: reg_end.0, - value: clk_triggered, - }, - source_location, - ); - self.insns.extend( - value.range.insns_for_copy_from(value.write_value().range), - source_location, - ); - self.insns.define_label_at_next_insn(reg_end); - } - } - } - } - fn process_memories(&mut self) { - for memory_index in 0..self.memories.len() { - let Memory { - mem, - memory: _, - trace: _, - ref mut ports, - } = self.memories[memory_index]; - for MemoryPort { - clk_triggered, - addr_delayed, - en_delayed, - data_layout: _, - read_data_delayed, - write_data_delayed, - write_mask_delayed, - write_mode_delayed, - write_insns, - } in mem::take(ports) - { - let port_end = self.insns.new_label(); - let small_shift_reg = - |this: &mut Self, values: &[StatePartIndex]| { - for pair in values.windows(2).rev() { - this.insns.push( - Insn::CopySmall { - dest: pair[1], - src: pair[0], - }, - mem.source_location(), - ); - } - }; - let shift_reg = |this: &mut Self, values: &[TypeIndexRange]| { - for pair in values.windows(2).rev() { - this.insns - .extend(pair[0].insns_for_copy_to(pair[1]), mem.source_location()); - } - }; - self.insns.push( - Insn::BranchIfSmallZero { - target: port_end.0, - value: clk_triggered, - }, - mem.source_location(), - ); - small_shift_reg(self, &addr_delayed); - small_shift_reg(self, &en_delayed); - shift_reg(self, &write_data_delayed); - 
shift_reg(self, &write_mask_delayed); - small_shift_reg(self, &write_mode_delayed); - shift_reg(self, &read_data_delayed); - self.insns.extend(write_insns, mem.source_location()); - self.insns.define_label_at_next_insn(port_end); - } - } - } - pub fn compile(mut self) -> Compiled { - let base_module = - *self.compile_module(InstantiatedModule::Base(self.base_module).intern_sized()); - self.process_assignments(); - self.process_registers(); - self.process_memories(); - let clocks_triggered = self.process_clocks(); - self.insns - .push(Insn::Return, self.base_module.source_location()); - Compiled { - insns: Insns::from(self.insns).intern_sized(), - base_module, - extern_modules: Intern::intern_owned(self.extern_modules), - io: Instance::new_unchecked( - ScopedNameId( - NameId("".intern(), Id::new()), - self.original_base_module.name_id(), - ), - self.original_base_module, - self.original_base_module.source_location(), - ), - traces: SimTraces(Intern::intern_owned(self.traces.0)), - trace_memories: Interned::from_iter(self.memories.iter().map( - |&Memory { - mem: _, - memory, - trace, - ports: _, - }| (memory, trace), - )), - clocks_triggered, - } - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -struct CompiledModule { - module_io: Interned<[CompiledValue]>, - trace_decls: TraceModule, -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct Compiled { - insns: Interned>, - base_module: CompiledModule, - extern_modules: Interned<[CompiledExternModule]>, - io: Instance, - traces: SimTraces]>>, - trace_memories: Interned<[(StatePartIndex, TraceMem)]>, - clocks_triggered: Interned<[StatePartIndex]>, -} - -impl Compiled { - pub fn new(module: Interned>) -> Self { - Self::from_canonical(Compiler::new(module.canonical().intern()).compile()) - } - pub fn canonical(self) -> Compiled { - let Self { - insns, - base_module, - extern_modules, - io, - traces, - trace_memories, - clocks_triggered, - } = self; - Compiled { - insns, - base_module, - extern_modules, - io: Instance::from_canonical(io.canonical()), - traces, - trace_memories, - clocks_triggered, - } - } - pub fn from_canonical(canonical: Compiled) -> Self { - let Compiled { - insns, - base_module, - extern_modules, - io, - traces, - trace_memories, - clocks_triggered, - } = canonical; - Self { - insns, - base_module, - extern_modules, - io: Instance::from_canonical(io.canonical()), - traces, - trace_memories, - clocks_triggered, - } - } -} +pub use compiler::{Compiled, Compiler}; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TraceScalarId(usize); @@ -5794,14 +749,14 @@ where } #[derive(Clone, PartialEq, Eq, Hash, Debug)] -struct SimTrace { +pub(crate) struct SimTrace { kind: K, state: S, last_state: S, } #[derive(Copy, Clone, PartialEq, Eq, Hash)] -struct SimTraces(T); +pub(crate) struct SimTraces(T); impl fmt::Debug for SimTraces where @@ -5845,7 +800,7 @@ impl SimTraceDebug for SimTrace { } #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -enum SimTraceKind { +pub(crate) enum SimTraceKind { BigUInt { index: StatePartIndex, ty: UInt, diff --git a/crates/fayalite/src/sim/compiler.rs b/crates/fayalite/src/sim/compiler.rs new file mode 100644 index 0000000..dd06267 --- /dev/null +++ b/crates/fayalite/src/sim/compiler.rs @@ -0,0 +1,5087 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +// See Notices.txt for copyright information + +//! 
Compiler to Interpreter IR for Fayalite Simulation
+
+use crate::{
+    bundle::{BundleField, BundleType},
+    enum_::{EnumType, EnumVariant},
+    expr::{
+        ExprEnum, Flow, ops,
+        target::{
+            GetTarget, Target, TargetBase, TargetPathArrayElement, TargetPathBundleField,
+            TargetPathElement,
+        },
+    },
+    int::BoolOrIntType,
+    intern::{Intern, Interned, Memoize},
+    memory::PortKind,
+    module::{
+        AnnotatedModuleIO, Block, ExternModuleBody, Id, InstantiatedModule, ModuleBody, NameId,
+        NormalModuleBody, ScopedNameId, Stmt, StmtConnect, StmtDeclaration, StmtFormal, StmtIf,
+        StmtInstance, StmtMatch, StmtReg, StmtWire, TargetInInstantiatedModule,
+        transform::deduce_resets::deduce_resets,
+    },
+    prelude::*,
+    reset::{ResetType, ResetTypeDispatch},
+    sim::{
+        ExternModuleSimulation, SimTrace, SimTraceKind, SimTraces, TraceArray, TraceAsyncReset,
+        TraceBool, TraceBundle, TraceClock, TraceDecl, TraceEnumDiscriminant, TraceEnumWithFields,
+        TraceFieldlessEnum, TraceInstance, TraceLocation, TraceMem, TraceMemPort, TraceMemoryId,
+        TraceMemoryLocation, TraceModule, TraceModuleIO, TraceReg, TraceSInt, TraceScalarId,
+        TraceScope, TraceSyncReset, TraceUInt, TraceWire,
+        interpreter::{
+            Insn, InsnField, InsnFieldKind, InsnFieldType, InsnOrLabel, Insns, InsnsBuilding,
+            InsnsBuildingDone, InsnsBuildingKind, Label, MemoryData, SlotDebugData, SmallUInt,
+            StatePartArrayIndex, StatePartArrayIndexed, StatePartIndex, StatePartIndexRange,
+            StatePartKind, StatePartKindBigSlots, StatePartKindMemories, StatePartKindSmallSlots,
+            StatePartLayout, StatePartLen, StatePartsValue, TypeArrayIndex, TypeArrayIndexes,
+            TypeIndex, TypeIndexRange, TypeLayout, TypeLen, TypeParts,
+        },
+    },
+    ty::StaticType,
+    util::HashMap,
+};
+use bitvec::vec::BitVec;
+use num_bigint::BigInt;
+use petgraph::{
+    data::FromElements,
+    visit::{
+        EdgeRef, GraphBase, IntoEdgeReferences, IntoNeighbors, IntoNeighborsDirected,
+        IntoNodeIdentifiers, IntoNodeReferences, NodeRef, VisitMap, Visitable,
+    },
+};
+use std::{collections::BTreeSet, fmt, hash::Hash, marker::PhantomData, mem, ops::IndexMut};
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+enum CondBody {
+    IfTrue {
+        cond: CompiledValue,
+    },
+    IfFalse {
+        cond: CompiledValue,
+    },
+    MatchArm {
+        discriminant: StatePartIndex,
+        variant_index: usize,
+    },
+}
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+struct Cond {
+    body: CondBody,
+    source_location: SourceLocation,
+}
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+pub(crate) struct CompiledBundleField {
+    pub(crate) offset: TypeIndex,
+    pub(crate) ty: CompiledTypeLayout,
+}
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+pub(crate) enum CompiledTypeLayoutBody {
+    Scalar,
+    Array {
+        /// debug names are ignored, use parent's layout instead
+        element: Interned>,
+    },
+    Bundle {
+        /// debug names are ignored, use parent's layout instead
+        fields: Interned<[CompiledBundleField]>,
+    },
+}
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
+pub(crate) struct CompiledTypeLayout {
+    pub(crate) ty: T,
+    pub(crate) layout: TypeLayout,
+    pub(crate) body: CompiledTypeLayoutBody,
+}
+
+impl CompiledTypeLayout {
+    fn with_prefixed_debug_names(self, prefix: &str) -> Self {
+        let Self { ty, layout, body } = self;
+        Self {
+            ty,
+            layout: layout.with_prefixed_debug_names(prefix),
+            body,
+        }
+    }
+    fn with_anonymized_debug_info(self) -> Self {
+        let Self { ty, layout, body } = self;
+        Self {
+            ty,
+            layout: layout.with_anonymized_debug_info(),
+            body,
+        }
+    }
+    fn get(ty: T) -> Self {
+        #[derive(Clone, Copy, PartialEq, Eq,
Hash, Debug)] + struct MyMemoize; + impl Memoize for MyMemoize { + type Input = CanonicalType; + type InputOwned = CanonicalType; + type Output = CompiledTypeLayout; + + fn inner(self, input: &Self::Input) -> Self::Output { + match input { + CanonicalType::UInt(_) + | CanonicalType::SInt(_) + | CanonicalType::Bool(_) + | CanonicalType::Enum(_) + | CanonicalType::AsyncReset(_) + | CanonicalType::SyncReset(_) + | CanonicalType::Reset(_) + | CanonicalType::Clock(_) => { + let mut layout = TypeLayout::empty(); + let debug_data = SlotDebugData { + name: Interned::default(), + ty: *input, + }; + layout.big_slots = StatePartLayout::scalar(debug_data, ()); + CompiledTypeLayout { + ty: *input, + layout: layout.into(), + body: CompiledTypeLayoutBody::Scalar, + } + } + CanonicalType::Array(array) => { + let mut layout = TypeLayout::empty(); + let element = CompiledTypeLayout::get(array.element()).intern_sized(); + for index in 0..array.len() { + layout.allocate( + &element + .layout + .with_prefixed_debug_names(&format!("[{index}]")), + ); + } + CompiledTypeLayout { + ty: *input, + layout: layout.into(), + body: CompiledTypeLayoutBody::Array { element }, + } + } + CanonicalType::PhantomConst(_) => { + let unit_layout = CompiledTypeLayout::get(()); + CompiledTypeLayout { + ty: *input, + layout: unit_layout.layout, + body: unit_layout.body, + } + } + CanonicalType::Bundle(bundle) => { + let mut layout = TypeLayout::empty(); + let fields = bundle + .fields() + .iter() + .map( + |BundleField { + name, + flipped: _, + ty, + }| { + let ty = CompiledTypeLayout::get(*ty); + let offset = layout + .allocate( + &ty.layout + .with_prefixed_debug_names(&format!(".{name}")), + ) + .start(); + CompiledBundleField { offset, ty } + }, + ) + .collect(); + CompiledTypeLayout { + ty: *input, + layout: layout.into(), + body: CompiledTypeLayoutBody::Bundle { fields }, + } + } + } + } + } + let CompiledTypeLayout { + ty: _, + layout, + body, + } = MyMemoize.get_owned(ty.canonical()); + Self { ty, layout, body } + } +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub(crate) struct CompiledValue { + pub(crate) layout: CompiledTypeLayout, + pub(crate) range: TypeIndexRange, + pub(crate) write: Option<(CompiledTypeLayout, TypeIndexRange)>, +} + +impl CompiledValue { + fn write(self) -> (CompiledTypeLayout, TypeIndexRange) { + self.write.unwrap_or((self.layout, self.range)) + } + fn write_value(self) -> Self { + let (layout, range) = self.write(); + Self { + layout, + range, + write: None, + } + } + fn map( + self, + mut f: impl FnMut( + CompiledTypeLayout, + TypeIndexRange, + ) -> (CompiledTypeLayout, TypeIndexRange), + ) -> CompiledValue { + let (layout, range) = f(self.layout, self.range); + CompiledValue { + layout, + range, + write: self.write.map(|(layout, range)| f(layout, range)), + } + } + pub(crate) fn map_ty(self, mut f: impl FnMut(T) -> U) -> CompiledValue { + self.map(|CompiledTypeLayout { ty, layout, body }, range| { + ( + CompiledTypeLayout { + ty: f(ty), + layout, + body, + }, + range, + ) + }) + } +} + +impl CompiledValue { + fn field_by_index(self, field_index: usize) -> CompiledValue { + self.map(|layout, range| { + let CompiledTypeLayout { + ty: _, + layout: _, + body: CompiledTypeLayoutBody::Bundle { fields }, + } = layout + else { + unreachable!(); + }; + ( + fields[field_index].ty, + range.slice(TypeIndexRange::new( + fields[field_index].offset, + fields[field_index].ty.layout.len(), + )), + ) + }) + } + pub(crate) fn field_by_name(self, name: Interned) -> CompiledValue { + 
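+        // look up the field's index via the bundle type's name table, then delegate to field_by_index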
self.field_by_index(self.layout.ty.name_indexes()[&name]) + } +} + +impl CompiledValue { + pub(crate) fn element(self, index: usize) -> CompiledValue { + self.map(|layout, range| { + let CompiledTypeLayoutBody::Array { element } = layout.body else { + unreachable!(); + }; + (*element, range.index_array(element.layout.len(), index)) + }) + } + fn element_dyn( + self, + index_slot: StatePartIndex, + ) -> CompiledExpr { + CompiledExpr::from(self).element_dyn(index_slot) + } +} + +#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] +struct CompiledExpr { + static_part: CompiledValue, + indexes: TypeArrayIndexes, +} + +impl From> for CompiledExpr { + fn from(static_part: CompiledValue) -> Self { + Self { + static_part, + indexes: TypeArrayIndexes::default(), + } + } +} + +impl CompiledExpr { + fn map_ty(self, f: impl FnMut(T) -> U) -> CompiledExpr { + let Self { + static_part, + indexes, + } = self; + CompiledExpr { + static_part: static_part.map_ty(f), + indexes, + } + } + fn add_target_without_indexes_to_set(self, inputs: &mut SlotSet) { + let Self { + static_part, + indexes, + } = self; + indexes.as_ref().for_each_offset(|offset| { + inputs.extend([static_part.range.offset(offset)]); + }); + } + fn add_target_and_indexes_to_set(self, inputs: &mut SlotSet) { + let Self { + static_part: _, + indexes, + } = self; + self.add_target_without_indexes_to_set(inputs); + inputs.extend(indexes.as_ref().iter()); + } +} + +impl CompiledExpr { + fn field_by_index(self, field_index: usize) -> CompiledExpr { + CompiledExpr { + static_part: self.static_part.field_by_index(field_index), + indexes: self.indexes, + } + } + fn field_by_name(self, name: Interned) -> CompiledExpr { + CompiledExpr { + static_part: self.static_part.field_by_name(name), + indexes: self.indexes, + } + } +} + +impl CompiledExpr { + fn element(self, index: usize) -> CompiledExpr { + CompiledExpr { + static_part: self.static_part.element(index), + indexes: self.indexes, + } + } + fn element_dyn( + self, + index_slot: StatePartIndex, + ) -> CompiledExpr { + let CompiledTypeLayoutBody::Array { element } = self.static_part.layout.body else { + unreachable!(); + }; + let stride = element.layout.len(); + let indexes = self.indexes.join(TypeArrayIndex::from_parts( + index_slot, + self.static_part.layout.ty.len(), + stride, + )); + CompiledExpr { + static_part: self.static_part.map(|layout, range| { + let CompiledTypeLayoutBody::Array { element } = layout.body else { + unreachable!(); + }; + (*element, range.index_array(stride, 0)) + }), + indexes, + } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum AssignmentOrSlotIndex { + AssignmentIndex(usize), + SmallSlot(StatePartIndex), + BigSlot(StatePartIndex), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum AssignmentIO { + BigInput { + assignment_index: usize, + slot: StatePartIndex, + }, + SmallInput { + assignment_index: usize, + slot: StatePartIndex, + }, + BigOutput { + assignment_index: usize, + slot: StatePartIndex, + }, + SmallOutput { + assignment_index: usize, + slot: StatePartIndex, + }, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +enum AssignmentsEdge { + IO(AssignmentIO), + AssignmentImmediatePredecessor { + predecessor_assignment_index: usize, + assignment_index: usize, + }, +} + +#[derive(Debug)] +enum Assignments { + Accumulating { + assignments: Vec, + }, + Finalized { + assignments: Box<[Assignment]>, + slots_layout: TypeLayout, + slot_readers: SlotToAssignmentIndexFullMap, + slot_writers: SlotToAssignmentIndexFullMap, + 
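+        // per assignment: the assignments that previously wrote one of its output slots (write-after-write ordering), filled in by finalize()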
assignment_immediate_predecessors: Box<[Box<[usize]>]>, + assignment_immediate_successors: Box<[Box<[usize]>]>, + }, +} + +impl Default for Assignments { + fn default() -> Self { + Self::Accumulating { + assignments: Vec::new(), + } + } +} + +impl Assignments { + fn finalize(&mut self, slots_layout: TypeLayout) { + let Self::Accumulating { assignments } = self else { + unreachable!("already finalized"); + }; + let assignments = mem::take(assignments).into_boxed_slice(); + let mut slot_readers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); + let mut slot_writers = SlotToAssignmentIndexFullMap::new(slots_layout.len()); + let mut assignment_immediate_predecessors = vec![BTreeSet::new(); assignments.len()]; + let mut assignment_immediate_successors = vec![BTreeSet::new(); assignments.len()]; + for (assignment_index, assignment) in assignments.iter().enumerate() { + slot_readers + .keys_for_assignment(assignment_index) + .extend([&assignment.inputs]); + slot_readers + .keys_for_assignment(assignment_index) + .extend(&assignment.conditions); + let SlotSet(TypeParts { + small_slots, + big_slots, + }) = &assignment.outputs; + for &slot in small_slots { + if let Some(&pred) = slot_writers[slot].last() { + assignment_immediate_predecessors[assignment_index].insert(pred); + assignment_immediate_successors[pred].insert(assignment_index); + } + slot_writers[slot].push(assignment_index); + } + for &slot in big_slots { + if let Some(&pred) = slot_writers[slot].last() { + assignment_immediate_predecessors[assignment_index].insert(pred); + assignment_immediate_successors[pred].insert(assignment_index); + } + slot_writers[slot].push(assignment_index); + } + } + *self = Self::Finalized { + assignments, + slots_layout, + slot_readers, + slot_writers, + assignment_immediate_predecessors: assignment_immediate_predecessors + .into_iter() + .map(Box::from_iter) + .collect(), + assignment_immediate_successors: assignment_immediate_successors + .into_iter() + .map(Box::from_iter) + .collect(), + }; + } + fn push(&mut self, v: Assignment) { + let Self::Accumulating { assignments } = self else { + unreachable!("already finalized"); + }; + assignments.push(v); + } + fn assignments(&self) -> &[Assignment] { + let Self::Finalized { assignments, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + assignments + } + fn slots_layout(&self) -> TypeLayout { + let Self::Finalized { slots_layout, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + *slots_layout + } + fn slot_readers(&self) -> &SlotToAssignmentIndexFullMap { + let Self::Finalized { slot_readers, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + slot_readers + } + fn slot_writers(&self) -> &SlotToAssignmentIndexFullMap { + let Self::Finalized { slot_writers, .. } = self else { + unreachable!("Assignments::finalize should have been called"); + }; + slot_writers + } + fn assignment_immediate_predecessors(&self) -> &[Box<[usize]>] { + let Self::Finalized { + assignment_immediate_predecessors, + .. + } = self + else { + unreachable!("Assignments::finalize should have been called"); + }; + assignment_immediate_predecessors + } + fn assignment_immediate_successors(&self) -> &[Box<[usize]>] { + let Self::Finalized { + assignment_immediate_successors, + .. 
+ } = self + else { + unreachable!("Assignments::finalize should have been called"); + }; + assignment_immediate_successors + } + fn elements(&self) -> AssignmentsElements<'_> { + let SlotToAssignmentIndexFullMap(TypeParts { + small_slots, + big_slots, + }) = self.slot_readers(); + AssignmentsElements { + node_indexes: HashMap::with_capacity_and_hasher( + self.assignments().len() + small_slots.len() + big_slots.len(), + Default::default(), + ), + nodes: self.node_references(), + edges: self.edge_references(), + } + } +} + +impl GraphBase for Assignments { + type EdgeId = AssignmentsEdge; + type NodeId = AssignmentOrSlotIndex; +} + +#[derive(Debug, Clone, Copy)] +enum AssignmentsNodeRef<'a> { + Assignment { + index: usize, + #[allow(dead_code, reason = "used in Debug impl")] + assignment: &'a Assignment, + }, + SmallSlot( + StatePartIndex, + #[allow(dead_code, reason = "used in Debug impl")] SlotDebugData, + ), + BigSlot( + StatePartIndex, + #[allow(dead_code, reason = "used in Debug impl")] SlotDebugData, + ), +} + +impl<'a> NodeRef for AssignmentsNodeRef<'a> { + type NodeId = AssignmentOrSlotIndex; + type Weight = AssignmentsNodeRef<'a>; + + fn id(&self) -> Self::NodeId { + match *self { + AssignmentsNodeRef::Assignment { + index, + assignment: _, + } => AssignmentOrSlotIndex::AssignmentIndex(index), + AssignmentsNodeRef::SmallSlot(slot, _) => AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentsNodeRef::BigSlot(slot, _) => AssignmentOrSlotIndex::BigSlot(slot), + } + } + + fn weight(&self) -> &Self::Weight { + self + } +} + +impl<'a> petgraph::visit::Data for &'a Assignments { + type NodeWeight = AssignmentsNodeRef<'a>; + type EdgeWeight = AssignmentsEdge; +} + +struct AssignmentsElements<'a> { + node_indexes: HashMap, + nodes: AssignmentsNodes<'a>, + edges: AssignmentsEdges<'a>, +} + +impl<'a> Iterator for AssignmentsElements<'a> { + type Item = petgraph::data::Element< + <&'a Assignments as petgraph::visit::Data>::NodeWeight, + <&'a Assignments as petgraph::visit::Data>::EdgeWeight, + >; + + fn next(&mut self) -> Option { + let Self { + node_indexes, + nodes, + edges, + } = self; + if let Some(node) = nodes.next() { + node_indexes.insert(node.id(), node_indexes.len()); + return Some(petgraph::data::Element::Node { weight: node }); + } + let edge = edges.next()?; + Some(petgraph::data::Element::Edge { + source: node_indexes[&edge.source()], + target: node_indexes[&edge.target()], + weight: *edge.weight(), + }) + } +} + +#[derive(Clone)] +struct AssignmentsNodeIdentifiers { + assignment_indexes: std::ops::Range, + small_slots: std::ops::Range, + big_slots: std::ops::Range, +} + +impl AssignmentsNodeIdentifiers { + fn internal_iter<'a>(&'a mut self) -> impl Iterator + 'a { + let Self { + assignment_indexes, + small_slots, + big_slots, + } = self; + assignment_indexes + .map(AssignmentOrSlotIndex::AssignmentIndex) + .chain(small_slots.map(|value| { + AssignmentOrSlotIndex::SmallSlot(StatePartIndex { + value, + _phantom: PhantomData, + }) + })) + .chain(big_slots.map(|value| { + AssignmentOrSlotIndex::BigSlot(StatePartIndex { + value, + _phantom: PhantomData, + }) + })) + } +} + +impl Iterator for AssignmentsNodeIdentifiers { + type Item = AssignmentOrSlotIndex; + fn next(&mut self) -> Option { + self.internal_iter().next() + } + + fn nth(&mut self, n: usize) -> Option { + self.internal_iter().nth(n) + } +} + +impl<'a> IntoNodeIdentifiers for &'a Assignments { + type NodeIdentifiers = AssignmentsNodeIdentifiers; + + fn node_identifiers(self) -> Self::NodeIdentifiers { + let TypeLen { + 
small_slots, + big_slots, + } = self.slot_readers().len(); + AssignmentsNodeIdentifiers { + assignment_indexes: 0..self.assignments().len(), + small_slots: 0..small_slots.value, + big_slots: 0..big_slots.value, + } + } +} + +struct AssignmentsNodes<'a> { + assignments: &'a Assignments, + nodes: AssignmentsNodeIdentifiers, +} + +impl<'a> Iterator for AssignmentsNodes<'a> { + type Item = AssignmentsNodeRef<'a>; + + fn next(&mut self) -> Option { + self.nodes.next().map(|node| match node { + AssignmentOrSlotIndex::AssignmentIndex(index) => AssignmentsNodeRef::Assignment { + index, + assignment: &self.assignments.assignments()[index], + }, + AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNodeRef::SmallSlot( + slot, + *self.assignments.slots_layout().small_slots.debug_data(slot), + ), + AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNodeRef::BigSlot( + slot, + *self.assignments.slots_layout().big_slots.debug_data(slot), + ), + }) + } +} + +impl<'a> IntoNodeReferences for &'a Assignments { + type NodeRef = AssignmentsNodeRef<'a>; + type NodeReferences = AssignmentsNodes<'a>; + + fn node_references(self) -> Self::NodeReferences { + AssignmentsNodes { + assignments: self, + nodes: self.node_identifiers(), + } + } +} + +struct AssignmentsNeighborsDirected<'a> { + assignment_indexes: std::slice::Iter<'a, usize>, + small_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, + big_slots: std::collections::btree_set::Iter<'a, StatePartIndex>, +} + +impl Iterator for AssignmentsNeighborsDirected<'_> { + type Item = AssignmentOrSlotIndex; + fn next(&mut self) -> Option { + let Self { + assignment_indexes, + small_slots, + big_slots, + } = self; + if let retval @ Some(_) = assignment_indexes + .next() + .copied() + .map(AssignmentOrSlotIndex::AssignmentIndex) + { + retval + } else if let retval @ Some(_) = small_slots + .next() + .copied() + .map(AssignmentOrSlotIndex::SmallSlot) + { + retval + } else if let retval @ Some(_) = big_slots + .next() + .copied() + .map(AssignmentOrSlotIndex::BigSlot) + { + retval + } else { + None + } + } +} + +impl<'a> IntoNeighbors for &'a Assignments { + type Neighbors = AssignmentsNeighborsDirected<'a>; + + fn neighbors(self, n: Self::NodeId) -> Self::Neighbors { + self.neighbors_directed(n, petgraph::Direction::Outgoing) + } +} + +impl<'a> IntoNeighborsDirected for &'a Assignments { + type NeighborsDirected = AssignmentsNeighborsDirected<'a>; + + fn neighbors_directed( + self, + n: Self::NodeId, + d: petgraph::Direction, + ) -> Self::NeighborsDirected { + use petgraph::Direction::*; + let slot_map = match d { + Outgoing => self.slot_readers(), + Incoming => self.slot_writers(), + }; + match n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + let assignment = &self.assignments()[assignment_index]; + let ( + assignment_indexes, + SlotSet(TypeParts { + small_slots, + big_slots, + }), + ) = match d { + Outgoing => ( + &self.assignment_immediate_successors()[assignment_index], + &assignment.outputs, + ), + Incoming => ( + &self.assignment_immediate_predecessors()[assignment_index], + &assignment.inputs, + ), + }; + AssignmentsNeighborsDirected { + assignment_indexes: assignment_indexes.iter(), + small_slots: small_slots.iter(), + big_slots: big_slots.iter(), + } + } + AssignmentOrSlotIndex::SmallSlot(slot) => AssignmentsNeighborsDirected { + assignment_indexes: slot_map[slot].iter(), + small_slots: Default::default(), + big_slots: Default::default(), + }, + AssignmentOrSlotIndex::BigSlot(slot) => AssignmentsNeighborsDirected { + 
assignment_indexes: slot_map[slot].iter(), + small_slots: Default::default(), + big_slots: Default::default(), + }, + } + } +} + +impl EdgeRef for AssignmentsEdge { + type NodeId = AssignmentOrSlotIndex; + type EdgeId = AssignmentsEdge; + type Weight = AssignmentsEdge; + + fn source(&self) -> Self::NodeId { + match *self { + AssignmentsEdge::IO(AssignmentIO::BigInput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::BigSlot(slot), + AssignmentsEdge::IO(AssignmentIO::SmallInput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentsEdge::IO(AssignmentIO::BigOutput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::IO(AssignmentIO::SmallOutput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::AssignmentImmediatePredecessor { + predecessor_assignment_index, + assignment_index: _, + } => AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), + } + } + + fn target(&self) -> Self::NodeId { + match *self { + AssignmentsEdge::IO(AssignmentIO::BigInput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::IO(AssignmentIO::SmallInput { + assignment_index, + slot: _, + }) => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentsEdge::IO(AssignmentIO::BigOutput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::BigSlot(slot), + AssignmentsEdge::IO(AssignmentIO::SmallOutput { + assignment_index: _, + slot, + }) => AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentsEdge::AssignmentImmediatePredecessor { + predecessor_assignment_index: _, + assignment_index, + } => AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + } + } + + fn weight(&self) -> &Self::Weight { + self + } + + fn id(&self) -> Self::EdgeId { + *self + } +} + +struct AssignmentsEdges<'a> { + assignments: &'a Assignments, + nodes: AssignmentsNodeIdentifiers, + outgoing_neighbors: Option<(AssignmentOrSlotIndex, AssignmentsNeighborsDirected<'a>)>, +} + +impl Iterator for AssignmentsEdges<'_> { + type Item = AssignmentsEdge; + + fn next(&mut self) -> Option { + loop { + if let Some((node, outgoing_neighbors)) = &mut self.outgoing_neighbors { + if let Some(outgoing_neighbor) = outgoing_neighbors.next() { + return Some(match (*node, outgoing_neighbor) { + ( + AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), + AssignmentOrSlotIndex::SmallSlot(_) | AssignmentOrSlotIndex::BigSlot(_), + ) => unreachable!(), + ( + AssignmentOrSlotIndex::AssignmentIndex(predecessor_assignment_index), + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + ) => AssignmentsEdge::AssignmentImmediatePredecessor { + predecessor_assignment_index, + assignment_index, + }, + ( + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentOrSlotIndex::SmallSlot(slot), + ) => AssignmentsEdge::IO(AssignmentIO::SmallOutput { + assignment_index, + slot, + }), + ( + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + AssignmentOrSlotIndex::BigSlot(slot), + ) => AssignmentsEdge::IO(AssignmentIO::BigOutput { + assignment_index, + slot, + }), + ( + AssignmentOrSlotIndex::SmallSlot(slot), + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + ) => AssignmentsEdge::IO(AssignmentIO::SmallInput { + assignment_index, + slot, + }), + ( + AssignmentOrSlotIndex::BigSlot(slot), + AssignmentOrSlotIndex::AssignmentIndex(assignment_index), + 
) => AssignmentsEdge::IO(AssignmentIO::BigInput { + assignment_index, + slot, + }), + }); + } + } + let node = self.nodes.next()?; + self.outgoing_neighbors = Some(( + node, + self.assignments + .neighbors_directed(node, petgraph::Direction::Outgoing), + )); + } + } +} + +impl<'a> IntoEdgeReferences for &'a Assignments { + type EdgeRef = AssignmentsEdge; + type EdgeReferences = AssignmentsEdges<'a>; + + fn edge_references(self) -> Self::EdgeReferences { + AssignmentsEdges { + assignments: self, + nodes: self.node_identifiers(), + outgoing_neighbors: None, + } + } +} + +struct AssignmentsVisitMap { + assignments: Vec, + slots: DenseSlotSet, +} + +impl VisitMap for AssignmentsVisitMap { + fn visit(&mut self, n: AssignmentOrSlotIndex) -> bool { + match n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + !mem::replace(&mut self.assignments[assignment_index], true) + } + AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.insert(slot), + AssignmentOrSlotIndex::BigSlot(slot) => self.slots.insert(slot), + } + } + + fn is_visited(&self, n: &AssignmentOrSlotIndex) -> bool { + match *n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + self.assignments[assignment_index] + } + AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.contains(slot), + AssignmentOrSlotIndex::BigSlot(slot) => self.slots.contains(slot), + } + } + + fn unvisit(&mut self, n: AssignmentOrSlotIndex) -> bool { + match n { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => { + mem::replace(&mut self.assignments[assignment_index], false) + } + AssignmentOrSlotIndex::SmallSlot(slot) => self.slots.remove(slot), + AssignmentOrSlotIndex::BigSlot(slot) => self.slots.remove(slot), + } + } +} + +impl Visitable for Assignments { + type Map = AssignmentsVisitMap; + + fn visit_map(self: &Self) -> Self::Map { + AssignmentsVisitMap { + assignments: vec![false; self.assignments().len()], + slots: DenseSlotSet::new(self.slot_readers().len()), + } + } + + fn reset_map(self: &Self, map: &mut Self::Map) { + let AssignmentsVisitMap { assignments, slots } = map; + assignments.clear(); + assignments.resize(self.assignments().len(), false); + if slots.len() != self.slot_readers().len() { + *slots = DenseSlotSet::new(self.slot_readers().len()); + } else { + slots.clear(); + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct DenseSlotSet(TypeParts); + +impl DenseSlotSet { + fn new(len: TypeLen) -> Self { + let TypeLen { + small_slots, + big_slots, + } = len; + Self(TypeParts { + small_slots: vec![false; small_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + big_slots: vec![false; big_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + }) + } + fn len(&self) -> TypeLen { + TypeLen { + small_slots: StatePartLen { + value: self.0.small_slots.len() as _, + _phantom: PhantomData, + }, + big_slots: StatePartLen { + value: self.0.big_slots.len() as _, + _phantom: PhantomData, + }, + } + } + fn clear(&mut self) { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.fill(false); + big_slots.fill(false); + } +} + +impl StatePartsValue for DenseSlotSet { + type Value = Box<[bool]>; +} + +trait DenseSlotSetMethods: Extend> { + fn contains(&self, k: StatePartIndex) -> bool; + fn remove(&mut self, k: StatePartIndex) -> bool { + self.take(k).is_some() + } + fn take(&mut self, k: StatePartIndex) -> Option>; + fn replace(&mut self, k: StatePartIndex) -> Option>; + fn insert(&mut self, k: StatePartIndex) -> bool { + self.replace(k).is_none() 
+ } +} + +impl Extend> for DenseSlotSet +where + Self: DenseSlotSetMethods, +{ + fn extend>>(&mut self, iter: T) { + iter.into_iter().for_each(|v| { + self.insert(v); + }); + } +} + +impl DenseSlotSetMethods for DenseSlotSet { + fn contains(&self, k: StatePartIndex) -> bool { + self.0.small_slots[k.as_usize()] + } + + fn take( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(self.0.small_slots.get_mut(k.as_usize())?, false).then_some(k) + } + + fn replace( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(&mut self.0.small_slots[k.as_usize()], true).then_some(k) + } +} + +impl DenseSlotSetMethods for DenseSlotSet { + fn contains(&self, k: StatePartIndex) -> bool { + self.0.big_slots[k.as_usize()] + } + + fn take( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(self.0.big_slots.get_mut(k.as_usize())?, false).then_some(k) + } + + fn replace( + &mut self, + k: StatePartIndex, + ) -> Option> { + mem::replace(&mut self.0.big_slots[k.as_usize()], true).then_some(k) + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +struct SlotVec(TypeParts); + +impl SlotVec { + fn is_empty(&self) -> bool { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.is_empty() && big_slots.is_empty() + } +} + +impl StatePartsValue for SlotVec { + type Value = Vec>; +} + +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +struct SlotSet(TypeParts); + +impl SlotSet { + fn is_empty(&self) -> bool { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.is_empty() && big_slots.is_empty() + } + fn for_each( + &self, + small_slots_fn: impl FnMut(StatePartIndex), + big_slots_fn: impl FnMut(StatePartIndex), + ) { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.iter().copied().for_each(small_slots_fn); + big_slots.iter().copied().for_each(big_slots_fn); + } + fn all( + &self, + small_slots_fn: impl FnMut(StatePartIndex) -> bool, + big_slots_fn: impl FnMut(StatePartIndex) -> bool, + ) -> bool { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.iter().copied().all(small_slots_fn) + && big_slots.iter().copied().all(big_slots_fn) + } +} + +impl StatePartsValue for SlotSet { + type Value = BTreeSet>; +} + +impl Extend> for SlotSet { + fn extend>>(&mut self, iter: T) { + self.0.small_slots.extend(iter); + } +} + +impl Extend> for SlotSet { + fn extend>>(&mut self, iter: T) { + self.0.big_slots.extend(iter); + } +} + +impl Extend> for SlotSet +where + Self: Extend>, +{ + fn extend>>(&mut self, iter: T) { + self.extend(iter.into_iter().flat_map(|v| v.iter())); + } +} + +impl Extend for SlotSet { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each( + |TypeIndexRange { + small_slots, + big_slots, + }| { + self.extend(small_slots.iter()); + self.extend(big_slots.iter()); + }, + ) + } +} + +impl Extend for SlotSet { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each( + |TypeArrayIndex { + small_slots, + big_slots, + }| { + self.extend([small_slots]); + self.extend([big_slots]); + }, + ) + } +} + +impl Extend> for SlotSet { + fn extend>>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|v| v.index)); + } +} + +impl Extend for SlotSet { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each(|cond_body| match cond_body { + CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { + self.extend([cond.range]); + } + CondBody::MatchArm { + discriminant, + variant_index: _, + } => self.extend([discriminant]), + }) + } +} + +impl Extend 
for SlotSet { + fn extend>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|v| v.body)) + } +} + +#[derive(Debug)] +struct Assignment { + inputs: SlotSet, + outputs: SlotSet, + conditions: Interned<[Cond]>, + insns: Vec, + source_location: SourceLocation, +} + +#[derive(Debug)] +struct SlotToAssignmentIndexFullMap(TypeParts); + +impl StatePartsValue for SlotToAssignmentIndexFullMap { + type Value = Box<[Vec]>; +} + +impl SlotToAssignmentIndexFullMap { + fn new(len: TypeLen) -> Self { + let TypeLen { + small_slots, + big_slots, + } = len; + Self(TypeParts { + small_slots: vec![Vec::new(); small_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + big_slots: vec![Vec::new(); big_slots.value.try_into().expect("length too big")] + .into_boxed_slice(), + }) + } + fn len(&self) -> TypeLen { + TypeLen { + small_slots: StatePartLen { + value: self.0.small_slots.len() as _, + _phantom: PhantomData, + }, + big_slots: StatePartLen { + value: self.0.big_slots.len() as _, + _phantom: PhantomData, + }, + } + } + fn keys_for_assignment( + &mut self, + assignment_index: usize, + ) -> SlotToAssignmentIndexFullMapKeysForAssignment<'_> { + SlotToAssignmentIndexFullMapKeysForAssignment { + map: self, + assignment_index, + } + } + fn for_each( + &self, + mut small_slots_fn: impl FnMut(StatePartIndex, &[usize]), + mut big_slots_fn: impl FnMut(StatePartIndex, &[usize]), + ) { + let Self(TypeParts { + small_slots, + big_slots, + }) = self; + small_slots.iter().enumerate().for_each(|(k, v)| { + small_slots_fn( + StatePartIndex { + value: k as _, + _phantom: PhantomData, + }, + v, + ) + }); + big_slots.iter().enumerate().for_each(|(k, v)| { + big_slots_fn( + StatePartIndex { + value: k as _, + _phantom: PhantomData, + }, + v, + ) + }); + } +} + +impl std::ops::Index> for SlotToAssignmentIndexFullMap { + type Output = Vec; + + fn index(&self, index: StatePartIndex) -> &Self::Output { + &self.0.small_slots[index.as_usize()] + } +} + +impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { + fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { + &mut self.0.small_slots[index.as_usize()] + } +} + +impl std::ops::Index> for SlotToAssignmentIndexFullMap { + type Output = Vec; + + fn index(&self, index: StatePartIndex) -> &Self::Output { + &self.0.big_slots[index.as_usize()] + } +} + +impl std::ops::IndexMut> for SlotToAssignmentIndexFullMap { + fn index_mut(&mut self, index: StatePartIndex) -> &mut Self::Output { + &mut self.0.big_slots[index.as_usize()] + } +} + +struct SlotToAssignmentIndexFullMapKeysForAssignment<'a> { + map: &'a mut SlotToAssignmentIndexFullMap, + assignment_index: usize, +} + +impl<'a, K: StatePartKind> Extend<&'a StatePartIndex> + for SlotToAssignmentIndexFullMapKeysForAssignment<'_> +where + Self: Extend>, +{ + fn extend>>(&mut self, iter: T) { + self.extend(iter.into_iter().copied()); + } +} + +impl Extend> + for SlotToAssignmentIndexFullMapKeysForAssignment<'_> +where + SlotToAssignmentIndexFullMap: IndexMut, Output = Vec>, +{ + fn extend>>(&mut self, iter: T) { + iter.into_iter() + .for_each(|slot| self.map[slot].push(self.assignment_index)); + } +} + +impl<'a> Extend<&'a SlotSet> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each( + |SlotSet(TypeParts { + small_slots, + big_slots, + })| { + self.extend(small_slots); + self.extend(big_slots); + }, + ); + } +} + +impl<'a> Extend<&'a Cond> for SlotToAssignmentIndexFullMapKeysForAssignment<'_> { + fn extend>(&mut self, iter: T) { 
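+        // record this assignment as a reader of every slot each condition's value depends on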
+ iter.into_iter().for_each(|cond| match cond.body { + CondBody::IfTrue { cond } | CondBody::IfFalse { cond } => { + let CompiledValue { + range: + TypeIndexRange { + small_slots, + big_slots, + }, + layout: _, + write: _, + } = cond; + self.extend(small_slots.iter()); + self.extend(big_slots.iter()); + } + CondBody::MatchArm { + discriminant, + variant_index: _, + } => self.extend([discriminant]), + }); + } +} + +impl Assignment { + fn new( + conditions: Interned<[Cond]>, + insns: Vec, + source_location: SourceLocation, + ) -> Self { + let mut inputs = SlotSet::default(); + let mut outputs = SlotSet::default(); + for insn in &insns { + let insn = match insn { + InsnOrLabel::Insn(insn) => insn, + InsnOrLabel::Label(_) => continue, + }; + for InsnField { ty, kind } in insn.fields() { + match (kind, ty) { + (InsnFieldKind::Input, InsnFieldType::SmallSlot(&slot)) => { + inputs.extend([slot]); + } + (InsnFieldKind::Input, InsnFieldType::BigSlot(&slot)) => { + inputs.extend([slot]); + } + ( + InsnFieldKind::Input, + InsnFieldType::SmallSlotArrayIndexed(&array_indexed), + ) => { + array_indexed.for_each_target(|slot| inputs.extend([slot])); + inputs.extend(array_indexed.indexes); + } + (InsnFieldKind::Input, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { + array_indexed.for_each_target(|slot| inputs.extend([slot])); + inputs.extend(array_indexed.indexes); + } + (InsnFieldKind::Output, InsnFieldType::SmallSlot(&slot)) => { + outputs.extend([slot]); + } + (InsnFieldKind::Output, InsnFieldType::BigSlot(&slot)) => { + outputs.extend([slot]); + } + ( + InsnFieldKind::Output, + InsnFieldType::SmallSlotArrayIndexed(&array_indexed), + ) => { + array_indexed.for_each_target(|slot| { + outputs.extend([slot]); + }); + inputs.extend(array_indexed.indexes); + } + (InsnFieldKind::Output, InsnFieldType::BigSlotArrayIndexed(&array_indexed)) => { + array_indexed.for_each_target(|slot| { + outputs.extend([slot]); + }); + inputs.extend(array_indexed.indexes); + } + ( + _, + InsnFieldType::Memory(_) + | InsnFieldType::SmallUInt(_) + | InsnFieldType::SmallSInt(_) + | InsnFieldType::InternedBigInt(_) + | InsnFieldType::U8(_) + | InsnFieldType::USize(_) + | InsnFieldType::Empty(_), + ) + | ( + InsnFieldKind::Immediate + | InsnFieldKind::Memory + | InsnFieldKind::BranchTarget, + _, + ) => {} + } + } + } + Self { + inputs, + outputs, + conditions, + insns, + source_location, + } + } +} + +#[derive(Debug)] +struct RegisterReset { + is_async: bool, + init: CompiledValue, + rst: StatePartIndex, +} + +#[derive(Debug, Clone, Copy)] +struct ClockTrigger { + last_clk_was_low: StatePartIndex, + clk: StatePartIndex, + clk_triggered: StatePartIndex, + source_location: SourceLocation, +} + +#[derive(Debug)] +struct Register { + value: CompiledValue, + clk_triggered: StatePartIndex, + reset: Option, + source_location: SourceLocation, +} + +#[derive(Debug)] + +struct MemoryPort { + clk_triggered: StatePartIndex, + addr_delayed: Vec>, + en_delayed: Vec>, + #[allow(dead_code, reason = "used in Debug impl")] + data_layout: CompiledTypeLayout, + read_data_delayed: Vec, + write_data_delayed: Vec, + write_mask_delayed: Vec, + write_mode_delayed: Vec>, + write_insns: Vec, +} + +struct MemoryPortReadInsns<'a> { + addr: StatePartIndex, + en: StatePartIndex, + write_mode: Option>, + data: TypeIndexRange, + insns: &'a mut Vec, +} + +struct MemoryPortWriteInsns<'a> { + addr: StatePartIndex, + en: StatePartIndex, + write_mode: Option>, + data: TypeIndexRange, + mask: TypeIndexRange, + insns: &'a mut Vec, +} + +#[derive(Debug)] +struct 
Memory {
+    mem: Mem,
+    memory: StatePartIndex,
+    trace: TraceMem,
+    ports: Vec,
+}
+
+#[derive(Copy, Clone)]
+enum MakeTraceDeclTarget {
+    Expr(Expr),
+    Memory {
+        id: TraceMemoryId,
+        depth: usize,
+        stride: usize,
+        start: usize,
+        ty: CanonicalType,
+    },
+}
+
+impl MakeTraceDeclTarget {
+    fn flow(self) -> Flow {
+        match self {
+            MakeTraceDeclTarget::Expr(expr) => Expr::flow(expr),
+            MakeTraceDeclTarget::Memory { .. } => Flow::Duplex,
+        }
+    }
+    fn ty(self) -> CanonicalType {
+        match self {
+            MakeTraceDeclTarget::Expr(expr) => Expr::ty(expr),
+            MakeTraceDeclTarget::Memory { ty, .. } => ty,
+        }
+    }
+}
+
+struct DebugOpaque(T);
+
+impl fmt::Debug for DebugOpaque {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("<...>")
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub(crate) struct CompiledExternModule {
+    pub(crate) module_io_targets: Interned<[Target]>,
+    pub(crate) module_io: Interned<[CompiledValue]>,
+    pub(crate) simulation: ExternModuleSimulation,
+}
+
+#[derive(Debug)]
+pub struct Compiler {
+    insns: Insns,
+    original_base_module: Interned>,
+    base_module: Interned>,
+    modules: HashMap,
+    extern_modules: Vec,
+    compiled_values: HashMap>,
+    compiled_exprs: HashMap, CompiledExpr>,
+    compiled_exprs_to_values: HashMap, CompiledValue>,
+    decl_conditions: HashMap>,
+    compiled_values_to_dyn_array_indexes:
+        HashMap, StatePartIndex>,
+    compiled_value_bool_dest_is_small_map:
+        HashMap, StatePartIndex>,
+    assignments: Assignments,
+    clock_triggers: Vec,
+    compiled_value_to_clock_trigger_map: HashMap, ClockTrigger>,
+    enum_discriminants: HashMap, StatePartIndex>,
+    registers: Vec,
+    traces: SimTraces>>,
+    memories: Vec,
+    dump_assignments_dot: Option>>,
+}
+
+impl Compiler {
+    pub fn new(base_module: Interned>) -> Self {
+        let original_base_module = base_module;
+        let base_module = deduce_resets(base_module, true)
+            .unwrap_or_else(|e| panic!("failed to deduce reset types: {e}"));
+        Self {
+            insns: Insns::new(),
+            original_base_module,
+            base_module,
+            modules: HashMap::default(),
+            extern_modules: Vec::new(),
+            compiled_values: HashMap::default(),
+            compiled_exprs: HashMap::default(),
+            compiled_exprs_to_values: HashMap::default(),
+            decl_conditions: HashMap::default(),
+            compiled_values_to_dyn_array_indexes: HashMap::default(),
+            compiled_value_bool_dest_is_small_map: HashMap::default(),
+            assignments: Assignments::default(),
+            clock_triggers: Vec::new(),
+            compiled_value_to_clock_trigger_map: HashMap::default(),
+            enum_discriminants: HashMap::default(),
+            registers: Vec::new(),
+            traces: SimTraces(Vec::new()),
+            memories: Vec::new(),
+            dump_assignments_dot: None,
+        }
+    }
+    #[doc(hidden)]
+    /// This is explicitly unstable and may be changed/removed at any time
+    pub fn dump_assignments_dot(&mut self, callback: Box) {
+        self.dump_assignments_dot = Some(DebugOpaque(callback));
+    }
+    fn new_sim_trace(&mut self, kind: SimTraceKind) -> TraceScalarId {
+        let id = TraceScalarId(self.traces.0.len());
+        self.traces.0.push(SimTrace {
+            kind,
+            state: (),
+            last_state: (),
+        });
+        id
+    }
+    fn make_trace_scalar_helper(
+        &mut self,
+        instantiated_module: InstantiatedModule,
+        target: MakeTraceDeclTarget,
+        source_location: SourceLocation,
+        small_kind: impl FnOnce(StatePartIndex) -> SimTraceKind,
+        big_kind: impl FnOnce(StatePartIndex) -> SimTraceKind,
+    ) -> TraceLocation {
+        match target {
+            MakeTraceDeclTarget::Expr(target) => {
+                let compiled_value = self.compile_expr(instantiated_module, target);
+                let compiled_value =
self.compiled_expr_to_value(compiled_value, source_location); + TraceLocation::Scalar(self.new_sim_trace(match compiled_value.range.len() { + TypeLen::A_SMALL_SLOT => small_kind(compiled_value.range.small_slots.start), + TypeLen::A_BIG_SLOT => big_kind(compiled_value.range.big_slots.start), + _ => unreachable!(), + })) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty, + } => TraceLocation::Memory(TraceMemoryLocation { + id, + depth, + stride, + start, + len: ty.bit_width(), + }), + } + } + fn make_trace_scalar( + &mut self, + instantiated_module: InstantiatedModule, + target: MakeTraceDeclTarget, + name: Interned, + source_location: SourceLocation, + ) -> TraceDecl { + let flow = target.flow(); + match target.ty() { + CanonicalType::UInt(ty) => TraceUInt { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallUInt { index, ty }, + |index| SimTraceKind::BigUInt { index, ty }, + ), + name, + ty, + flow, + } + .into(), + CanonicalType::SInt(ty) => TraceSInt { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallSInt { index, ty }, + |index| SimTraceKind::BigSInt { index, ty }, + ), + name, + ty, + flow, + } + .into(), + CanonicalType::Bool(_) => TraceBool { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallBool { index }, + |index| SimTraceKind::BigBool { index }, + ), + name, + flow, + } + .into(), + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(ty) => { + assert_eq!(ty.discriminant_bit_width(), ty.type_properties().bit_width); + let location = match target { + MakeTraceDeclTarget::Expr(target) => { + let compiled_value = self.compile_expr(instantiated_module, target); + let compiled_value = + self.compiled_expr_to_value(compiled_value, source_location); + let discriminant = self.compile_enum_discriminant( + compiled_value.map_ty(Enum::from_canonical), + source_location, + ); + TraceLocation::Scalar(self.new_sim_trace(SimTraceKind::EnumDiscriminant { + index: discriminant, + ty, + })) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => TraceLocation::Memory(TraceMemoryLocation { + id, + depth, + stride, + start, + len: ty.type_properties().bit_width, + }), + }; + TraceFieldlessEnum { + location, + name, + ty, + flow, + } + .into() + } + CanonicalType::Bundle(_) | CanonicalType::PhantomConst(_) => unreachable!(), + CanonicalType::AsyncReset(_) => TraceAsyncReset { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallAsyncReset { index }, + |index| SimTraceKind::BigAsyncReset { index }, + ), + name, + flow, + } + .into(), + CanonicalType::SyncReset(_) => TraceSyncReset { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallSyncReset { index }, + |index| SimTraceKind::BigSyncReset { index }, + ), + name, + flow, + } + .into(), + CanonicalType::Reset(_) => unreachable!(), + CanonicalType::Clock(_) => TraceClock { + location: self.make_trace_scalar_helper( + instantiated_module, + target, + source_location, + |index| SimTraceKind::SmallClock { index }, + |index| SimTraceKind::BigClock { index }, + ), + name, + flow, + } + .into(), + } + } + fn make_trace_decl_child( + &mut self, + instantiated_module: InstantiatedModule, + target: MakeTraceDeclTarget, + name: Interned, 
+ source_location: SourceLocation, + ) -> TraceDecl { + match target.ty() { + CanonicalType::Array(ty) => { + let elements = Interned::from_iter((0..ty.len()).map(|index| { + self.make_trace_decl_child( + instantiated_module, + match target { + MakeTraceDeclTarget::Expr(target) => MakeTraceDeclTarget::Expr( + Expr::::from_canonical(target)[index], + ), + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start: start + ty.element().bit_width() * index, + ty: ty.element(), + }, + }, + Intern::intern_owned(format!("[{index}]")), + source_location, + ) + })); + TraceArray { + name, + elements, + ty, + flow: target.flow(), + } + .into() + } + CanonicalType::Enum(ty) => { + if ty.variants().iter().all(|v| v.ty.is_none()) { + self.make_trace_scalar(instantiated_module, target, name, source_location) + } else { + let flow = target.flow(); + let location = match target { + MakeTraceDeclTarget::Expr(target) => { + let compiled_value = self.compile_expr(instantiated_module, target); + let compiled_value = + self.compiled_expr_to_value(compiled_value, source_location); + let discriminant = self.compile_enum_discriminant( + compiled_value.map_ty(Enum::from_canonical), + source_location, + ); + TraceLocation::Scalar(self.new_sim_trace( + SimTraceKind::EnumDiscriminant { + index: discriminant, + ty, + }, + )) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => TraceLocation::Memory(TraceMemoryLocation { + id, + depth, + stride, + start, + len: ty.discriminant_bit_width(), + }), + }; + let discriminant = TraceEnumDiscriminant { + location, + name: "$tag".intern(), + ty, + flow, + }; + let non_empty_fields = + Interned::from_iter(ty.variants().into_iter().enumerate().flat_map( + |(variant_index, variant)| { + variant.ty.map(|variant_ty| { + self.make_trace_decl_child( + instantiated_module, + match target { + MakeTraceDeclTarget::Expr(target) => { + MakeTraceDeclTarget::Expr( + ops::VariantAccess::new_by_index( + Expr::::from_canonical(target), + variant_index, + ) + .to_expr(), + ) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start: start + ty.discriminant_bit_width(), + ty: variant_ty, + }, + }, + variant.name, + source_location, + ) + }) + }, + )); + TraceEnumWithFields { + name, + discriminant, + non_empty_fields, + ty, + flow, + } + .into() + } + } + CanonicalType::Bundle(ty) => { + let fields = Interned::from_iter(ty.fields().iter().zip(ty.field_offsets()).map( + |(field, field_offset)| { + self.make_trace_decl_child( + instantiated_module, + match target { + MakeTraceDeclTarget::Expr(target) => { + MakeTraceDeclTarget::Expr(Expr::field( + Expr::::from_canonical(target), + &field.name, + )) + } + MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start, + ty: _, + } => MakeTraceDeclTarget::Memory { + id, + depth, + stride, + start: start + field_offset, + ty: field.ty, + }, + }, + field.name, + source_location, + ) + }, + )); + TraceBundle { + name, + fields, + ty, + flow: target.flow(), + } + .into() + } + CanonicalType::UInt(_) + | CanonicalType::SInt(_) + | CanonicalType::Bool(_) + | CanonicalType::AsyncReset(_) + | CanonicalType::SyncReset(_) + | CanonicalType::Reset(_) + | CanonicalType::Clock(_) => { + self.make_trace_scalar(instantiated_module, target, name, source_location) + } + CanonicalType::PhantomConst(_) => TraceBundle { + name, + fields: Interned::default(), + ty: 
Bundle::new(Interned::default()), + flow: target.flow(), + } + .into(), + } + } + fn make_trace_decl( + &mut self, + instantiated_module: InstantiatedModule, + target_base: TargetBase, + ) -> TraceDecl { + let target = MakeTraceDeclTarget::Expr(target_base.to_expr()); + match target_base { + TargetBase::ModuleIO(module_io) => TraceModuleIO { + name: module_io.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + module_io.name(), + module_io.source_location(), + ) + .intern(), + ty: module_io.ty(), + flow: module_io.flow(), + } + .into(), + TargetBase::MemPort(mem_port) => { + let name = Intern::intern_owned(mem_port.port_name().to_string()); + let TraceDecl::Scope(TraceScope::Bundle(bundle)) = self.make_trace_decl_child( + instantiated_module, + target, + name, + mem_port.source_location(), + ) else { + unreachable!() + }; + TraceMemPort { + name, + bundle, + ty: mem_port.ty(), + } + .into() + } + TargetBase::Reg(reg) => TraceReg { + name: reg.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + reg.name(), + reg.source_location(), + ) + .intern(), + ty: reg.ty(), + } + .into(), + TargetBase::RegSync(reg) => TraceReg { + name: reg.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + reg.name(), + reg.source_location(), + ) + .intern(), + ty: reg.ty(), + } + .into(), + TargetBase::RegAsync(reg) => TraceReg { + name: reg.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + reg.name(), + reg.source_location(), + ) + .intern(), + ty: reg.ty(), + } + .into(), + TargetBase::Wire(wire) => TraceWire { + name: wire.name(), + child: self + .make_trace_decl_child( + instantiated_module, + target, + wire.name(), + wire.source_location(), + ) + .intern(), + ty: wire.ty(), + } + .into(), + TargetBase::Instance(instance) => { + let TraceDecl::Scope(TraceScope::Bundle(instance_io)) = self.make_trace_decl_child( + instantiated_module, + target, + instance.name(), + instance.source_location(), + ) else { + unreachable!() + }; + let compiled_module = &self.modules[&InstantiatedModule::Child { + parent: instantiated_module.intern(), + instance: instance.intern(), + }]; + TraceInstance { + name: instance.name(), + instance_io, + module: compiled_module.trace_decls, + ty: instance.ty(), + } + .into() + } + } + } + fn compile_value( + &mut self, + target: TargetInInstantiatedModule, + ) -> CompiledValue { + if let Some(&retval) = self.compiled_values.get(&target) { + return retval; + } + let retval = match target.target { + Target::Base(base) => { + let unprefixed_layout = CompiledTypeLayout::get(base.canonical_ty()); + let layout = unprefixed_layout.with_prefixed_debug_names(&format!( + "{:?}.{:?}", + target.instantiated_module, + base.target_name() + )); + let range = self.insns.allocate_variable(&layout.layout); + let write = match *base { + TargetBase::ModuleIO(_) + | TargetBase::MemPort(_) + | TargetBase::Wire(_) + | TargetBase::Instance(_) => None, + TargetBase::Reg(_) | TargetBase::RegSync(_) | TargetBase::RegAsync(_) => { + let write_layout = unprefixed_layout.with_prefixed_debug_names(&format!( + "{:?}.{:?}$next", + target.instantiated_module, + base.target_name() + )); + Some(( + write_layout, + self.insns.allocate_variable(&write_layout.layout), + )) + } + }; + CompiledValue { + range, + layout, + write, + } + } + Target::Child(target_child) => { + let parent = self.compile_value(TargetInInstantiatedModule { + instantiated_module: target.instantiated_module, + target: 
*target_child.parent(), + }); + match *target_child.path_element() { + TargetPathElement::BundleField(TargetPathBundleField { name }) => { + parent.map_ty(Bundle::from_canonical).field_by_name(name) + } + TargetPathElement::ArrayElement(TargetPathArrayElement { index }) => { + parent.map_ty(Array::from_canonical).element(index) + } + TargetPathElement::DynArrayElement(_) => unreachable!(), + } + } + }; + self.compiled_values.insert(target, retval); + retval + } + fn compiled_expr_to_value( + &mut self, + expr: CompiledExpr, + source_location: SourceLocation, + ) -> CompiledValue { + if let Some(&retval) = self.compiled_exprs_to_values.get(&expr) { + return retval; + } + assert!( + expr.static_part.layout.ty.is_passive(), + "invalid expression passed to compiled_expr_to_value -- type must be passive", + ); + let CompiledExpr { + static_part, + indexes, + } = expr; + let retval = if indexes.as_ref().is_empty() { + CompiledValue { + layout: static_part.layout, + range: static_part.range, + write: None, + } + } else { + let layout = static_part.layout.with_anonymized_debug_info(); + let retval = CompiledValue { + layout, + range: self.insns.allocate_variable(&layout.layout), + write: None, + }; + let TypeIndexRange { + small_slots, + big_slots, + } = retval.range; + self.add_assignment( + Interned::default(), + small_slots + .iter() + .zip(static_part.range.small_slots.iter()) + .map(|(dest, base)| Insn::ReadSmallIndexed { + dest, + src: StatePartArrayIndexed { + base, + indexes: indexes.small_slots, + }, + }) + .chain( + big_slots + .iter() + .zip(static_part.range.big_slots.iter()) + .map(|(dest, base)| Insn::ReadIndexed { + dest, + src: StatePartArrayIndexed { + base, + indexes: indexes.big_slots, + }, + }), + ), + source_location, + ); + retval + }; + self.compiled_exprs_to_values.insert(expr, retval); + retval + } + fn add_assignment>( + &mut self, + conditions: Interned<[Cond]>, + insns: impl IntoIterator, + source_location: SourceLocation, + ) { + let insns = Vec::from_iter(insns.into_iter().map(Into::into)); + self.assignments + .push(Assignment::new(conditions, insns, source_location)); + } + fn simple_big_expr_input( + &mut self, + instantiated_module: InstantiatedModule, + input: Expr, + ) -> StatePartIndex { + let input = self.compile_expr(instantiated_module, input); + let input = + self.compiled_expr_to_value(input, instantiated_module.leaf_module().source_location()); + assert_eq!(input.range.len(), TypeLen::A_BIG_SLOT); + input.range.big_slots.start + } + fn compile_expr_helper( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + make_insns: impl FnOnce(&mut Self, TypeIndexRange) -> Vec, + ) -> CompiledValue { + let layout = CompiledTypeLayout::get(dest_ty); + let range = self.insns.allocate_variable(&layout.layout); + let retval = CompiledValue { + layout, + range, + write: None, + }; + let insns = make_insns(self, range); + self.add_assignment( + Interned::default(), + insns, + instantiated_module.leaf_module().source_location(), + ); + retval + } + fn simple_nary_big_expr_helper( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + make_insns: impl FnOnce(StatePartIndex) -> Vec, + ) -> CompiledValue { + self.compile_expr_helper(instantiated_module, dest_ty, |_, dest| { + assert_eq!(dest.len(), TypeLen::A_BIG_SLOT); + make_insns(dest.big_slots.start) + }) + } + fn simple_nary_big_expr( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + inputs: [Expr; N], + make_insns: impl FnOnce( 
+ StatePartIndex, + [StatePartIndex; N], + ) -> Vec, + ) -> CompiledValue { + let inputs = inputs.map(|input| self.simple_big_expr_input(instantiated_module, input)); + self.simple_nary_big_expr_helper(instantiated_module, dest_ty, |dest| { + make_insns(dest, inputs) + }) + } + fn compiled_value_to_dyn_array_index( + &mut self, + compiled_value: CompiledValue, + source_location: SourceLocation, + ) -> StatePartIndex { + if let Some(&retval) = self + .compiled_values_to_dyn_array_indexes + .get(&compiled_value) + { + return retval; + } + let mut ty = compiled_value.layout.ty; + ty.width = ty.width.min(SmallUInt::BITS as usize); + let retval = match compiled_value.range.len() { + TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, + TypeLen::A_BIG_SLOT => { + let debug_data = SlotDebugData { + name: Interned::default(), + ty: ty.canonical(), + }; + let dest = self + .insns + .allocate_variable(&TypeLayout { + small_slots: StatePartLayout::scalar(debug_data, ()), + big_slots: StatePartLayout::empty(), + }) + .small_slots + .start; + self.add_assignment( + Interned::default(), + vec![Insn::CastBigToArrayIndex { + dest, + src: compiled_value.range.big_slots.start, + }], + source_location, + ); + dest + } + _ => unreachable!(), + }; + self.compiled_values_to_dyn_array_indexes + .insert(compiled_value, retval); + retval + } + fn compiled_value_bool_dest_is_small( + &mut self, + compiled_value: CompiledValue, + source_location: SourceLocation, + ) -> StatePartIndex { + if let Some(&retval) = self + .compiled_value_bool_dest_is_small_map + .get(&compiled_value) + { + return retval; + } + let retval = match compiled_value.range.len() { + TypeLen::A_SMALL_SLOT => compiled_value.range.small_slots.start, + TypeLen::A_BIG_SLOT => { + let debug_data = SlotDebugData { + name: Interned::default(), + ty: Bool.canonical(), + }; + let dest = self + .insns + .allocate_variable(&TypeLayout { + small_slots: StatePartLayout::scalar(debug_data, ()), + big_slots: StatePartLayout::empty(), + }) + .small_slots + .start; + self.add_assignment( + Interned::default(), + vec![Insn::IsNonZeroDestIsSmall { + dest, + src: compiled_value.range.big_slots.start, + }], + source_location, + ); + dest + } + _ => unreachable!(), + }; + self.compiled_value_bool_dest_is_small_map + .insert(compiled_value, retval); + retval + } + fn compile_cast_scalar_to_bits( + &mut self, + instantiated_module: InstantiatedModule, + arg: Expr, + cast_fn: impl FnOnce(Expr) -> Expr, + ) -> CompiledValue { + let arg = Expr::::from_canonical(arg); + let retval = cast_fn(arg); + let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); + let retval = self + .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); + retval.map_ty(UInt::from_canonical) + } + fn compile_cast_aggregate_to_bits( + &mut self, + instantiated_module: InstantiatedModule, + parts: impl IntoIterator>, + ) -> CompiledValue { + let retval = parts + .into_iter() + .map(|part| part.cast_to_bits()) + .reduce(|accumulator, part| accumulator | (part << Expr::ty(accumulator).width)) + .unwrap_or_else(|| UInt[0].zero().to_expr()); + let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); + let retval = self + .compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()); + retval.map_ty(UInt::from_canonical) + } + fn compile_cast_to_bits( + &mut self, + instantiated_module: InstantiatedModule, + expr: ops::CastToBits, + ) -> CompiledValue { + match Expr::ty(expr.arg()) { + 
CanonicalType::UInt(_) => { + self.compile_cast_scalar_to_bits(instantiated_module, expr.arg(), |arg| arg) + } + CanonicalType::SInt(ty) => self.compile_cast_scalar_to_bits( + instantiated_module, + expr.arg(), + |arg: Expr| arg.cast_to(ty.as_same_width_uint()), + ), + CanonicalType::Bool(_) + | CanonicalType::AsyncReset(_) + | CanonicalType::SyncReset(_) + | CanonicalType::Reset(_) + | CanonicalType::Clock(_) => self.compile_cast_scalar_to_bits( + instantiated_module, + expr.arg(), + |arg: Expr| arg.cast_to(UInt[1]), + ), + CanonicalType::Array(ty) => self.compile_cast_aggregate_to_bits( + instantiated_module, + (0..ty.len()).map(|index| Expr::::from_canonical(expr.arg())[index]), + ), + CanonicalType::Enum(ty) => self + .simple_nary_big_expr( + instantiated_module, + UInt[ty.type_properties().bit_width].canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::Copy { dest, src }], + ) + .map_ty(UInt::from_canonical), + CanonicalType::Bundle(ty) => self.compile_cast_aggregate_to_bits( + instantiated_module, + ty.fields().iter().map(|field| { + Expr::field(Expr::::from_canonical(expr.arg()), &field.name) + }), + ), + CanonicalType::PhantomConst(_) => { + self.compile_cast_aggregate_to_bits(instantiated_module, []) + } + } + } + fn compile_cast_bits_to( + &mut self, + instantiated_module: InstantiatedModule, + expr: ops::CastBitsTo, + ) -> CompiledValue { + let retval = match expr.ty() { + CanonicalType::UInt(_) => Expr::canonical(expr.arg()), + CanonicalType::SInt(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::Bool(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::Array(ty) => { + let stride = ty.element().bit_width(); + Expr::::canonical( + ops::ArrayLiteral::new( + ty.element(), + Interned::from_iter((0..ty.len()).map(|index| { + let start = stride * index; + let end = start + stride; + expr.arg()[start..end].cast_bits_to(ty.element()) + })), + ) + .to_expr(), + ) + } + ty @ CanonicalType::Enum(_) => { + return self.simple_nary_big_expr( + instantiated_module, + ty, + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::Copy { dest, src }], + ); + } + CanonicalType::Bundle(ty) => Expr::canonical( + ops::BundleLiteral::new( + ty, + Interned::from_iter(ty.field_offsets().iter().zip(&ty.fields()).map( + |(&offset, &field)| { + let end = offset + field.ty.bit_width(); + expr.arg()[offset..end].cast_bits_to(field.ty) + }, + )), + ) + .to_expr(), + ), + CanonicalType::AsyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::SyncReset(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::Reset(_) => unreachable!(), + CanonicalType::Clock(ty) => Expr::canonical(expr.arg().cast_to(ty)), + CanonicalType::PhantomConst(ty) => { + let _ = self.compile_expr(instantiated_module, Expr::canonical(expr.arg())); + Expr::canonical(ty.to_expr()) + } + }; + let retval = self.compile_expr(instantiated_module, Expr::canonical(retval)); + self.compiled_expr_to_value(retval, instantiated_module.leaf_module().source_location()) + } + fn compile_aggregate_literal( + &mut self, + instantiated_module: InstantiatedModule, + dest_ty: CanonicalType, + inputs: Interned<[Expr]>, + ) -> CompiledValue { + self.compile_expr_helper(instantiated_module, dest_ty, |this, dest| { + let mut insns = Vec::new(); + let mut offset = TypeIndex::ZERO; + for input in inputs { + let input = this.compile_expr(instantiated_module, input); + let input = this + .compiled_expr_to_value( + input, + instantiated_module.leaf_module().source_location(), + ) + 
.range; + insns.extend( + input.insns_for_copy_to(dest.slice(TypeIndexRange::new(offset, input.len()))), + ); + offset = offset.offset(input.len().as_index()); + } + insns + }) + } + fn compile_expr( + &mut self, + instantiated_module: InstantiatedModule, + expr: Expr, + ) -> CompiledExpr { + if let Some(&retval) = self.compiled_exprs.get(&expr) { + return retval; + } + let mut cast_bit = |arg: Expr| { + let src_signed = match Expr::ty(arg) { + CanonicalType::UInt(_) => false, + CanonicalType::SInt(_) => true, + CanonicalType::Bool(_) => false, + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(_) => unreachable!(), + CanonicalType::Bundle(_) => unreachable!(), + CanonicalType::AsyncReset(_) => false, + CanonicalType::SyncReset(_) => false, + CanonicalType::Reset(_) => false, + CanonicalType::Clock(_) => false, + CanonicalType::PhantomConst(_) => unreachable!(), + }; + let dest_signed = match Expr::ty(expr) { + CanonicalType::UInt(_) => false, + CanonicalType::SInt(_) => true, + CanonicalType::Bool(_) => false, + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(_) => unreachable!(), + CanonicalType::Bundle(_) => unreachable!(), + CanonicalType::AsyncReset(_) => false, + CanonicalType::SyncReset(_) => false, + CanonicalType::Reset(_) => false, + CanonicalType::Clock(_) => false, + CanonicalType::PhantomConst(_) => unreachable!(), + }; + self.simple_nary_big_expr(instantiated_module, Expr::ty(expr), [arg], |dest, [src]| { + match (src_signed, dest_signed) { + (false, false) | (true, true) => { + vec![Insn::Copy { dest, src }] + } + (false, true) => vec![Insn::CastToSInt { + dest, + src, + dest_width: 1, + }], + (true, false) => vec![Insn::CastToUInt { + dest, + src, + dest_width: 1, + }], + } + }) + .into() + }; + let retval: CompiledExpr<_> = match *Expr::expr_enum(expr) { + ExprEnum::UIntLiteral(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [], + |dest, []| { + vec![Insn::Const { + dest, + value: expr.to_bigint().intern_sized(), + }] + }, + ) + .into(), + ExprEnum::SIntLiteral(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [], + |dest, []| { + vec![Insn::Const { + dest, + value: expr.to_bigint().intern_sized(), + }] + }, + ) + .into(), + ExprEnum::BoolLiteral(expr) => self + .simple_nary_big_expr(instantiated_module, Bool.canonical(), [], |dest, []| { + vec![Insn::Const { + dest, + value: BigInt::from(expr).intern_sized(), + }] + }) + .into(), + ExprEnum::PhantomConst(_) => self + .compile_aggregate_literal(instantiated_module, Expr::ty(expr), Interned::default()) + .into(), + ExprEnum::BundleLiteral(literal) => self + .compile_aggregate_literal( + instantiated_module, + Expr::ty(expr), + literal.field_values(), + ) + .into(), + ExprEnum::ArrayLiteral(literal) => self + .compile_aggregate_literal( + instantiated_module, + Expr::ty(expr), + literal.element_values(), + ) + .into(), + ExprEnum::EnumLiteral(expr) => { + let enum_bits_ty = UInt[expr.ty().type_properties().bit_width]; + let enum_bits = if let Some(variant_value) = expr.variant_value() { + ( + UInt[expr.ty().discriminant_bit_width()] + .from_int_wrapping(expr.variant_index()), + variant_value, + ) + .cast_to_bits() + .cast_to(enum_bits_ty) + } else { + enum_bits_ty + .from_int_wrapping(expr.variant_index()) + .to_expr() + }; + self.compile_expr( + instantiated_module, + enum_bits.cast_bits_to(expr.ty().canonical()), + ) + } + ExprEnum::Uninit(expr) => self.compile_expr( + instantiated_module, + 
UInt[expr.ty().bit_width()].zero().cast_bits_to(expr.ty()), + ), + ExprEnum::NotU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Expr::ty(expr.arg()).canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::NotU { + dest, + src, + width: Expr::ty(expr.arg()).width(), + }] + }, + ) + .into(), + ExprEnum::NotS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Expr::ty(expr.arg()).canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::NotS { dest, src }], + ) + .into(), + ExprEnum::NotB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Expr::ty(expr.arg()).canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::NotU { + dest, + src, + width: 1, + }] + }, + ) + .into(), + ExprEnum::Neg(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| vec![Insn::Neg { dest, src }], + ) + .into(), + ExprEnum::BitAndU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitAndS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitAndB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::And { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitOrU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitOrS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitOrB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Or { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitXorU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitXorS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], + ) + .into(), + ExprEnum::BitXorB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Xor { dest, lhs, rhs }], + ) + .into(), + ExprEnum::AddU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Add { dest, lhs, rhs }], + ) + .into(), + ExprEnum::AddS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| 
vec![Insn::Add { dest, lhs, rhs }], + ) + .into(), + ExprEnum::SubU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| { + vec![Insn::SubU { + dest, + lhs, + rhs, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::SubS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::SubS { dest, lhs, rhs }], + ) + .into(), + ExprEnum::MulU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], + ) + .into(), + ExprEnum::MulS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Mul { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DivU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DivS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Div { dest, lhs, rhs }], + ) + .into(), + ExprEnum::RemU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], + ) + .into(), + ExprEnum::RemS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::Rem { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShlU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShlS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShl { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShrU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], + ) + .into(), + ExprEnum::DynShrS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::DynShr { dest, lhs, rhs }], + ) + .into(), + ExprEnum::FixedShlU(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shl { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::FixedShlS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shl { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::FixedShrU(expr) => self + .simple_nary_big_expr( + instantiated_module, + 
expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shr { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::FixedShrS(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.lhs())], + |dest, [lhs]| { + vec![Insn::Shr { + dest, + lhs, + rhs: expr.rhs(), + }] + }, + ) + .into(), + ExprEnum::CmpLtB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLeB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGtB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGeB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpEqB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpNeB(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLtU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLeU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGtU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGeU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpEqU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpNeU(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLtS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + 
[Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpLeS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGtS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLt { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpGeS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + // swap both comparison direction and lhs/rhs + [Expr::canonical(expr.rhs()), Expr::canonical(expr.lhs())], + |dest, [lhs, rhs]| vec![Insn::CmpLe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpEqS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpEq { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CmpNeS(expr) => self + .simple_nary_big_expr( + instantiated_module, + Bool.canonical(), + [Expr::canonical(expr.lhs()), Expr::canonical(expr.rhs())], + |dest, [lhs, rhs]| vec![Insn::CmpNe { dest, lhs, rhs }], + ) + .into(), + ExprEnum::CastUIntToUInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToUInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastUIntToSInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToSInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastSIntToUInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToUInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastSIntToSInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + expr.ty().canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::CastToSInt { + dest, + src, + dest_width: expr.ty().width(), + }] + }, + ) + .into(), + ExprEnum::CastBoolToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToSyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToAsyncReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSyncResetToReset(expr) => 
cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastAsyncResetToReset(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastResetToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastResetToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastResetToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastBoolToClock(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastUIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastSIntToClock(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastClockToBool(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastClockToUInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::CastClockToSInt(expr) => cast_bit(Expr::canonical(expr.arg())), + ExprEnum::FieldAccess(expr) => self + .compile_expr(instantiated_module, Expr::canonical(expr.base())) + .map_ty(Bundle::from_canonical) + .field_by_index(expr.field_index()), + ExprEnum::VariantAccess(variant_access) => { + let start = Expr::ty(variant_access.base()).discriminant_bit_width(); + let len = Expr::ty(expr).bit_width(); + self.compile_expr( + instantiated_module, + variant_access.base().cast_to_bits()[start..start + len] + .cast_bits_to(Expr::ty(expr)), + ) + } + ExprEnum::ArrayIndex(expr) => self + .compile_expr(instantiated_module, Expr::canonical(expr.base())) + .map_ty(Array::from_canonical) + .element(expr.element_index()), + ExprEnum::DynArrayIndex(expr) => { + let element_index = + self.compile_expr(instantiated_module, Expr::canonical(expr.element_index())); + let element_index = self.compiled_expr_to_value( + element_index, + instantiated_module.leaf_module().source_location(), + ); + let index_slot = self.compiled_value_to_dyn_array_index( + element_index.map_ty(UInt::from_canonical), + instantiated_module.leaf_module().source_location(), + ); + self.compile_expr(instantiated_module, Expr::canonical(expr.base())) + .map_ty(Array::from_canonical) + .element_dyn(index_slot) + } + ExprEnum::ReduceBitAndU(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical( + expr.arg() + .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), + ), + ) + } + .into(), + ExprEnum::ReduceBitAndS(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(true.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical( + expr.arg() + .cmp_eq(Expr::ty(expr.arg()).from_int_wrapping(-1)), + ), + ) + } + .into(), + ExprEnum::ReduceBitOrU(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), + ) + } + .into(), + ExprEnum::ReduceBitOrS(expr) => if Expr::ty(expr.arg()).width() == 0 { + self.compile_expr(instantiated_module, Expr::canonical(false.to_expr())) + } else { + self.compile_expr( + instantiated_module, + Expr::canonical(expr.arg().cmp_ne(Expr::ty(expr.arg()).from_int_wrapping(0))), + ) + } + .into(), + ExprEnum::ReduceBitXorU(expr) => self + 
.simple_nary_big_expr( + instantiated_module, + UInt::<1>::TYPE.canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::ReduceBitXor { + dest, + src, + input_width: Expr::ty(expr.arg()).width(), + }] + }, + ) + .into(), + ExprEnum::ReduceBitXorS(expr) => self + .simple_nary_big_expr( + instantiated_module, + UInt::<1>::TYPE.canonical(), + [Expr::canonical(expr.arg())], + |dest, [src]| { + vec![Insn::ReduceBitXor { + dest, + src, + input_width: Expr::ty(expr.arg()).width(), + }] + }, + ) + .into(), + ExprEnum::SliceUInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + UInt::new_dyn(expr.range().len()).canonical(), + [Expr::canonical(expr.base())], + |dest, [src]| { + vec![Insn::SliceInt { + dest, + src, + start: expr.range().start, + len: expr.range().len(), + }] + }, + ) + .into(), + ExprEnum::SliceSInt(expr) => self + .simple_nary_big_expr( + instantiated_module, + UInt::new_dyn(expr.range().len()).canonical(), + [Expr::canonical(expr.base())], + |dest, [src]| { + vec![Insn::SliceInt { + dest, + src, + start: expr.range().start, + len: expr.range().len(), + }] + }, + ) + .into(), + ExprEnum::CastToBits(expr) => self + .compile_cast_to_bits(instantiated_module, expr) + .map_ty(CanonicalType::UInt) + .into(), + ExprEnum::CastBitsTo(expr) => { + self.compile_cast_bits_to(instantiated_module, expr).into() + } + ExprEnum::ModuleIO(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::Instance(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::Wire(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::Reg(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::RegSync(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::RegAsync(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + ExprEnum::MemPort(expr) => self + .compile_value(TargetInInstantiatedModule { + instantiated_module, + target: expr.into(), + }) + .into(), + }; + self.compiled_exprs.insert(expr, retval); + retval + } + fn compile_simple_connect( + &mut self, + conditions: Interned<[Cond]>, + lhs: CompiledExpr, + rhs: CompiledValue, + source_location: SourceLocation, + ) { + let CompiledExpr { + static_part: lhs_static_part, + indexes, + } = lhs; + let (lhs_layout, lhs_range) = lhs_static_part.write(); + assert!( + lhs_layout.ty.is_passive(), + "invalid expression passed to compile_simple_connect -- type must be passive", + ); + let TypeIndexRange { + small_slots, + big_slots, + } = lhs_range; + self.add_assignment( + conditions, + small_slots + .iter() + .zip(rhs.range.small_slots.iter()) + .map(|(base, src)| { + if indexes.small_slots.is_empty() { + Insn::CopySmall { dest: base, src } + } else { + Insn::WriteSmallIndexed { + dest: StatePartArrayIndexed { + base, + indexes: indexes.small_slots, + }, + src, + } + } + }) + .chain( + big_slots + .iter() + .zip(rhs.range.big_slots.iter()) + .map(|(base, src)| { + if indexes.big_slots.is_empty() { + Insn::Copy { dest: base, src } + } else { + Insn::WriteIndexed { + dest: StatePartArrayIndexed { + base, + indexes: indexes.big_slots, + }, + src, + } + } + }), + ), + source_location, + ); + 
} + fn compile_connect( + &mut self, + lhs_instantiated_module: InstantiatedModule, + lhs_conditions: Interned<[Cond]>, + lhs: Expr, + rhs_instantiated_module: InstantiatedModule, + rhs_conditions: Interned<[Cond]>, + mut rhs: Expr, + source_location: SourceLocation, + ) { + if Expr::ty(lhs) != Expr::ty(rhs) || !Expr::ty(lhs).is_passive() { + match Expr::ty(lhs) { + CanonicalType::UInt(lhs_ty) => { + rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); + } + CanonicalType::SInt(lhs_ty) => { + rhs = Expr::canonical(Expr::::from_canonical(rhs).cast_to(lhs_ty)); + } + CanonicalType::Bool(_) => unreachable!(), + CanonicalType::Array(lhs_ty) => { + let CanonicalType::Array(rhs_ty) = Expr::ty(rhs) else { + unreachable!(); + }; + assert_eq!(lhs_ty.len(), rhs_ty.len()); + let lhs = Expr::::from_canonical(lhs); + let rhs = Expr::::from_canonical(rhs); + for index in 0..lhs_ty.len() { + self.compile_connect( + lhs_instantiated_module, + lhs_conditions, + lhs[index], + rhs_instantiated_module, + rhs_conditions, + rhs[index], + source_location, + ); + } + return; + } + CanonicalType::Enum(lhs_ty) => { + let CanonicalType::Enum(rhs_ty) = Expr::ty(rhs) else { + unreachable!(); + }; + todo!("handle connect with different enum types"); + } + CanonicalType::Bundle(lhs_ty) => { + let CanonicalType::Bundle(rhs_ty) = Expr::ty(rhs) else { + unreachable!(); + }; + assert_eq!(lhs_ty.fields().len(), rhs_ty.fields().len()); + let lhs = Expr::::from_canonical(lhs); + let rhs = Expr::::from_canonical(rhs); + for ( + field_index, + ( + BundleField { + name, + flipped, + ty: _, + }, + rhs_field, + ), + ) in lhs_ty.fields().into_iter().zip(rhs_ty.fields()).enumerate() + { + assert_eq!(name, rhs_field.name); + assert_eq!(flipped, rhs_field.flipped); + let lhs_expr = ops::FieldAccess::new_by_index(lhs, field_index).to_expr(); + let rhs_expr = ops::FieldAccess::new_by_index(rhs, field_index).to_expr(); + if flipped { + // swap lhs/rhs + self.compile_connect( + rhs_instantiated_module, + rhs_conditions, + rhs_expr, + lhs_instantiated_module, + lhs_conditions, + lhs_expr, + source_location, + ); + } else { + self.compile_connect( + lhs_instantiated_module, + lhs_conditions, + lhs_expr, + rhs_instantiated_module, + rhs_conditions, + rhs_expr, + source_location, + ); + } + } + return; + } + CanonicalType::AsyncReset(_) => unreachable!(), + CanonicalType::SyncReset(_) => unreachable!(), + CanonicalType::Reset(_) => unreachable!(), + CanonicalType::Clock(_) => unreachable!(), + CanonicalType::PhantomConst(_) => unreachable!("PhantomConst mismatch"), + } + } + let Some(target) = lhs.target() else { + unreachable!("connect lhs must have target"); + }; + let lhs_decl_conditions = self.decl_conditions[&TargetInInstantiatedModule { + instantiated_module: lhs_instantiated_module, + target: target.base().into(), + }]; + let lhs = self.compile_expr(lhs_instantiated_module, lhs); + let rhs = self.compile_expr(rhs_instantiated_module, rhs); + let rhs = self.compiled_expr_to_value(rhs, source_location); + self.compile_simple_connect( + lhs_conditions[lhs_decl_conditions.len()..].intern(), + lhs, + rhs, + source_location, + ); + } + fn compile_clock( + &mut self, + clk: CompiledValue, + source_location: SourceLocation, + ) -> ClockTrigger { + if let Some(&retval) = self.compiled_value_to_clock_trigger_map.get(&clk) { + return retval; + } + let mut alloc_small_slot = |part_name: &str| { + self.insns + .state_layout + .ty + .small_slots + .allocate(&StatePartLayout::scalar( + SlotDebugData { + name: Interned::default(), + ty: 
Bool.canonical(), + }, + (), + )) + .start + }; + let last_clk_was_low = alloc_small_slot("last_clk_was_low"); + let clk_triggered = alloc_small_slot("clk_triggered"); + let retval = ClockTrigger { + last_clk_was_low, + clk: self.compiled_value_bool_dest_is_small( + clk.map_ty(CanonicalType::Clock), + source_location, + ), + clk_triggered, + source_location, + }; + self.add_assignment( + Interned::default(), + [Insn::AndSmall { + dest: clk_triggered, + lhs: retval.clk, + rhs: last_clk_was_low, + }], + source_location, + ); + self.clock_triggers.push(retval); + self.compiled_value_to_clock_trigger_map.insert(clk, retval); + retval + } + fn compile_enum_discriminant( + &mut self, + enum_value: CompiledValue, + source_location: SourceLocation, + ) -> StatePartIndex { + if let Some(&retval) = self.enum_discriminants.get(&enum_value) { + return retval; + } + let retval_ty = Enum::new( + enum_value + .layout + .ty + .variants() + .iter() + .map(|variant| EnumVariant { + name: variant.name, + ty: None, + }) + .collect(), + ); + let retval = if retval_ty == enum_value.layout.ty + && enum_value.range.len() == TypeLen::A_SMALL_SLOT + { + enum_value.range.small_slots.start + } else { + let retval = self + .insns + .state_layout + .ty + .small_slots + .allocate(&StatePartLayout::scalar( + SlotDebugData { + name: Interned::default(), + ty: retval_ty.canonical(), + }, + (), + )) + .start; + let discriminant_bit_width = enum_value.layout.ty.discriminant_bit_width(); + let discriminant_mask = !(!0u64 << discriminant_bit_width); + let insn = match enum_value.range.len() { + TypeLen::A_BIG_SLOT => Insn::AndBigWithSmallImmediate { + dest: retval, + lhs: enum_value.range.big_slots.start, + rhs: discriminant_mask, + }, + TypeLen::A_SMALL_SLOT => { + if discriminant_bit_width == enum_value.layout.ty.type_properties().bit_width { + Insn::CopySmall { + dest: retval, + src: enum_value.range.small_slots.start, + } + } else { + Insn::AndSmallImmediate { + dest: retval, + lhs: enum_value.range.small_slots.start, + rhs: discriminant_mask, + } + } + } + _ => unreachable!(), + }; + self.add_assignment(Interned::default(), [insn], source_location); + retval + }; + self.enum_discriminants.insert(enum_value, retval); + retval + } + fn compile_stmt_reg( + &mut self, + stmt_reg: StmtReg, + instantiated_module: InstantiatedModule, + value: CompiledValue, + ) { + let StmtReg { annotations, reg } = stmt_reg; + let clk = self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().clk)); + let clk = self + .compiled_expr_to_value(clk, reg.source_location()) + .map_ty(Clock::from_canonical); + let clk = self.compile_clock(clk, reg.source_location()); + struct Dispatch; + impl ResetTypeDispatch for Dispatch { + type Input = (); + + type Output = bool; + + fn reset(self, _input: Self::Input) -> Self::Output { + unreachable!() + } + + fn sync_reset(self, _input: Self::Input) -> Self::Output { + false + } + + fn async_reset(self, _input: Self::Input) -> Self::Output { + true + } + } + let reset = if let Some(init) = reg.init() { + let init = self.compile_expr(instantiated_module, init); + let init = self.compiled_expr_to_value(init, reg.source_location()); + let rst = + self.compile_expr(instantiated_module, Expr::canonical(reg.clock_domain().rst)); + let rst = self.compiled_expr_to_value(rst, reg.source_location()); + let rst = self.compiled_value_bool_dest_is_small(rst, reg.source_location()); + let is_async = R::dispatch((), Dispatch); + if is_async { + let cond = Expr::canonical(reg.clock_domain().rst.cast_to(Bool)); 
+ let cond = self.compile_expr(instantiated_module, cond); + let cond = self.compiled_expr_to_value(cond, reg.source_location()); + let cond = cond.map_ty(Bool::from_canonical); + // write to the register's current value since asynchronous reset is combinational + let lhs = CompiledValue { + layout: value.layout, + range: value.range, + write: None, + } + .into(); + self.compile_simple_connect( + [Cond { + body: CondBody::IfTrue { cond }, + source_location: reg.source_location(), + }][..] + .intern(), + lhs, + init, + reg.source_location(), + ); + } + Some(RegisterReset { + is_async, + init, + rst, + }) + } else { + None + }; + self.registers.push(Register { + value, + clk_triggered: clk.clk_triggered, + reset, + source_location: reg.source_location(), + }); + } + fn compile_declaration( + &mut self, + declaration: StmtDeclaration, + parent_module: Interned, + conditions: Interned<[Cond]>, + ) -> TraceDecl { + let target_base: TargetBase = match &declaration { + StmtDeclaration::Wire(v) => v.wire.into(), + StmtDeclaration::Reg(v) => v.reg.into(), + StmtDeclaration::RegSync(v) => v.reg.into(), + StmtDeclaration::RegAsync(v) => v.reg.into(), + StmtDeclaration::Instance(v) => v.instance.into(), + }; + let target = TargetInInstantiatedModule { + instantiated_module: *parent_module, + target: target_base.into(), + }; + self.decl_conditions.insert(target, conditions); + let compiled_value = self.compile_value(target); + match declaration { + StmtDeclaration::Wire(StmtWire { annotations, wire }) => {} + StmtDeclaration::Reg(_) => { + unreachable!("Reset types were already replaced by SyncReset or AsyncReset"); + } + StmtDeclaration::RegSync(stmt_reg) => { + self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) + } + StmtDeclaration::RegAsync(stmt_reg) => { + self.compile_stmt_reg(stmt_reg, *parent_module, compiled_value) + } + StmtDeclaration::Instance(StmtInstance { + annotations, + instance, + }) => { + let inner_instantiated_module = InstantiatedModule::Child { + parent: parent_module, + instance: instance.intern_sized(), + } + .intern_sized(); + let instance_expr = instance.to_expr(); + self.compile_module(inner_instantiated_module); + for (field_index, module_io) in + instance.instantiated().module_io().into_iter().enumerate() + { + let instance_field = + ops::FieldAccess::new_by_index(instance_expr, field_index).to_expr(); + match Expr::flow(instance_field) { + Flow::Source => { + // we need to supply the value to the instance since the + // parent module expects to read from the instance + self.compile_connect( + *parent_module, + conditions, + instance_field, + *inner_instantiated_module, + Interned::default(), + module_io.module_io.to_expr(), + instance.source_location(), + ); + } + Flow::Sink => { + // we need to take the value from the instance since the + // parent module expects to write to the instance + self.compile_connect( + *inner_instantiated_module, + Interned::default(), + module_io.module_io.to_expr(), + *parent_module, + conditions, + instance_field, + instance.source_location(), + ); + } + Flow::Duplex => unreachable!(), + } + } + } + } + self.make_trace_decl(*parent_module, target_base) + } + fn allocate_delay_chain( + &mut self, + len: usize, + layout: &TypeLayout, + first: Option, + last: Option, + mut from_allocation: impl FnMut(TypeIndexRange) -> T, + ) -> Vec { + match (len, first, last) { + (0, _, _) => Vec::new(), + (1, Some(v), _) | (1, None, Some(v)) => vec![v], + (2, Some(first), Some(last)) => vec![first, last], + (len, first, last) => { + let 
inner_len = len - first.is_some() as usize - last.is_some() as usize; + first + .into_iter() + .chain( + (0..inner_len) + .map(|_| from_allocation(self.insns.allocate_variable(layout))), + ) + .chain(last) + .collect() + } + } + } + fn allocate_delay_chain_small( + &mut self, + len: usize, + ty: CanonicalType, + first: Option>, + last: Option>, + ) -> Vec> { + self.allocate_delay_chain( + len, + &TypeLayout { + small_slots: StatePartLayout::scalar( + SlotDebugData { + name: Interned::default(), + ty, + }, + (), + ), + big_slots: StatePartLayout::empty(), + }, + first, + last, + |range| range.small_slots.start, + ) + } + fn compile_memory_port_rw_helper( + &mut self, + memory: StatePartIndex, + stride: usize, + mut start: usize, + data_layout: CompiledTypeLayout, + mask_layout: CompiledTypeLayout, + mut read: Option>, + mut write: Option>, + ) { + match data_layout.body { + CompiledTypeLayoutBody::Scalar => { + let CompiledTypeLayoutBody::Scalar = mask_layout.body else { + unreachable!(); + }; + let signed = match data_layout.ty { + CanonicalType::UInt(_) => false, + CanonicalType::SInt(_) => true, + CanonicalType::Bool(_) => false, + CanonicalType::Array(_) => unreachable!(), + CanonicalType::Enum(_) => false, + CanonicalType::Bundle(_) => unreachable!(), + CanonicalType::AsyncReset(_) => false, + CanonicalType::SyncReset(_) => false, + CanonicalType::Reset(_) => false, + CanonicalType::Clock(_) => false, + CanonicalType::PhantomConst(_) => unreachable!(), + }; + let width = data_layout.ty.bit_width(); + if let Some(MemoryPortReadInsns { + addr, + en: _, + write_mode: _, + data, + insns, + }) = read + { + insns.push( + match data.len() { + TypeLen::A_BIG_SLOT => { + let dest = data.big_slots.start; + if signed { + Insn::MemoryReadSInt { + dest, + memory, + addr, + stride, + start, + width, + } + } else { + Insn::MemoryReadUInt { + dest, + memory, + addr, + stride, + start, + width, + } + } + } + TypeLen::A_SMALL_SLOT => { + let _dest = data.small_slots.start; + todo!("memory ports' data are always big for now"); + } + _ => unreachable!(), + } + .into(), + ); + } + if let Some(MemoryPortWriteInsns { + addr, + en: _, + write_mode: _, + data, + mask, + insns, + }) = write + { + let end_label = self.insns.new_label(); + insns.push( + match mask.len() { + TypeLen::A_BIG_SLOT => Insn::BranchIfZero { + target: end_label.0, + value: mask.big_slots.start, + }, + TypeLen::A_SMALL_SLOT => Insn::BranchIfSmallZero { + target: end_label.0, + value: mask.small_slots.start, + }, + _ => unreachable!(), + } + .into(), + ); + insns.push( + match data.len() { + TypeLen::A_BIG_SLOT => { + let value = data.big_slots.start; + if signed { + Insn::MemoryWriteSInt { + value, + memory, + addr, + stride, + start, + width, + } + } else { + Insn::MemoryWriteUInt { + value, + memory, + addr, + stride, + start, + width, + } + } + } + TypeLen::A_SMALL_SLOT => { + let _value = data.small_slots.start; + todo!("memory ports' data are always big for now"); + } + _ => unreachable!(), + } + .into(), + ); + insns.push(end_label.into()); + } + } + CompiledTypeLayoutBody::Array { element } => { + let CompiledTypeLayoutBody::Array { + element: mask_element, + } = mask_layout.body + else { + unreachable!(); + }; + let ty = ::from_canonical(data_layout.ty); + let element_bit_width = ty.element().bit_width(); + let element_size = element.layout.len(); + let mask_element_size = mask_element.layout.len(); + for element_index in 0..ty.len() { + self.compile_memory_port_rw_helper( + memory, + stride, + start, + *element, + *mask_element, 
+ read.as_mut().map( + |MemoryPortReadInsns { + addr, + en, + write_mode, + data, + insns, + }| MemoryPortReadInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.index_array(element_size, element_index), + insns, + }, + ), + write.as_mut().map( + |MemoryPortWriteInsns { + addr, + en, + write_mode, + data, + mask, + insns, + }| { + MemoryPortWriteInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.index_array(element_size, element_index), + mask: mask.index_array(mask_element_size, element_index), + insns, + } + }, + ), + ); + start += element_bit_width; + } + } + CompiledTypeLayoutBody::Bundle { fields } => { + let CompiledTypeLayoutBody::Bundle { + fields: mask_fields, + } = mask_layout.body + else { + unreachable!(); + }; + assert_eq!(fields.len(), mask_fields.len()); + for (field, mask_field) in fields.into_iter().zip(mask_fields) { + let field_index_range = + TypeIndexRange::new(field.offset, field.ty.layout.len()); + let mask_field_index_range = + TypeIndexRange::new(mask_field.offset, mask_field.ty.layout.len()); + self.compile_memory_port_rw_helper( + memory, + stride, + start, + field.ty, + mask_field.ty, + read.as_mut().map( + |MemoryPortReadInsns { + addr, + en, + write_mode, + data, + insns, + }| MemoryPortReadInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.slice(field_index_range), + insns, + }, + ), + write.as_mut().map( + |MemoryPortWriteInsns { + addr, + en, + write_mode, + data, + mask, + insns, + }| { + MemoryPortWriteInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: data.slice(field_index_range), + mask: mask.slice(mask_field_index_range), + insns, + } + }, + ), + ); + start = start + field.ty.ty.bit_width(); + } + } + } + } + fn compile_memory_port_rw( + &mut self, + memory: StatePartIndex, + data_layout: CompiledTypeLayout, + mask_layout: CompiledTypeLayout, + mut read: Option>, + mut write: Option>, + ) { + let read_else_label = read.as_mut().map( + |MemoryPortReadInsns { + addr: _, + en, + write_mode, + data: _, + insns, + }| { + let else_label = self.insns.new_label(); + insns.push( + Insn::BranchIfSmallZero { + target: else_label.0, + value: *en, + } + .into(), + ); + if let Some(write_mode) = *write_mode { + insns.push( + Insn::BranchIfSmallNonZero { + target: else_label.0, + value: write_mode, + } + .into(), + ); + } + else_label + }, + ); + let write_end_label = write.as_mut().map( + |MemoryPortWriteInsns { + addr: _, + en, + write_mode, + data: _, + mask: _, + insns, + }| { + let end_label = self.insns.new_label(); + insns.push( + Insn::BranchIfSmallZero { + target: end_label.0, + value: *en, + } + .into(), + ); + if let Some(write_mode) = *write_mode { + insns.push( + Insn::BranchIfSmallZero { + target: end_label.0, + value: write_mode, + } + .into(), + ); + } + end_label + }, + ); + self.compile_memory_port_rw_helper( + memory, + data_layout.ty.bit_width(), + 0, + data_layout, + mask_layout, + read.as_mut().map( + |MemoryPortReadInsns { + addr, + en, + write_mode, + data, + insns, + }| MemoryPortReadInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: *data, + insns: *insns, + }, + ), + write.as_mut().map( + |MemoryPortWriteInsns { + addr, + en, + write_mode, + data, + mask, + insns, + }| MemoryPortWriteInsns { + addr: *addr, + en: *en, + write_mode: *write_mode, + data: *data, + mask: *mask, + insns: *insns, + }, + ), + ); + if let ( + Some(else_label), + Some(MemoryPortReadInsns { + addr: _, + en: _, + write_mode: _, + data, + insns, + }), + ) = 
(read_else_label, read) + { + let end_label = self.insns.new_label(); + insns.push( + Insn::Branch { + target: end_label.0, + } + .into(), + ); + insns.push(else_label.into()); + let TypeIndexRange { + small_slots, + big_slots, + } = data; + for dest in small_slots.iter() { + insns.push(Insn::ConstSmall { dest, value: 0 }.into()); + } + for dest in big_slots.iter() { + insns.push( + Insn::Const { + dest, + value: BigInt::ZERO.intern_sized(), + } + .into(), + ); + } + insns.push(end_label.into()); + } + if let (Some(end_label), Some(write)) = (write_end_label, write) { + write.insns.push(end_label.into()); + } + } + fn compile_memory( + &mut self, + mem: Mem, + instantiated_module: InstantiatedModule, + conditions: Interned<[Cond]>, + trace_decls: &mut Vec, + ) { + let data_layout = CompiledTypeLayout::get(mem.array_type().element()); + let mask_layout = CompiledTypeLayout::get(mem.array_type().element().mask_type()); + let read_latency_plus_1 = mem + .read_latency() + .checked_add(1) + .expect("read latency too big"); + let write_latency_plus_1 = mem + .write_latency() + .get() + .checked_add(1) + .expect("write latency too big"); + let read_cycle = match mem.read_under_write() { + ReadUnderWrite::Old => 0, + ReadUnderWrite::New => mem.read_latency(), + ReadUnderWrite::Undefined => mem.read_latency() / 2, // something other than Old or New + }; + let memory = self + .insns + .state_layout + .memories + .allocate(&StatePartLayout::scalar( + (), + MemoryData { + array_type: mem.array_type(), + data: mem.initial_value().unwrap_or_else(|| { + Intern::intern_owned(BitVec::repeat( + false, + mem.array_type().type_properties().bit_width, + )) + }), + }, + )) + .start; + let (ports, trace_ports) = mem + .ports() + .iter() + .map(|&port| { + let target_base = TargetBase::MemPort(port); + let target = TargetInInstantiatedModule { + instantiated_module, + target: target_base.into(), + }; + self.decl_conditions.insert(target, conditions); + let TraceDecl::Scope(TraceScope::MemPort(trace_port)) = + self.make_trace_decl(instantiated_module, target_base) + else { + unreachable!(); + }; + let clk = Expr::field(port.to_expr(), "clk"); + let clk = self.compile_expr(instantiated_module, clk); + let clk = self.compiled_expr_to_value(clk, mem.source_location()); + let clk_triggered = self + .compile_clock(clk.map_ty(Clock::from_canonical), mem.source_location()) + .clk_triggered; + let en = Expr::field(port.to_expr(), "en"); + let en = self.compile_expr(instantiated_module, en); + let en = self.compiled_expr_to_value(en, mem.source_location()); + let en = self.compiled_value_bool_dest_is_small(en, mem.source_location()); + let addr = Expr::field(port.to_expr(), "addr"); + let addr = self.compile_expr(instantiated_module, addr); + let addr = self.compiled_expr_to_value(addr, mem.source_location()); + let addr_ty = addr.layout.ty; + let addr = self.compiled_value_to_dyn_array_index( + addr.map_ty(UInt::from_canonical), + mem.source_location(), + ); + let read_data = port.port_kind().rdata_name().map(|name| { + let read_data = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let read_data = self.compiled_expr_to_value(read_data, mem.source_location()); + read_data.range + }); + let write_data = port.port_kind().wdata_name().map(|name| { + let write_data = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let write_data = self.compiled_expr_to_value(write_data, mem.source_location()); + write_data.range + }); + let write_mask = 
port.port_kind().wmask_name().map(|name| { + let write_mask = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let write_mask = self.compiled_expr_to_value(write_mask, mem.source_location()); + write_mask.range + }); + let write_mode = port.port_kind().wmode_name().map(|name| { + let write_mode = + self.compile_expr(instantiated_module, Expr::field(port.to_expr(), name)); + let write_mode = self.compiled_expr_to_value(write_mode, mem.source_location()); + self.compiled_value_bool_dest_is_small(write_mode, mem.source_location()) + }); + struct PortParts { + en_delayed_len: usize, + addr_delayed_len: usize, + read_data_delayed_len: usize, + write_data_delayed_len: usize, + write_mask_delayed_len: usize, + write_mode_delayed_len: usize, + read_cycle: Option, + write_cycle: Option, + } + let PortParts { + en_delayed_len, + addr_delayed_len, + read_data_delayed_len, + write_data_delayed_len, + write_mask_delayed_len, + write_mode_delayed_len, + read_cycle, + write_cycle, + } = match port.port_kind() { + PortKind::ReadOnly => PortParts { + en_delayed_len: read_cycle + 1, + addr_delayed_len: read_cycle + 1, + read_data_delayed_len: read_latency_plus_1 - read_cycle, + write_data_delayed_len: 0, + write_mask_delayed_len: 0, + write_mode_delayed_len: 0, + read_cycle: Some(read_cycle), + write_cycle: None, + }, + PortKind::WriteOnly => PortParts { + en_delayed_len: write_latency_plus_1, + addr_delayed_len: write_latency_plus_1, + read_data_delayed_len: 0, + write_data_delayed_len: write_latency_plus_1, + write_mask_delayed_len: write_latency_plus_1, + write_mode_delayed_len: 0, + read_cycle: None, + write_cycle: Some(mem.write_latency().get()), + }, + PortKind::ReadWrite => { + let can_rw_at_end = match mem.read_under_write() { + ReadUnderWrite::Old => false, + ReadUnderWrite::New | ReadUnderWrite::Undefined => true, + }; + let latency_plus_1 = read_latency_plus_1; + if latency_plus_1 != write_latency_plus_1 || !can_rw_at_end { + todo!( + "not sure what to do, issue: \ + https://github.com/chipsalliance/firrtl-spec/issues/263" + ); + } + PortParts { + en_delayed_len: latency_plus_1, + addr_delayed_len: latency_plus_1, + read_data_delayed_len: 1, + write_data_delayed_len: latency_plus_1, + write_mask_delayed_len: latency_plus_1, + write_mode_delayed_len: latency_plus_1, + read_cycle: Some(latency_plus_1 - 1), + write_cycle: Some(latency_plus_1 - 1), + } + } + }; + let addr_delayed = self.allocate_delay_chain_small( + addr_delayed_len, + addr_ty.canonical(), + Some(addr), + None, + ); + let en_delayed = self.allocate_delay_chain_small( + en_delayed_len, + Bool.canonical(), + Some(en), + None, + ); + let read_data_delayed = self.allocate_delay_chain( + read_data_delayed_len, + &data_layout.layout, + None, + read_data, + |v| v, + ); + let write_data_delayed = self.allocate_delay_chain( + write_data_delayed_len, + &data_layout.layout, + write_data, + None, + |v| v, + ); + let write_mask_delayed = self.allocate_delay_chain( + write_mask_delayed_len, + &mask_layout.layout, + write_mask, + None, + |v| v, + ); + let write_mode_delayed = self.allocate_delay_chain_small( + write_mode_delayed_len, + Bool.canonical(), + write_mode, + None, + ); + let mut read_insns = Vec::new(); + let mut write_insns = Vec::new(); + self.compile_memory_port_rw( + memory, + data_layout, + mask_layout, + read_cycle.map(|read_cycle| MemoryPortReadInsns { + addr: addr_delayed[read_cycle], + en: en_delayed[read_cycle], + write_mode: write_mode_delayed.get(read_cycle).copied(), + data: 
read_data_delayed[0], + insns: &mut read_insns, + }), + write_cycle.map(|write_cycle| MemoryPortWriteInsns { + addr: addr_delayed[write_cycle], + en: en_delayed[write_cycle], + write_mode: write_mode_delayed.get(write_cycle).copied(), + data: write_data_delayed[write_cycle], + mask: write_mask_delayed[write_cycle], + insns: &mut write_insns, + }), + ); + self.add_assignment(Interned::default(), read_insns, mem.source_location()); + ( + MemoryPort { + clk_triggered, + addr_delayed, + en_delayed, + data_layout, + read_data_delayed, + write_data_delayed, + write_mask_delayed, + write_mode_delayed, + write_insns, + }, + trace_port, + ) + }) + .unzip(); + let name = mem.scoped_name().1.0; + let id = TraceMemoryId(self.memories.len()); + let stride = mem.array_type().element().bit_width(); + let trace = TraceMem { + id, + name, + stride, + element_type: self + .make_trace_decl_child( + instantiated_module, + MakeTraceDeclTarget::Memory { + id, + depth: mem.array_type().len(), + stride, + start: 0, + ty: mem.array_type().element(), + }, + name, + mem.source_location(), + ) + .intern_sized(), + ports: Intern::intern_owned(trace_ports), + array_type: mem.array_type(), + }; + trace_decls.push(trace.into()); + self.memories.push(Memory { + mem, + memory, + trace, + ports, + }); + } + fn compile_block( + &mut self, + parent_module: Interned, + block: Block, + conditions: Interned<[Cond]>, + trace_decls: &mut Vec, + ) { + let Block { memories, stmts } = block; + for memory in memories { + self.compile_memory(memory, *parent_module, conditions, trace_decls); + } + for stmt in stmts { + match stmt { + Stmt::Connect(StmtConnect { + lhs, + rhs, + source_location, + }) => self.compile_connect( + *parent_module, + conditions, + lhs, + *parent_module, + conditions, + rhs, + source_location, + ), + Stmt::Formal(StmtFormal { .. 
}) => todo!("implement simulating formal statements"), + Stmt::If(StmtIf { + cond, + source_location, + blocks: [then_block, else_block], + }) => { + let cond = self.compile_expr(*parent_module, Expr::canonical(cond)); + let cond = self.compiled_expr_to_value(cond, source_location); + let cond = cond.map_ty(Bool::from_canonical); + self.compile_block( + parent_module, + then_block, + Interned::from_iter(conditions.iter().copied().chain([Cond { + body: CondBody::IfTrue { cond }, + source_location, + }])), + trace_decls, + ); + self.compile_block( + parent_module, + else_block, + Interned::from_iter(conditions.iter().copied().chain([Cond { + body: CondBody::IfFalse { cond }, + source_location, + }])), + trace_decls, + ); + } + Stmt::Match(StmtMatch { + expr, + source_location, + blocks, + }) => { + let enum_expr = self.compile_expr(*parent_module, Expr::canonical(expr)); + let enum_expr = self.compiled_expr_to_value(enum_expr, source_location); + let enum_expr = enum_expr.map_ty(Enum::from_canonical); + let discriminant = self.compile_enum_discriminant(enum_expr, source_location); + for (variant_index, block) in blocks.into_iter().enumerate() { + self.compile_block( + parent_module, + block, + Interned::from_iter(conditions.iter().copied().chain([Cond { + body: CondBody::MatchArm { + discriminant, + variant_index, + }, + source_location, + }])), + trace_decls, + ); + } + } + Stmt::Declaration(declaration) => { + trace_decls.push(self.compile_declaration( + declaration, + parent_module, + conditions, + )); + } + } + } + } + fn compile_module(&mut self, module: Interned) -> &CompiledModule { + let mut trace_decls = Vec::new(); + let module_io = module + .leaf_module() + .module_io() + .iter() + .map( + |&AnnotatedModuleIO { + annotations: _, + module_io, + }| { + let target = TargetInInstantiatedModule { + instantiated_module: *module, + target: Target::from(module_io), + }; + self.decl_conditions.insert(target, Interned::default()); + trace_decls.push(self.make_trace_decl(*module, module_io.into())); + self.compile_value(target) + }, + ) + .collect(); + match module.leaf_module().body() { + ModuleBody::Normal(NormalModuleBody { body }) => { + self.compile_block(module, body, Interned::default(), &mut trace_decls); + } + ModuleBody::Extern(ExternModuleBody { + verilog_name: _, + parameters: _, + simulation, + }) => { + let Some(simulation) = simulation else { + panic!( + "can't simulate extern module without extern_module_simulation: {}", + module.leaf_module().source_location() + ); + }; + self.extern_modules.push(CompiledExternModule { + module_io_targets: module + .leaf_module() + .module_io() + .iter() + .map(|v| Target::from(v.module_io)) + .collect(), + module_io, + simulation, + }); + } + } + let hashbrown::hash_map::Entry::Vacant(entry) = self.modules.entry(*module) else { + unreachable!("compiled same instantiated module twice"); + }; + entry.insert(CompiledModule { + module_io, + trace_decls: TraceModule { + name: module.leaf_module().name(), + children: Intern::intern_owned(trace_decls), + }, + }) + } + fn process_assignments(&mut self) { + self.assignments + .finalize(self.insns.state_layout().ty.clone().into()); + if let Some(DebugOpaque(dump_assignments_dot)) = &self.dump_assignments_dot { + let graph = + petgraph::graph::DiGraph::<_, _, usize>::from_elements(self.assignments.elements()); + dump_assignments_dot(&petgraph::dot::Dot::new(&graph)); + } + let assignments_order: Vec<_> = match petgraph::algo::toposort(&self.assignments, None) { + Ok(nodes) => nodes + .into_iter() + 
.filter_map(|n| match n { + AssignmentOrSlotIndex::AssignmentIndex(v) => Some(v), + _ => None, + }) + .collect(), + Err(e) => match e.node_id() { + AssignmentOrSlotIndex::AssignmentIndex(assignment_index) => panic!( + "combinatorial logic cycle detected at: {}", + self.assignments.assignments()[assignment_index].source_location, + ), + AssignmentOrSlotIndex::SmallSlot(slot) => panic!( + "combinatorial logic cycle detected through: {}", + self.insns.state_layout().ty.small_slots.debug_data[slot.as_usize()].name, + ), + AssignmentOrSlotIndex::BigSlot(slot) => panic!( + "combinatorial logic cycle detected through: {}", + self.insns.state_layout().ty.big_slots.debug_data[slot.as_usize()].name, + ), + }, + }; + struct CondStackEntry<'a> { + cond: &'a Cond, + end_label: Label, + } + let mut cond_stack = Vec::>::new(); + for assignment_index in assignments_order { + let Assignment { + inputs: _, + outputs: _, + conditions, + insns, + source_location, + } = &self.assignments.assignments()[assignment_index]; + let mut same_len = 0; + for (index, (entry, cond)) in cond_stack.iter().zip(conditions).enumerate() { + if entry.cond != cond { + break; + } + same_len = index + 1; + } + while cond_stack.len() > same_len { + let CondStackEntry { cond: _, end_label } = + cond_stack.pop().expect("just checked len"); + self.insns.define_label_at_next_insn(end_label); + } + for cond in &conditions[cond_stack.len()..] { + let end_label = self.insns.new_label(); + match cond.body { + CondBody::IfTrue { cond: cond_value } + | CondBody::IfFalse { cond: cond_value } => { + let (branch_if_zero, branch_if_non_zero) = match cond_value.range.len() { + TypeLen::A_SMALL_SLOT => ( + Insn::BranchIfSmallZero { + target: end_label.0, + value: cond_value.range.small_slots.start, + }, + Insn::BranchIfSmallNonZero { + target: end_label.0, + value: cond_value.range.small_slots.start, + }, + ), + TypeLen::A_BIG_SLOT => ( + Insn::BranchIfZero { + target: end_label.0, + value: cond_value.range.big_slots.start, + }, + Insn::BranchIfNonZero { + target: end_label.0, + value: cond_value.range.big_slots.start, + }, + ), + _ => unreachable!(), + }; + self.insns.push( + if let CondBody::IfTrue { .. 
} = cond.body { + branch_if_zero + } else { + branch_if_non_zero + }, + cond.source_location, + ); + } + CondBody::MatchArm { + discriminant, + variant_index, + } => { + self.insns.push( + Insn::BranchIfSmallNeImmediate { + target: end_label.0, + lhs: discriminant, + rhs: variant_index as _, + }, + cond.source_location, + ); + } + } + cond_stack.push(CondStackEntry { cond, end_label }); + } + self.insns.extend(insns.iter().copied(), *source_location); + } + for CondStackEntry { cond: _, end_label } in cond_stack { + self.insns.define_label_at_next_insn(end_label); + } + } + fn process_clocks(&mut self) -> Interned<[StatePartIndex]> { + mem::take(&mut self.clock_triggers) + .into_iter() + .map( + |ClockTrigger { + last_clk_was_low, + clk, + clk_triggered, + source_location, + }| { + self.insns.push( + Insn::XorSmallImmediate { + dest: last_clk_was_low, + lhs: clk, + rhs: 1, + }, + source_location, + ); + clk_triggered + }, + ) + .collect() + } + fn process_registers(&mut self) { + for Register { + value, + clk_triggered, + reset, + source_location, + } in mem::take(&mut self.registers) + { + match reset { + Some(RegisterReset { + is_async, + init, + rst, + }) => { + let reg_end = self.insns.new_label(); + let reg_reset = self.insns.new_label(); + let branch_if_reset = Insn::BranchIfSmallNonZero { + target: reg_reset.0, + value: rst, + }; + let branch_if_not_triggered = Insn::BranchIfSmallZero { + target: reg_end.0, + value: clk_triggered, + }; + if is_async { + self.insns.push(branch_if_reset, source_location); + self.insns.push(branch_if_not_triggered, source_location); + } else { + self.insns.push(branch_if_not_triggered, source_location); + self.insns.push(branch_if_reset, source_location); + } + self.insns.extend( + value.range.insns_for_copy_from(value.write_value().range), + source_location, + ); + self.insns + .push(Insn::Branch { target: reg_end.0 }, source_location); + self.insns.define_label_at_next_insn(reg_reset); + self.insns + .extend(value.range.insns_for_copy_from(init.range), source_location); + self.insns.define_label_at_next_insn(reg_end); + } + None => { + let reg_end = self.insns.new_label(); + self.insns.push( + Insn::BranchIfSmallZero { + target: reg_end.0, + value: clk_triggered, + }, + source_location, + ); + self.insns.extend( + value.range.insns_for_copy_from(value.write_value().range), + source_location, + ); + self.insns.define_label_at_next_insn(reg_end); + } + } + } + } + fn process_memories(&mut self) { + for memory_index in 0..self.memories.len() { + let Memory { + mem, + memory: _, + trace: _, + ref mut ports, + } = self.memories[memory_index]; + for MemoryPort { + clk_triggered, + addr_delayed, + en_delayed, + data_layout: _, + read_data_delayed, + write_data_delayed, + write_mask_delayed, + write_mode_delayed, + write_insns, + } in mem::take(ports) + { + let port_end = self.insns.new_label(); + let small_shift_reg = + |this: &mut Self, values: &[StatePartIndex]| { + for pair in values.windows(2).rev() { + this.insns.push( + Insn::CopySmall { + dest: pair[1], + src: pair[0], + }, + mem.source_location(), + ); + } + }; + let shift_reg = |this: &mut Self, values: &[TypeIndexRange]| { + for pair in values.windows(2).rev() { + this.insns + .extend(pair[0].insns_for_copy_to(pair[1]), mem.source_location()); + } + }; + self.insns.push( + Insn::BranchIfSmallZero { + target: port_end.0, + value: clk_triggered, + }, + mem.source_location(), + ); + small_shift_reg(self, &addr_delayed); + small_shift_reg(self, &en_delayed); + shift_reg(self, &write_data_delayed); + 
shift_reg(self, &write_mask_delayed); + small_shift_reg(self, &write_mode_delayed); + shift_reg(self, &read_data_delayed); + self.insns.extend(write_insns, mem.source_location()); + self.insns.define_label_at_next_insn(port_end); + } + } + } + pub fn compile(mut self) -> Compiled { + let base_module = + *self.compile_module(InstantiatedModule::Base(self.base_module).intern_sized()); + self.process_assignments(); + self.process_registers(); + self.process_memories(); + let clocks_triggered = self.process_clocks(); + self.insns + .push(Insn::Return, self.base_module.source_location()); + Compiled { + insns: Insns::from(self.insns).intern_sized(), + base_module, + extern_modules: Intern::intern_owned(self.extern_modules), + io: Instance::new_unchecked( + ScopedNameId( + NameId("".intern(), Id::new()), + self.original_base_module.name_id(), + ), + self.original_base_module, + self.original_base_module.source_location(), + ), + traces: SimTraces(Intern::intern_owned(self.traces.0)), + trace_memories: Interned::from_iter(self.memories.iter().map( + |&Memory { + mem: _, + memory, + trace, + ports: _, + }| (memory, trace), + )), + clocks_triggered, + } + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub(crate) struct CompiledModule { + pub(crate) module_io: Interned<[CompiledValue]>, + pub(crate) trace_decls: TraceModule, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct Compiled { + pub(crate) insns: Interned>, + pub(crate) base_module: CompiledModule, + pub(crate) extern_modules: Interned<[CompiledExternModule]>, + pub(crate) io: Instance, + pub(crate) traces: SimTraces]>>, + pub(crate) trace_memories: Interned<[(StatePartIndex, TraceMem)]>, + pub(crate) clocks_triggered: Interned<[StatePartIndex]>, +} + +impl Compiled { + pub fn new(module: Interned>) -> Self { + Self::from_canonical(Compiler::new(module.canonical().intern()).compile()) + } + pub fn canonical(self) -> Compiled { + let Self { + insns, + base_module, + extern_modules, + io, + traces, + trace_memories, + clocks_triggered, + } = self; + Compiled { + insns, + base_module, + extern_modules, + io: Instance::from_canonical(io.canonical()), + traces, + trace_memories, + clocks_triggered, + } + } + pub fn from_canonical(canonical: Compiled) -> Self { + let Compiled { + insns, + base_module, + extern_modules, + io, + traces, + trace_memories, + clocks_triggered, + } = canonical; + Self { + insns, + base_module, + extern_modules, + io: Instance::from_canonical(io.canonical()), + traces, + trace_memories, + clocks_triggered, + } + } +} From 6d36698adfd854f2c21508eef9ccdd575a6881ea Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Tue, 26 Aug 2025 19:23:21 -0700 Subject: [PATCH 2/3] move public paths of sim::{Compiled,Compiler} to sim::compiler --- crates/fayalite/src/sim.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/fayalite/src/sim.rs b/crates/fayalite/src/sim.rs index 596e323..d91427f 100644 --- a/crates/fayalite/src/sim.rs +++ b/crates/fayalite/src/sim.rs @@ -17,7 +17,8 @@ use crate::{ reset::ResetType, sim::{ compiler::{ - CompiledBundleField, CompiledExternModule, CompiledTypeLayoutBody, CompiledValue, + Compiled, CompiledBundleField, CompiledExternModule, CompiledTypeLayoutBody, + CompiledValue, }, interpreter::{ BreakAction, BreakpointsSet, RunResult, SmallUInt, State, StatePartIndex, @@ -47,14 +48,12 @@ use std::{ task::Poll, }; -mod compiler; +pub mod compiler; mod interpreter; pub mod time; pub mod value; pub mod vcd; -pub use compiler::{Compiled, Compiler}; - 
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TraceScalarId(usize); From 9b12ff54e680a4fde8d8f762ee098bd470acfa60 Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Mon, 1 Sep 2025 04:46:24 -0700 Subject: [PATCH 3/3] WIP making progress --- .../src/hdl_bundle.rs | 75 +- .../fayalite-proc-macros-impl/src/hdl_enum.rs | 64 +- crates/fayalite/src/array.rs | 66 +- crates/fayalite/src/bundle.rs | 199 +++--- crates/fayalite/src/clock.rs | 36 +- crates/fayalite/src/enum_.rs | 167 +++-- crates/fayalite/src/expr/ops.rs | 3 +- crates/fayalite/src/firrtl.rs | 495 +++++++------ crates/fayalite/src/int.rs | 93 ++- crates/fayalite/src/int/uint_in_range.rs | 51 +- crates/fayalite/src/memory.rs | 3 +- crates/fayalite/src/module.rs | 3 +- crates/fayalite/src/phantom_const.rs | 24 +- crates/fayalite/src/reset.rs | 36 +- crates/fayalite/src/sim.rs | 73 +- crates/fayalite/src/sim/value.rs | 510 ++++++++++++-- .../src/sim/value/sim_only_value_unsafe.rs | 357 ++++++++++ crates/fayalite/src/sim/vcd.rs | 9 + crates/fayalite/src/ty.rs | 649 +++++++++++++++++- crates/fayalite/src/ty/serde_impls.rs | 5 + crates/fayalite/src/util.rs | 2 +- crates/fayalite/src/util/misc.rs | 19 + 22 files changed, 2326 insertions(+), 613 deletions(-) create mode 100644 crates/fayalite/src/sim/value/sim_only_value_unsafe.rs diff --git a/crates/fayalite-proc-macros-impl/src/hdl_bundle.rs b/crates/fayalite-proc-macros-impl/src/hdl_bundle.rs index d881ecd..538c2da 100644 --- a/crates/fayalite-proc-macros-impl/src/hdl_bundle.rs +++ b/crates/fayalite-proc-macros-impl/src/hdl_bundle.rs @@ -674,23 +674,24 @@ impl ToTokens for ParsedBundle { } }, )); - let sim_value_from_bits_fields = Vec::from_iter(fields.named().into_iter().map(|field| { - let ident: &Ident = field.ident().as_ref().unwrap(); - quote_spanned! {span=> - #ident: v.field_from_bits(), - } - })); - let sim_value_clone_from_bits_fields = + let sim_value_from_opaque_fields = Vec::from_iter(fields.named().into_iter().map(|field| { let ident: &Ident = field.ident().as_ref().unwrap(); quote_spanned! {span=> - v.field_clone_from_bits(&mut value.#ident); + #ident: v.field_from_opaque(), } })); - let sim_value_to_bits_fields = Vec::from_iter(fields.named().into_iter().map(|field| { + let sim_value_clone_from_opaque_fields = + Vec::from_iter(fields.named().into_iter().map(|field| { + let ident: &Ident = field.ident().as_ref().unwrap(); + quote_spanned! {span=> + v.field_clone_from_opaque(&mut value.#ident); + } + })); + let sim_value_to_opaque_fields = Vec::from_iter(fields.named().into_iter().map(|field| { let ident: &Ident = field.ident().as_ref().unwrap(); quote_spanned! 
{span=> - v.field_to_bits(&value.#ident); + v.field(&value.#ident); } })); let to_sim_value_fields = Vec::from_iter(fields.named().into_iter().map(|field| { @@ -745,33 +746,34 @@ impl ToTokens for ParsedBundle { fn source_location() -> ::fayalite::source_location::SourceLocation { ::fayalite::source_location::SourceLocation::caller() } - fn sim_value_from_bits( + fn sim_value_from_opaque( &self, - bits: &::fayalite::bitvec::slice::BitSlice, + opaque: ::fayalite::ty::OpaqueSimValueSlice<'_>, ) -> ::SimValue { #![allow(unused_mut, unused_variables)] - let mut v = ::fayalite::bundle::BundleSimValueFromBits::new(*self, bits); + let mut v = ::fayalite::bundle::BundleSimValueFromOpaque::new(*self, opaque); #mask_type_sim_value_ident { - #(#sim_value_from_bits_fields)* + #(#sim_value_from_opaque_fields)* } } - fn sim_value_clone_from_bits( + fn sim_value_clone_from_opaque( &self, value: &mut ::SimValue, - bits: &::fayalite::bitvec::slice::BitSlice, + opaque: ::fayalite::ty::OpaqueSimValueSlice<'_>, ) { #![allow(unused_mut, unused_variables)] - let mut v = ::fayalite::bundle::BundleSimValueFromBits::new(*self, bits); - #(#sim_value_clone_from_bits_fields)* + let mut v = ::fayalite::bundle::BundleSimValueFromOpaque::new(*self, opaque); + #(#sim_value_clone_from_opaque_fields)* } - fn sim_value_to_bits( + fn sim_value_to_opaque<'__w>( &self, value: &::SimValue, - bits: &mut ::fayalite::bitvec::slice::BitSlice, - ) { + writer: ::fayalite::ty::OpaqueSimValueWriter<'__w>, + ) -> ::fayalite::ty::OpaqueSimValueWritten<'__w> { #![allow(unused_mut, unused_variables)] - let mut v = ::fayalite::bundle::BundleSimValueToBits::new(*self, bits); - #(#sim_value_to_bits_fields)* + let mut v = ::fayalite::bundle::BundleSimValueToOpaque::new(*self, writer); + #(#sim_value_to_opaque_fields)* + v.finish() } } #[automatically_derived] @@ -894,33 +896,34 @@ impl ToTokens for ParsedBundle { fn source_location() -> ::fayalite::source_location::SourceLocation { ::fayalite::source_location::SourceLocation::caller() } - fn sim_value_from_bits( + fn sim_value_from_opaque( &self, - bits: &::fayalite::bitvec::slice::BitSlice, + opaque: ::fayalite::ty::OpaqueSimValueSlice<'_>, ) -> ::SimValue { #![allow(unused_mut, unused_variables)] - let mut v = ::fayalite::bundle::BundleSimValueFromBits::new(*self, bits); + let mut v = ::fayalite::bundle::BundleSimValueFromOpaque::new(*self, opaque); #sim_value_ident { - #(#sim_value_from_bits_fields)* + #(#sim_value_from_opaque_fields)* } } - fn sim_value_clone_from_bits( + fn sim_value_clone_from_opaque( &self, value: &mut ::SimValue, - bits: &::fayalite::bitvec::slice::BitSlice, + opaque: ::fayalite::ty::OpaqueSimValueSlice<'_>, ) { #![allow(unused_mut, unused_variables)] - let mut v = ::fayalite::bundle::BundleSimValueFromBits::new(*self, bits); - #(#sim_value_clone_from_bits_fields)* + let mut v = ::fayalite::bundle::BundleSimValueFromOpaque::new(*self, opaque); + #(#sim_value_clone_from_opaque_fields)* } - fn sim_value_to_bits( + fn sim_value_to_opaque<'__w>( &self, value: &::SimValue, - bits: &mut ::fayalite::bitvec::slice::BitSlice, - ) { + writer: ::fayalite::ty::OpaqueSimValueWriter<'__w>, + ) -> ::fayalite::ty::OpaqueSimValueWritten<'__w> { #![allow(unused_mut, unused_variables)] - let mut v = ::fayalite::bundle::BundleSimValueToBits::new(*self, bits); - #(#sim_value_to_bits_fields)* + let mut v = ::fayalite::bundle::BundleSimValueToOpaque::new(*self, writer); + #(#sim_value_to_opaque_fields)* + v.finish() } } #[automatically_derived] diff --git 
a/crates/fayalite-proc-macros-impl/src/hdl_enum.rs b/crates/fayalite-proc-macros-impl/src/hdl_enum.rs index a891f5c..e5cbe27 100644 --- a/crates/fayalite-proc-macros-impl/src/hdl_enum.rs +++ b/crates/fayalite-proc-macros-impl/src/hdl_enum.rs @@ -701,18 +701,18 @@ impl ToTokens for ParsedEnum { } }, )); - let sim_value_from_bits_unknown_match_arm = if let Some(sim_value_unknown_variant_name) = + let sim_value_from_opaque_unknown_match_arm = if let Some(sim_value_unknown_variant_name) = &sim_value_unknown_variant_name { quote_spanned! {span=> - _ => #sim_value_ident::#sim_value_unknown_variant_name(v.unknown_variant_from_bits()), + _ => #sim_value_ident::#sim_value_unknown_variant_name(v.unknown_variant_from_opaque()), } } else { quote_spanned! {span=> _ => ::fayalite::__std::unreachable!(), } }; - let sim_value_from_bits_match_arms = Vec::from_iter( + let sim_value_from_opaque_match_arms = Vec::from_iter( variants .iter() .enumerate() @@ -729,29 +729,29 @@ impl ToTokens for ParsedEnum { if let Some(_) = field { quote_spanned! {span=> #index => { - let (field, padding) = v.variant_with_field_from_bits(); + let (field, padding) = v.variant_with_field_from_opaque(); #sim_value_ident::#ident(field, padding) } } } else { quote_spanned! {span=> #index => #sim_value_ident::#ident( - v.variant_no_field_from_bits(), + v.variant_no_field_from_opaque(), ), } } }, ) - .chain([sim_value_from_bits_unknown_match_arm]), + .chain([sim_value_from_opaque_unknown_match_arm]), ); - let sim_value_clone_from_bits_unknown_match_arm = + let sim_value_clone_from_opaque_unknown_match_arm = if let Some(sim_value_unknown_variant_name) = &sim_value_unknown_variant_name { quote_spanned! {span=> _ => if let #sim_value_ident::#sim_value_unknown_variant_name(value) = value { - v.unknown_variant_clone_from_bits(value); + v.unknown_variant_clone_from_opaque(value); } else { *value = #sim_value_ident::#sim_value_unknown_variant_name( - v.unknown_variant_from_bits(), + v.unknown_variant_from_opaque(), ); }, } @@ -760,7 +760,7 @@ impl ToTokens for ParsedEnum { _ => ::fayalite::__std::unreachable!(), } }; - let sim_value_clone_from_bits_match_arms = Vec::from_iter( + let sim_value_clone_from_opaque_match_arms = Vec::from_iter( variants .iter() .enumerate() @@ -777,28 +777,28 @@ impl ToTokens for ParsedEnum { if let Some(_) = field { quote_spanned! {span=> #index => if let #sim_value_ident::#ident(field, padding) = value { - v.variant_with_field_clone_from_bits(field, padding); + v.variant_with_field_clone_from_opaque(field, padding); } else { - let (field, padding) = v.variant_with_field_from_bits(); + let (field, padding) = v.variant_with_field_from_opaque(); *value = #sim_value_ident::#ident(field, padding); }, } } else { quote_spanned! {span=> #index => if let #sim_value_ident::#ident(padding) = value { - v.variant_no_field_clone_from_bits(padding); + v.variant_no_field_clone_from_opaque(padding); } else { *value = #sim_value_ident::#ident( - v.variant_no_field_from_bits(), + v.variant_no_field_from_opaque(), ); }, } } }, ) - .chain([sim_value_clone_from_bits_unknown_match_arm]), + .chain([sim_value_clone_from_opaque_unknown_match_arm]), ); - let sim_value_to_bits_match_arms = Vec::from_iter( + let sim_value_to_opaque_match_arms = Vec::from_iter( variants .iter() .enumerate() @@ -815,13 +815,13 @@ impl ToTokens for ParsedEnum { if let Some(_) = field { quote_spanned! 
{span=> #sim_value_ident::#ident(field, padding) => { - v.variant_with_field_to_bits(#index, field, padding); + v.variant_with_field_to_opaque(#index, field, padding) } } } else { quote_spanned! {span=> #sim_value_ident::#ident(padding) => { - v.variant_no_field_to_bits(#index, padding); + v.variant_no_field_to_opaque(#index, padding) } } } @@ -831,7 +831,7 @@ impl ToTokens for ParsedEnum { |sim_value_unknown_variant_name| { quote_spanned! {span=> #sim_value_ident::#sim_value_unknown_variant_name(value) => { - v.unknown_variant_to_bits(value); + v.unknown_variant_to_opaque(value) } } }, @@ -878,33 +878,33 @@ impl ToTokens for ParsedEnum { fn source_location() -> ::fayalite::source_location::SourceLocation { ::fayalite::source_location::SourceLocation::caller() } - fn sim_value_from_bits( + fn sim_value_from_opaque( &self, - bits: &::fayalite::bitvec::slice::BitSlice, + opaque: ::fayalite::ty::OpaqueSimValueSlice<'_>, ) -> ::SimValue { - let v = ::fayalite::enum_::EnumSimValueFromBits::new(*self, bits); + let v = ::fayalite::enum_::EnumSimValueFromOpaque::new(*self, opaque); match v.discriminant() { - #(#sim_value_from_bits_match_arms)* + #(#sim_value_from_opaque_match_arms)* } } - fn sim_value_clone_from_bits( + fn sim_value_clone_from_opaque( &self, value: &mut ::SimValue, - bits: &::fayalite::bitvec::slice::BitSlice, + opaque: ::fayalite::ty::OpaqueSimValueSlice<'_>, ) { - let v = ::fayalite::enum_::EnumSimValueFromBits::new(*self, bits); + let v = ::fayalite::enum_::EnumSimValueFromOpaque::new(*self, opaque); match v.discriminant() { - #(#sim_value_clone_from_bits_match_arms)* + #(#sim_value_clone_from_opaque_match_arms)* } } - fn sim_value_to_bits( + fn sim_value_to_opaque<'__w>( &self, value: &::SimValue, - bits: &mut ::fayalite::bitvec::slice::BitSlice, - ) { - let v = ::fayalite::enum_::EnumSimValueToBits::new(*self, bits); + writer: ::fayalite::ty::OpaqueSimValueWriter<'__w>, + ) -> ::fayalite::ty::OpaqueSimValueWritten<'__w> { + let v = ::fayalite::enum_::EnumSimValueToOpaque::new(*self, writer); match value { - #(#sim_value_to_bits_match_arms)* + #(#sim_value_to_opaque_match_arms)* } } } diff --git a/crates/fayalite/src/array.rs b/crates/fayalite/src/array.rs index c953aea..569f2e2 100644 --- a/crates/fayalite/src/array.rs +++ b/crates/fayalite/src/array.rs @@ -12,12 +12,12 @@ use crate::{ sim::value::{SimValue, SimValuePartialEq}, source_location::SourceLocation, ty::{ - CanonicalType, MatchVariantWithoutScope, StaticType, Type, TypeProperties, TypeWithDeref, + CanonicalType, MatchVariantWithoutScope, OpaqueSimValueSlice, OpaqueSimValueWriter, + OpaqueSimValueWritten, StaticType, Type, TypeProperties, TypeWithDeref, serde_impls::SerdeCanonicalType, }, util::ConstUsize, }; -use bitvec::slice::BitSlice; use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; use std::{iter::FusedIterator, ops::Index}; @@ -48,15 +48,20 @@ impl ArrayType { is_storable, is_castable_from_bits, bit_width, + sim_only_values_len, } = element; let Some(bit_width) = bit_width.checked_mul(len) else { panic!("array too big"); }; + let Some(sim_only_values_len) = sim_only_values_len.checked_mul(len) else { + panic!("array too big"); + }; TypeProperties { is_passive, is_storable, is_castable_from_bits, bit_width, + sim_only_values_len, } } pub fn new(element: T, len: Len::SizeType) -> Self { @@ -194,42 +199,51 @@ impl Type for ArrayType { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), self.type_properties.bit_width); - 
let element = self.element(); - let element_bit_width = element.canonical().bit_width(); - TryFrom::try_from(Vec::from_iter((0..self.len()).map(|i| { - SimValue::from_bitslice(element, &bits[i * element_bit_width..][..element_bit_width]) - }))) - .ok() - .expect("used correct length") + fn sim_value_from_opaque(&self, mut opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + let element_ty = self.element(); + let element_size = element_ty.canonical().size(); + let mut value = Vec::with_capacity(self.len()); + for _ in 0..self.len() { + let (element_opaque, rest) = opaque.split_at(element_size); + value.push(SimValue::from_opaque(element_ty, element_opaque.to_owned())); + opaque = rest; + } + value.try_into().ok().expect("used correct length") } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), self.type_properties.bit_width); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + mut opaque: OpaqueSimValueSlice<'_>, + ) { let element_ty = self.element(); - let element_bit_width = element_ty.canonical().bit_width(); - let value: &mut [SimValue] = value.as_mut(); + let element_size = element_ty.canonical().size(); + let value = AsMut::<[SimValue]>::as_mut(value); assert_eq!(self.len(), value.len()); - for (i, element_value) in value.iter_mut().enumerate() { + for element_value in value { assert_eq!(SimValue::ty(element_value), element_ty); - SimValue::bits_mut(element_value) - .bits_mut() - .copy_from_bitslice(&bits[i * element_bit_width..][..element_bit_width]); + let (element_opaque, rest) = opaque.split_at(element_size); + SimValue::opaque_mut(element_value).clone_from_slice(element_opaque); + opaque = rest; } } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), self.type_properties.bit_width); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + mut writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { let element_ty = self.element(); - let element_bit_width = element_ty.canonical().bit_width(); - let value: &[SimValue] = value.as_ref(); + let element_size = element_ty.canonical().size(); + let value = AsRef::<[SimValue]>::as_ref(value); assert_eq!(self.len(), value.len()); - for (i, element_value) in value.iter().enumerate() { + for element_value in value { assert_eq!(SimValue::ty(element_value), element_ty); - bits[i * element_bit_width..][..element_bit_width] - .copy_from_bitslice(SimValue::bits(element_value).bits()); + writer.fill_prefix_with(element_size, |writer| { + writer.fill_cloned_from_slice(SimValue::opaque(element_value).as_slice()) + }); } + writer.fill_cloned_from_slice(OpaqueSimValueSlice::empty()) } } diff --git a/crates/fayalite/src/bundle.rs b/crates/fayalite/src/bundle.rs index 30a70d5..55843ea 100644 --- a/crates/fayalite/src/bundle.rs +++ b/crates/fayalite/src/bundle.rs @@ -11,12 +11,12 @@ use crate::{ sim::value::{SimValue, SimValuePartialEq, ToSimValue, ToSimValueWithType}, source_location::SourceLocation, ty::{ - CanonicalType, MatchVariantWithoutScope, OpaqueSimValue, StaticType, Type, TypeProperties, - TypeWithDeref, impl_match_variant_as_self, + CanonicalType, MatchVariantWithoutScope, OpaqueSimValue, OpaqueSimValueSize, + OpaqueSimValueSlice, OpaqueSimValueWriter, OpaqueSimValueWritten, StaticType, Type, + TypeProperties, TypeWithDeref, impl_match_variant_as_self, }, util::HashMap, }; -use bitvec::{slice::BitSlice, vec::BitVec}; use serde::{Deserialize, Serialize}; use std::{fmt, 
marker::PhantomData}; @@ -69,7 +69,7 @@ impl fmt::Display for FmtDebugInStruct { struct BundleImpl { fields: Interned<[BundleField]>, name_indexes: HashMap, usize>, - field_offsets: Interned<[usize]>, + field_offsets: Interned<[OpaqueSimValueSize]>, type_properties: TypeProperties, } @@ -89,12 +89,9 @@ impl std::fmt::Debug for BundleImpl { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("Bundle ")?; f.debug_set() - .entries( - self.fields - .iter() - .enumerate() - .map(|(index, field)| field.fmt_debug_in_struct(self.field_offsets[index])), - ) + .entries(self.fields.iter().enumerate().map(|(index, field)| { + field.fmt_debug_in_struct(self.field_offsets[index].bit_width) + })) .finish() } } @@ -119,6 +116,7 @@ impl BundleTypePropertiesBuilder { is_storable: true, is_castable_from_bits: true, bit_width: 0, + sim_only_values_len: 0, }) } pub const fn clone(&self) -> Self { @@ -126,8 +124,12 @@ impl BundleTypePropertiesBuilder { } #[must_use] pub const fn field(self, flipped: bool, field_props: TypeProperties) -> Self { - let Some(bit_width) = self.0.bit_width.checked_add(field_props.bit_width) else { - panic!("bundle is too big: bit-width overflowed"); + let Some(OpaqueSimValueSize { + bit_width, + sim_only_values_len, + }) = self.0.size().checked_add(field_props.size()) + else { + panic!("bundle is too big: size overflowed"); }; if flipped { Self(TypeProperties { @@ -135,6 +137,7 @@ impl BundleTypePropertiesBuilder { is_storable: false, is_castable_from_bits: false, bit_width, + sim_only_values_len, }) } else { Self(TypeProperties { @@ -143,6 +146,7 @@ impl BundleTypePropertiesBuilder { is_castable_from_bits: self.0.is_castable_from_bits & field_props.is_castable_from_bits, bit_width, + sim_only_values_len, }) } } @@ -167,7 +171,7 @@ impl Bundle { if let Some(old_index) = name_indexes.insert(name, index) { panic!("duplicate field name {name:?}: at both index {old_index} and {index}"); } - field_offsets.push(type_props_builder.0.bit_width); + field_offsets.push(type_props_builder.0.size()); type_props_builder = type_props_builder.field(flipped, ty.type_properties()); } Self(Intern::intern_sized(BundleImpl { @@ -183,7 +187,7 @@ impl Bundle { pub fn field_by_name(&self, name: Interned) -> Option { Some(self.0.fields[*self.0.name_indexes.get(&name)?]) } - pub fn field_offsets(self) -> Interned<[usize]> { + pub fn field_offsets(self) -> Interned<[OpaqueSimValueSize]> { self.0.field_offsets } pub fn type_properties(self) -> TypeProperties { @@ -241,19 +245,27 @@ impl Type for Bundle { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), self.type_properties().bit_width); - OpaqueSimValue::from_bitslice(bits) + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(self.type_properties().size(), opaque.size()); + opaque.to_owned() } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), self.type_properties().bit_width); - assert_eq!(value.bit_width(), self.type_properties().bit_width); - value.bits_mut().bits_mut().copy_from_bitslice(bits); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(self.type_properties().size(), opaque.size()); + assert_eq!(value.size(), opaque.size()); + value.clone_from_slice(opaque); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut 
BitSlice) { - assert_eq!(bits.len(), self.type_properties().bit_width); - assert_eq!(value.bit_width(), self.type_properties().bit_width); - bits.copy_from_bitslice(value.bits().bits()); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!(self.type_properties().size(), writer.size()); + assert_eq!(value.size(), writer.size()); + writer.fill_cloned_from_slice(value.as_slice()) } } @@ -263,29 +275,29 @@ pub trait BundleType: Type { fn fields(&self) -> Interned<[BundleField]>; } -pub struct BundleSimValueFromBits<'a> { +pub struct BundleSimValueFromOpaque<'a> { fields: std::slice::Iter<'static, BundleField>, - bits: &'a BitSlice, + opaque: OpaqueSimValueSlice<'a>, } -impl<'a> BundleSimValueFromBits<'a> { +impl<'a> BundleSimValueFromOpaque<'a> { #[track_caller] - pub fn new(bundle_ty: T, bits: &'a BitSlice) -> Self { + pub fn new(bundle_ty: T, opaque: OpaqueSimValueSlice<'a>) -> Self { let fields = bundle_ty.fields(); assert_eq!( - bits.len(), + opaque.size(), fields .iter() - .map(|BundleField { ty, .. }| ty.bit_width()) - .sum::() + .map(|BundleField { ty, .. }| ty.size()) + .sum::() ); Self { fields: Interned::into_inner(fields).iter(), - bits, + opaque, } } #[track_caller] - fn field_ty_and_bits(&mut self) -> (T, &'a BitSlice) { + fn field_ty_and_opaque(&mut self) -> (T, OpaqueSimValueSlice<'a>) { let Some(&BundleField { name: _, flipped: _, @@ -294,59 +306,68 @@ impl<'a> BundleSimValueFromBits<'a> { else { panic!("tried to read too many fields from BundleSimValueFromBits"); }; - let (field_bits, rest) = self.bits.split_at(ty.bit_width()); - self.bits = rest; - (T::from_canonical(ty), field_bits) + let (field_opaque, rest) = self.opaque.split_at(ty.size()); + self.opaque = rest; + (T::from_canonical(ty), field_opaque) } #[track_caller] - pub fn field_from_bits(&mut self) -> SimValue { - let (field_ty, field_bits) = self.field_ty_and_bits::(); - SimValue::from_bitslice(field_ty, field_bits) + pub fn field_from_opaque(&mut self) -> SimValue { + let (field_ty, field_opaque) = self.field_ty_and_opaque::(); + SimValue::from_opaque(field_ty, field_opaque.to_owned()) } #[track_caller] - pub fn field_clone_from_bits(&mut self, field_value: &mut SimValue) { - let (field_ty, field_bits) = self.field_ty_and_bits::(); + pub fn field_clone_from_opaque(&mut self, field_value: &mut SimValue) { + let (field_ty, field_opaque) = self.field_ty_and_opaque::(); assert_eq!(field_ty, SimValue::ty(field_value)); - SimValue::bits_mut(field_value) - .bits_mut() - .copy_from_bitslice(field_bits); + SimValue::opaque_mut(field_value).clone_from_slice(field_opaque); } } -pub struct BundleSimValueToBits<'a> { +pub struct BundleSimValueToOpaque<'a> { fields: std::slice::Iter<'static, BundleField>, - bits: &'a mut BitSlice, + writer: OpaqueSimValueWriter<'a>, } -impl<'a> BundleSimValueToBits<'a> { +impl<'a> BundleSimValueToOpaque<'a> { #[track_caller] - pub fn new(bundle_ty: T, bits: &'a mut BitSlice) -> Self { + pub fn new(bundle_ty: T, writer: OpaqueSimValueWriter<'a>) -> Self { let fields = bundle_ty.fields(); assert_eq!( - bits.len(), + writer.size(), fields .iter() - .map(|BundleField { ty, .. }| ty.bit_width()) - .sum::() + .map(|BundleField { ty, .. 
}| ty.size()) + .sum::() ); Self { fields: Interned::into_inner(fields).iter(), - bits, + writer, } } #[track_caller] - pub fn field_to_bits(&mut self, field_value: &SimValue) { + pub fn field(&mut self, field_value: &SimValue) { let Some(&BundleField { name: _, flipped: _, ty, }) = self.fields.next() else { - panic!("tried to read too many fields from BundleSimValueFromBits"); + panic!("tried to write too many fields with BundleSimValueToOpaque"); }; assert_eq!(T::from_canonical(ty), SimValue::ty(field_value)); - self.bits[..ty.bit_width()].copy_from_bitslice(SimValue::bits(field_value).bits()); - self.bits = &mut std::mem::take(&mut self.bits)[ty.bit_width()..]; + self.writer.fill_prefix_with(ty.size(), |writer| { + writer.fill_cloned_from_slice(SimValue::opaque(field_value).as_slice()) + }); + } + #[track_caller] + pub fn finish(mut self) -> OpaqueSimValueWritten<'a> { + assert_eq!( + self.fields.next(), + None, + "wrote too few fields with BundleSimValueToOpaque" + ); + self.writer + .fill_cloned_from_slice(OpaqueSimValueSlice::empty()) } } @@ -495,23 +516,32 @@ macro_rules! impl_tuples { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { #![allow(unused_mut, unused_variables)] - let mut v = BundleSimValueFromBits::new(*self, bits); - $(let $var = v.field_from_bits();)* + let mut v = BundleSimValueFromOpaque::new(*self, opaque); + $(let $var = v.field_from_opaque();)* ($($var,)*) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { #![allow(unused_mut, unused_variables)] - let mut v = BundleSimValueFromBits::new(*self, bits); + let mut v = BundleSimValueFromOpaque::new(*self, opaque); let ($($var,)*) = value; - $(v.field_clone_from_bits($var);)* + $(v.field_clone_from_opaque($var);)* } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { #![allow(unused_mut, unused_variables)] - let mut v = BundleSimValueToBits::new(*self, bits); + let mut v = BundleSimValueToOpaque::new(*self, writer); let ($($var,)*) = value; - $(v.field_to_bits($var);)* + $(v.field($var);)* + v.finish() } } impl<$($T: Type,)*> BundleType for ($($T,)*) { @@ -592,12 +622,12 @@ macro_rules! 
impl_tuples { let [$($ty_var,)*] = *ty.fields() else { panic!("bundle has wrong number of fields"); }; - let mut bits = BitVec::new(); + let mut opaque = OpaqueSimValue::empty(); $(let $var = $var.into_sim_value_with_type($ty_var.ty); assert_eq!(SimValue::ty(&$var), $ty_var.ty); - bits.extend_from_bitslice(SimValue::bits(&$var).bits()); + opaque.extend_from_slice(SimValue::opaque(&$var).as_slice()); )* - bits.into_sim_value_with_type(ty) + SimValue::from_opaque(ty, opaque) } } impl<$($T: ToSimValueWithType<$Ty>, $Ty: Type,)*> ToSimValueWithType<($($Ty,)*)> for ($($T,)*) { @@ -723,15 +753,23 @@ impl Type for PhantomData { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert!(bits.is_empty()); + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert!(opaque.is_empty()); *self } - fn sim_value_clone_from_bits(&self, _value: &mut Self::SimValue, bits: &BitSlice) { - assert!(bits.is_empty()); + fn sim_value_clone_from_opaque( + &self, + _value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert!(opaque.is_empty()); } - fn sim_value_to_bits(&self, _value: &Self::SimValue, bits: &mut BitSlice) { - assert!(bits.is_empty()); + fn sim_value_to_opaque<'w>( + &self, + _value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::empty()) } } @@ -800,18 +838,15 @@ impl ToSimValueWithType for PhantomData { #[track_caller] fn to_sim_value_with_type(&self, ty: Bundle) -> SimValue { assert!(ty.fields().is_empty()); - ToSimValueWithType::into_sim_value_with_type(BitVec::new(), ty) + SimValue::from_opaque(ty, OpaqueSimValue::empty()) } } impl ToSimValueWithType for PhantomData { #[track_caller] - fn to_sim_value_with_type(&self, ty: CanonicalType) -> SimValue { - let ty = Bundle::from_canonical(ty); + fn to_sim_value_with_type(&self, canonical_ty: CanonicalType) -> SimValue { + let ty = Bundle::from_canonical(canonical_ty); assert!(ty.fields().is_empty()); - SimValue::into_canonical(ToSimValueWithType::into_sim_value_with_type( - BitVec::new(), - ty, - )) + SimValue::from_opaque(canonical_ty, OpaqueSimValue::empty()) } } diff --git a/crates/fayalite/src/clock.rs b/crates/fayalite/src/clock.rs index 66b0e20..909edbd 100644 --- a/crates/fayalite/src/clock.rs +++ b/crates/fayalite/src/clock.rs @@ -6,9 +6,12 @@ use crate::{ int::Bool, reset::{Reset, ResetType}, source_location::SourceLocation, - ty::{CanonicalType, StaticType, Type, TypeProperties, impl_match_variant_as_self}, + ty::{ + CanonicalType, OpaqueSimValueSize, OpaqueSimValueSlice, OpaqueSimValueWriter, + OpaqueSimValueWritten, StaticType, Type, TypeProperties, impl_match_variant_as_self, + }, }; -use bitvec::slice::BitSlice; +use bitvec::{bits, order::Lsb0}; #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)] pub struct Clock; @@ -39,19 +42,29 @@ impl Type for Clock { retval } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), 1); - bits[0] + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(opaque.size(), OpaqueSimValueSize::from_bit_width(1)); + opaque.bits()[0] } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), 1); - *value = bits[0]; + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + 
assert_eq!(opaque.size(), OpaqueSimValueSize::from_bit_width(1)); + *value = opaque.bits()[0]; } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), 1); - bits.set(0, *value); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!(writer.size(), OpaqueSimValueSize::from_bit_width(1)); + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice( + [bits![0], bits![1]][*value as usize], + )) } } @@ -72,6 +85,7 @@ impl StaticType for Clock { is_storable: false, is_castable_from_bits: true, bit_width: 1, + sim_only_values_len: 0, }; const MASK_TYPE_PROPERTIES: TypeProperties = Bool::TYPE_PROPERTIES; } diff --git a/crates/fayalite/src/enum_.rs b/crates/fayalite/src/enum_.rs index 283e4ff..5fbac9f 100644 --- a/crates/fayalite/src/enum_.rs +++ b/crates/fayalite/src/enum_.rs @@ -16,7 +16,8 @@ use crate::{ sim::value::{SimValue, SimValuePartialEq}, source_location::SourceLocation, ty::{ - CanonicalType, MatchVariantAndInactiveScope, OpaqueSimValue, StaticType, Type, + CanonicalType, MatchVariantAndInactiveScope, OpaqueSimValue, OpaqueSimValueSize, + OpaqueSimValueSlice, OpaqueSimValueWriter, OpaqueSimValueWritten, StaticType, Type, TypeProperties, }, util::HashMap, @@ -120,6 +121,7 @@ impl EnumTypePropertiesBuilder { is_storable: true, is_castable_from_bits: true, bit_width: 0, + sim_only_values_len: 0, }, variant_count: 0, } @@ -138,9 +140,14 @@ impl EnumTypePropertiesBuilder { is_storable, is_castable_from_bits, bit_width, + sim_only_values_len, }) = field_props { assert!(is_passive, "variant type must be a passive type"); + assert!( + sim_only_values_len == 0, + "can't have `SimOnlyValue`s in an Enum" + ); type_properties = TypeProperties { is_passive: true, is_storable: type_properties.is_storable & is_storable, @@ -151,6 +158,7 @@ impl EnumTypePropertiesBuilder { } else { type_properties.bit_width }, + sim_only_values_len: 0, }; } Self { @@ -381,19 +389,27 @@ impl Type for Enum { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), self.type_properties().bit_width); - OpaqueSimValue::from_bitslice(bits) + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(self.type_properties().size(), opaque.size()); + opaque.to_owned() } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), self.type_properties().bit_width); - assert_eq!(value.bit_width(), self.type_properties().bit_width); - value.bits_mut().bits_mut().copy_from_bitslice(bits); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(self.type_properties().size(), opaque.size()); + assert_eq!(value.size(), opaque.size()); + value.clone_from_slice(opaque); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), self.type_properties().bit_width); - assert_eq!(value.bit_width(), self.type_properties().bit_width); - bits.copy_from_bitslice(value.bits().bits()); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!(self.type_properties().size(), writer.size()); + assert_eq!(value.size(), writer.size()); + writer.fill_cloned_from_slice(value.as_slice()) } } @@ -458,23 +474,30 @@ 
impl UnknownVariantSimValue { } } -pub struct EnumSimValueFromBits<'a> { +pub struct EnumSimValueFromOpaque<'a> { variants: Interned<[EnumVariant]>, discriminant: usize, body_bits: &'a BitSlice, } -impl<'a> EnumSimValueFromBits<'a> { +impl<'a> EnumSimValueFromOpaque<'a> { #[track_caller] - pub fn new(ty: T, bits: &'a BitSlice) -> Self { + pub fn new(ty: T, opaque: OpaqueSimValueSlice<'a>) -> Self { let variants = ty.variants(); - let bit_width = EnumTypePropertiesBuilder::new() + let size @ OpaqueSimValueSize { + bit_width: _, + sim_only_values_len: 0, + } = EnumTypePropertiesBuilder::new() .variants(variants) .finish() - .bit_width; - assert_eq!(bit_width, bits.len()); - let (discriminant_bits, body_bits) = - bits.split_at(discriminant_bit_width_impl(variants.len())); + .size() + else { + unreachable!(); + }; + assert_eq!(size, opaque.size()); + let (discriminant_bits, body_bits) = opaque + .bits() + .split_at(discriminant_bit_width_impl(variants.len())); let mut discriminant = 0usize; discriminant.view_bits_mut::()[..discriminant_bits.len()] .copy_from_bitslice(discriminant_bits); @@ -517,7 +540,7 @@ impl<'a> EnumSimValueFromBits<'a> { (*ty, variant_bits, padding_bits) } #[track_caller] - pub fn unknown_variant_from_bits(self) -> UnknownVariantSimValue { + pub fn unknown_variant_from_opaque(self) -> UnknownVariantSimValue { let None = self.variants.get(self.discriminant) else { self.usage_error(false); }; @@ -527,7 +550,7 @@ impl<'a> EnumSimValueFromBits<'a> { ) } #[track_caller] - pub fn unknown_variant_clone_from_bits(self, value: &mut UnknownVariantSimValue) { + pub fn unknown_variant_clone_from_opaque(self, value: &mut UnknownVariantSimValue) { let None = self.variants.get(self.discriminant) else { self.usage_error(true); }; @@ -539,14 +562,14 @@ impl<'a> EnumSimValueFromBits<'a> { .copy_from_bitslice(self.body_bits); } #[track_caller] - pub fn variant_no_field_from_bits(self) -> EnumPaddingSimValue { + pub fn variant_no_field_from_opaque(self) -> EnumPaddingSimValue { let (None, _variant_bits, padding_bits) = self.known_variant(false) else { self.usage_error(false); }; EnumPaddingSimValue::from_bitslice(padding_bits) } #[track_caller] - pub fn variant_with_field_from_bits(self) -> (SimValue, EnumPaddingSimValue) { + pub fn variant_with_field_from_opaque(self) -> (SimValue, EnumPaddingSimValue) { let (Some(variant_ty), variant_bits, padding_bits) = self.known_variant(false) else { self.usage_error(false); }; @@ -566,14 +589,14 @@ impl<'a> EnumSimValueFromBits<'a> { } } #[track_caller] - pub fn variant_no_field_clone_from_bits(self, padding: &mut EnumPaddingSimValue) { + pub fn variant_no_field_clone_from_opaque(self, padding: &mut EnumPaddingSimValue) { let (None, _variant_bits, padding_bits) = self.known_variant(true) else { self.usage_error(true); }; Self::clone_padding_from_bits(padding, padding_bits); } #[track_caller] - pub fn variant_with_field_clone_from_bits( + pub fn variant_with_field_clone_from_opaque( self, value: &mut SimValue, padding: &mut EnumPaddingSimValue, @@ -589,35 +612,46 @@ impl<'a> EnumSimValueFromBits<'a> { } } -pub struct EnumSimValueToBits<'a> { +pub struct EnumSimValueToOpaque<'a> { variants: Interned<[EnumVariant]>, bit_width: usize, discriminant_bit_width: usize, - bits: &'a mut BitSlice, + writer: OpaqueSimValueWriter<'a>, } -impl<'a> EnumSimValueToBits<'a> { +impl<'a> EnumSimValueToOpaque<'a> { #[track_caller] - pub fn new(ty: T, bits: &'a mut BitSlice) -> Self { + pub fn new(ty: T, writer: OpaqueSimValueWriter<'a>) -> Self { let variants = 
ty.variants(); - let bit_width = EnumTypePropertiesBuilder::new() + let size @ OpaqueSimValueSize { + bit_width, + sim_only_values_len: 0, + } = EnumTypePropertiesBuilder::new() .variants(variants) .finish() - .bit_width; - assert_eq!(bit_width, bits.len()); + .size() + else { + unreachable!(); + }; + assert_eq!(size, writer.size()); Self { variants, bit_width, discriminant_bit_width: discriminant_bit_width_impl(variants.len()), - bits, + writer, } } #[track_caller] - fn discriminant_to_bits(&mut self, mut discriminant: usize) { + fn write_discriminant(&mut self, mut discriminant: usize) { let orig_discriminant = discriminant; let discriminant_bits = &mut discriminant.view_bits_mut::()[..self.discriminant_bit_width]; - self.bits[..self.discriminant_bit_width].copy_from_bitslice(discriminant_bits); + self.writer.fill_prefix_with( + OpaqueSimValueSize::from_bit_width(self.discriminant_bit_width), + |writer| { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice(discriminant_bits)) + }, + ); discriminant_bits.fill(false); assert!( discriminant == 0, @@ -625,8 +659,11 @@ impl<'a> EnumSimValueToBits<'a> { ); } #[track_caller] - pub fn unknown_variant_to_bits(mut self, value: &UnknownVariantSimValue) { - self.discriminant_to_bits(value.discriminant); + pub fn unknown_variant_to_opaque( + mut self, + value: &UnknownVariantSimValue, + ) -> OpaqueSimValueWritten<'a> { + self.write_discriminant(value.discriminant); let None = self.variants.get(value.discriminant) else { panic!("can't use UnknownVariantSimValue to set known discriminant"); }; @@ -634,45 +671,57 @@ impl<'a> EnumSimValueToBits<'a> { self.bit_width - self.discriminant_bit_width, value.body_bits.width() ); - self.bits[self.discriminant_bit_width..].copy_from_bitslice(value.body_bits.bits()); + self.writer + .fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice(value.body_bits.bits())) } #[track_caller] fn known_variant( mut self, discriminant: usize, + value: Option<&OpaqueSimValue>, padding: &EnumPaddingSimValue, - ) -> (Option, &'a mut BitSlice) { - self.discriminant_to_bits(discriminant); + ) -> OpaqueSimValueWritten<'a> { + self.write_discriminant(discriminant); let variant_ty = self.variants[discriminant].ty; - let variant_bit_width = variant_ty.map_or(0, CanonicalType::bit_width); - let padding_bits = &mut self.bits[self.discriminant_bit_width..][variant_bit_width..]; - if let Some(padding) = padding.bits() { - assert_eq!(padding.width(), padding_bits.len()); - padding_bits.copy_from_bitslice(padding.bits()); - } else { - padding_bits.fill(false); + let variant_size = variant_ty.map_or(OpaqueSimValueSize::empty(), CanonicalType::size); + if let Some(value) = value { + if variant_ty.is_none() { + panic!("expected variant to have no field"); + } + self.writer.fill_prefix_with(variant_size, |writer| { + writer.fill_cloned_from_slice(value.as_slice()) + }); + } else if variant_ty.is_some() { + panic!("expected variant to have a field"); + } + if let Some(padding) = padding.bits() { + assert_eq!(padding.ty().type_properties().size(), self.writer.size()); + self.writer + .fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice(padding.bits())) + } else { + self.writer.fill_with_zeros() } - let variant_bits = &mut self.bits[self.discriminant_bit_width..][..variant_bit_width]; - (variant_ty, variant_bits) } #[track_caller] - pub fn variant_no_field_to_bits(self, discriminant: usize, padding: &EnumPaddingSimValue) { - let (None, _variant_bits) = self.known_variant(discriminant, padding) else { - panic!("expected variant 
to have no field"); - }; + pub fn variant_no_field_to_opaque( + self, + discriminant: usize, + padding: &EnumPaddingSimValue, + ) -> OpaqueSimValueWritten<'a> { + self.known_variant(discriminant, None, padding) } #[track_caller] - pub fn variant_with_field_to_bits( + pub fn variant_with_field_to_opaque( self, discriminant: usize, value: &SimValue, padding: &EnumPaddingSimValue, - ) { - let (Some(variant_ty), variant_bits) = self.known_variant(discriminant, padding) else { - panic!("expected variant to have a field"); + ) -> OpaqueSimValueWritten<'a> { + let Some(variant_ty) = self.variants[discriminant].ty else { + panic!("expected variant to have no field"); }; assert_eq!(SimValue::ty(value), T::from_canonical(variant_ty)); - variant_bits.copy_from_bitslice(SimValue::bits(value).bits()); + self.known_variant(discriminant, Some(SimValue::opaque(value)), padding) } } diff --git a/crates/fayalite/src/expr/ops.rs b/crates/fayalite/src/expr/ops.rs index 4f482ab..b10e3ae 100644 --- a/crates/fayalite/src/expr/ops.rs +++ b/crates/fayalite/src/expr/ops.rs @@ -1937,7 +1937,8 @@ impl FieldAccess { let field = Expr::ty(base).fields()[field_index]; let field_type = FieldType::from_canonical(field.ty); let literal_bits = base.to_literal_bits().map(|bits| { - bits[Expr::ty(base).field_offsets()[field_index]..][..field.ty.bit_width()].intern() + bits[Expr::ty(base).field_offsets()[field_index].bit_width..][..field.ty.bit_width()] + .intern() }); let target = base.target().map(|base| { Intern::intern_sized(base.join(TargetPathElement::intern_sized( diff --git a/crates/fayalite/src/firrtl.rs b/crates/fayalite/src/firrtl.rs index b766cf6..abfe518 100644 --- a/crates/fayalite/src/firrtl.rs +++ b/crates/fayalite/src/firrtl.rs @@ -33,7 +33,7 @@ use crate::{ }, reset::{AsyncReset, Reset, ResetType, SyncReset}, source_location::SourceLocation, - ty::{CanonicalType, Type}, + ty::{CanonicalType, OpaqueSimValueSize, Type}, util::{ BitSliceWriteWithBase, DebugAsRawString, GenericConstBool, HashMap, HashSet, const_str_array_is_strictly_ascending, @@ -57,6 +57,43 @@ use std::{ rc::Rc, }; +#[derive(Clone, Debug)] +#[non_exhaustive] +enum FirrtlError { + SimOnlyValuesAreNotPermitted, +} + +impl fmt::Display for FirrtlError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + FirrtlError::SimOnlyValuesAreNotPermitted => { + f.write_str("`SimOnlyValue`s are not permitted") + } + } + } +} + +impl std::error::Error for FirrtlError {} + +enum FirrtlOrWrappedError { + FirrtlError(FirrtlError), + WrappedError(WrappedError), +} + +impl From for FirrtlOrWrappedError { + fn from(value: FirrtlError) -> Self { + Self::FirrtlError(value) + } +} + +impl From for FirrtlOrWrappedError { + fn from(value: WrappedError) -> Self { + Self::WrappedError(value) + } +} + +type Result = std::result::Result; + struct EscapedString<'a> { value: &'a str, raw: bool, @@ -320,20 +357,20 @@ impl DefinitionsMap { map: Default::default(), } } - fn get_or_make<'a>( + fn get_or_make<'a, E>( &'a self, key: K, - make: impl FnOnce(&K, &'a RcDefinitions) -> (Ident, V), - ) -> (Ident, V) + make: impl FnOnce(&K, &'a RcDefinitions) -> Result<(Ident, V), E>, + ) -> Result<(Ident, V), E> where K: Hash + Eq, V: Clone, { if let Some(retval) = self.map.borrow().get(&key) { - return retval.clone(); + return Ok(retval.clone()); } - let value = make(&key, &self.definitions); - self.map.borrow_mut().entry(key).or_insert(value).clone() + let value = make(&key, &self.definitions)?; + Ok(self.map.borrow_mut().entry(key).or_insert(value).clone()) } 
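For orientation, a standalone sketch of the fallible-memoization shape `get_or_make` takes in this hunk: check the cache first, otherwise run the now-fallible constructor, propagate its error with `?`, and cache only on success. The types and names below are illustrative stand-ins, not the crate's.

    use std::{cell::RefCell, collections::HashMap, hash::Hash};

    struct Memo<K, V> {
        map: RefCell<HashMap<K, V>>,
    }

    impl<K: Hash + Eq, V: Clone> Memo<K, V> {
        fn get_or_make<E>(
            &self,
            key: K,
            make: impl FnOnce(&K) -> Result<V, E>,
        ) -> Result<V, E> {
            if let Some(value) = self.map.borrow().get(&key) {
                return Ok(value.clone());
            }
            // an error here leaves the cache untouched
            let value = make(&key)?;
            Ok(self.map.borrow_mut().entry(key).or_insert(value).clone())
        }
    }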
} @@ -367,10 +404,10 @@ impl TypeState { self.next_type_name.set(id + 1); Ident(Intern::intern_owned(format!("Ty{id}"))) } - fn get_bundle_field(&mut self, ty: Bundle, name: Interned) -> Ident { - self.bundle_ns(ty).borrow_mut().get(name) + fn get_bundle_field(&mut self, ty: Bundle, name: Interned) -> Result { + Ok(self.bundle_ns(ty)?.borrow_mut().get(name)) } - fn bundle_def(&self, ty: Bundle) -> (Ident, Rc>) { + fn bundle_def(&self, ty: Bundle) -> Result<(Ident, Rc>)> { self.bundle_defs.get_or_make(ty, |&ty, definitions| { let mut ns = Namespace::default(); let mut body = String::new(); @@ -383,21 +420,21 @@ impl TypeState { body.push_str("flip "); } write!(body, "{}: ", ns.get(name)).unwrap(); - body.push_str(&self.ty(ty)); + body.push_str(&self.ty(ty)?); } body.push('}'); let name = self.make_type_name(); definitions.add_definition_line(format_args!("type {name} = {body}")); - (name, Rc::new(RefCell::new(ns))) + Ok((name, Rc::new(RefCell::new(ns)))) }) } - fn bundle_ty(&self, ty: Bundle) -> Ident { - self.bundle_def(ty).0 + fn bundle_ty(&self, ty: Bundle) -> Result { + Ok(self.bundle_def(ty)?.0) } - fn bundle_ns(&self, ty: Bundle) -> Rc> { - self.bundle_def(ty).1 + fn bundle_ns(&self, ty: Bundle) -> Result>> { + Ok(self.bundle_def(ty)?.1) } - fn enum_def(&self, ty: Enum) -> (Ident, Rc) { + fn enum_def(&self, ty: Enum) -> Result<(Ident, Rc)> { self.enum_defs.get_or_make(ty, |&ty, definitions| { let mut variants = Namespace::default(); let mut body = String::new(); @@ -409,33 +446,33 @@ impl TypeState { write!(body, "{}", variants.get(name)).unwrap(); if let Some(ty) = ty { body.push_str(": "); - body.push_str(&self.ty(ty)); + body.push_str(&self.ty(ty)?); } } body.push_str("|}"); let name = self.make_type_name(); definitions.add_definition_line(format_args!("type {name} = {body}")); - ( + Ok(( name, Rc::new(EnumDef { variants: RefCell::new(variants), body, }), - ) + )) }) } - fn enum_ty(&self, ty: Enum) -> Ident { - self.enum_def(ty).0 + fn enum_ty(&self, ty: Enum) -> Result { + Ok(self.enum_def(ty)?.0) } - fn get_enum_variant(&mut self, ty: Enum, name: Interned) -> Ident { - self.enum_def(ty).1.variants.borrow_mut().get(name) + fn get_enum_variant(&mut self, ty: Enum, name: Interned) -> Result { + Ok(self.enum_def(ty)?.1.variants.borrow_mut().get(name)) } - fn ty(&self, ty: T) -> String { - match ty.canonical() { - CanonicalType::Bundle(ty) => self.bundle_ty(ty).to_string(), - CanonicalType::Enum(ty) => self.enum_ty(ty).to_string(), + fn ty(&self, ty: T) -> Result { + Ok(match ty.canonical() { + CanonicalType::Bundle(ty) => self.bundle_ty(ty)?.to_string(), + CanonicalType::Enum(ty) => self.enum_ty(ty)?.to_string(), CanonicalType::Array(ty) => { - let mut retval = self.ty(ty.element()); + let mut retval = self.ty(ty.element())?; write!(retval, "[{}]", ty.len()).unwrap(); retval } @@ -447,7 +484,10 @@ impl TypeState { CanonicalType::SyncReset(SyncReset {}) => "UInt<1>".into(), CanonicalType::Reset(Reset {}) => "Reset".into(), CanonicalType::PhantomConst(_) => "{}".into(), - } + CanonicalType::DynSimOnlyValueType(_) => { + return Err(FirrtlError::SimOnlyValuesAreNotPermitted.into()); + } + }) } } @@ -483,6 +523,7 @@ trait WrappedFileBackendTrait { contents: String, ) -> Result<(), WrappedError>; fn simplify_enums_error(&mut self, error: SimplifyEnumsError) -> WrappedError; + fn firrtl_error(&mut self, error: FirrtlError) -> WrappedError; } struct WrappedFileBackend { @@ -545,6 +586,11 @@ impl WrappedFileBackendTrait for WrappedFileBackend { self.error = Err(error.into()); WrappedError } + 
+ fn firrtl_error(&mut self, error: FirrtlError) -> WrappedError { + self.error = Err(self.file_backend.custom_error(Box::new(error))); + WrappedError + } } #[derive(Clone)] @@ -747,7 +793,10 @@ impl<'a> Exporter<'a> { } fn run(&mut self, top_module: Interned>) -> Result<(), WrappedError> { let mut contents = self.version(); - let circuit = self.circuit(top_module)?; + let circuit = self.circuit(top_module).map_err(|e| match e { + FirrtlOrWrappedError::FirrtlError(e) => self.file_backend.firrtl_error(e), + FirrtlOrWrappedError::WrappedError(e) => e, + })?; contents.push_str(&circuit); self.file_backend .write_top_fir_file(self.circuit_name.to_string(), contents) @@ -755,7 +804,7 @@ impl<'a> Exporter<'a> { fn version(&mut self) -> String { "FIRRTL version 3.2.0\n".to_string() } - fn circuit(&mut self, top_module: Interned>) -> Result { + fn circuit(&mut self, top_module: Interned>) -> Result { let indent = self.indent; self.add_module(top_module); let circuit_indent = indent.push(); @@ -785,9 +834,9 @@ impl<'a> Exporter<'a> { enum_ty: Enum, variant_name: Interned, variant_expr: Option, - ) -> String { - let (_, enum_def) = self.type_state.enum_def(enum_ty); - let variant_ident = self.type_state.get_enum_variant(enum_ty, variant_name); + ) -> Result { + let (_, enum_def) = self.type_state.enum_def(enum_ty)?; + let variant_ident = self.type_state.get_enum_variant(enum_ty, variant_name)?; let mut retval = enum_def.body.clone(); write!(retval, "({variant_ident}").unwrap(); if let Some(variant_expr) = variant_expr { @@ -795,7 +844,7 @@ impl<'a> Exporter<'a> { retval.push_str(&variant_expr); } retval.push(')'); - retval + Ok(retval) } fn uint_literal(&mut self, value: &UIntValue) -> String { format!( @@ -824,32 +873,32 @@ impl<'a> Exporter<'a> { to_ty: ToTy, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { let from_ty = Expr::ty(value); - let mut value = self.expr(Expr::canonical(value), definitions, const_ty); + let mut value = self.expr(Expr::canonical(value), definitions, const_ty)?; if from_ty.width().checked_add(1) == Some(to_ty.width()) && !FromTy::Signed::VALUE && ToTy::Signed::VALUE { - format!("cvt({value})") + Ok(format!("cvt({value})")) } else if from_ty.width() <= to_ty.width() { // must pad before changing type to preserve value modulo 2^to_ty.width if from_ty.width() < to_ty.width() { value = format!("pad({value}, {})", to_ty.width()); } if FromTy::Signed::VALUE == ToTy::Signed::VALUE { - value + Ok(value) } else if ToTy::Signed::VALUE { - format!("asSInt({value})") + Ok(format!("asSInt({value})")) } else { - format!("asUInt({value})") + Ok(format!("asUInt({value})")) } } else { value = format!("tail({value}, {})", from_ty.width() - to_ty.width()); if ToTy::Signed::VALUE { - format!("asSInt({value})") + Ok(format!("asSInt({value})")) } else { - value + Ok(value) } } } @@ -859,12 +908,12 @@ impl<'a> Exporter<'a> { value: Expr, definitions: &RcDefinitions, const_ty: bool, - ) -> String { - let value = self.expr(Expr::canonical(value), definitions, const_ty); + ) -> Result { + let value = self.expr(Expr::canonical(value), definitions, const_ty)?; if let Some(firrtl_cast_fn) = firrtl_cast_fn { - format!("{firrtl_cast_fn}({value})") + Ok(format!("{firrtl_cast_fn}({value})")) } else { - value + Ok(value) } } fn slice( @@ -873,17 +922,17 @@ impl<'a> Exporter<'a> { range: Range, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { let base_width = Expr::ty(base).width(); - let base = self.expr(Expr::canonical(base), definitions, const_ty); 
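To illustrate the error plumbing this hunk introduces: internal helpers return a small union of the two error kinds so `?` works with either, and the top-level caller folds the FIRRTL-specific case into the backend's error type. The sketch below is standalone and its names are stand-ins, not the patch's types.

    use std::fmt;

    #[derive(Debug)]
    struct BackendError(String);

    #[derive(Debug)]
    enum ExportError {
        SimOnlyValuesAreNotPermitted,
    }

    impl fmt::Display for ExportError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("`SimOnlyValue`s are not permitted")
        }
    }

    enum ExportOrBackendError {
        Export(ExportError),
        Backend(BackendError),
    }

    impl From<ExportError> for ExportOrBackendError {
        fn from(e: ExportError) -> Self {
            Self::Export(e)
        }
    }

    impl From<BackendError> for ExportOrBackendError {
        fn from(e: BackendError) -> Self {
            Self::Backend(e)
        }
    }

    fn lower_type(is_sim_only: bool) -> Result<String, ExportOrBackendError> {
        if is_sim_only {
            // `?`-compatible in both directions thanks to the From impls
            return Err(ExportError::SimOnlyValuesAreNotPermitted.into());
        }
        Ok("UInt<1>".into())
    }

    fn run() -> Result<(), BackendError> {
        // the entry point maps the export-level case into the backend's error type
        let _firrtl_type = lower_type(false).map_err(|e| match e {
            ExportOrBackendError::Export(e) => BackendError(e.to_string()),
            ExportOrBackendError::Backend(e) => e,
        })?;
        Ok(())
    }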
+ let base = self.expr(Expr::canonical(base), definitions, const_ty)?; if range.is_empty() { - format!("tail({base}, {base_width})") + Ok(format!("tail({base}, {base_width})")) } else { - format!( + Ok(format!( "bits({base}, {hi}, {lo})", hi = range.end - 1, lo = range.start, - ) + )) } } fn array_literal_expr( @@ -891,29 +940,29 @@ impl<'a> Exporter<'a> { expr: ops::ArrayLiteral, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { let ident = self.module.ns.make_new("_array_literal_expr"); - let ty_str = self.type_state.ty(expr.ty()); + let ty_str = self.type_state.ty(expr.ty())?; let const_ = if const_ty { "const " } else { "" }; definitions.add_definition_line(format_args!("wire {ident}: {const_}{ty_str}")); for (index, element) in expr.element_values().into_iter().enumerate() { - let element = self.expr(Expr::canonical(element), definitions, const_ty); + let element = self.expr(Expr::canonical(element), definitions, const_ty)?; definitions.add_definition_line(format_args!("connect {ident}[{index}], {element}")); } if expr.element_values().is_empty() { definitions.add_definition_line(format_args!("invalidate {ident}")); } - ident.to_string() + Ok(ident.to_string()) } fn bundle_literal_expr( &mut self, expr: ops::BundleLiteral, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { let ident = self.module.ns.make_new("_bundle_literal_expr"); let ty = expr.ty(); - let (ty_ident, bundle_ns) = self.type_state.bundle_def(ty); + let (ty_ident, bundle_ns) = self.type_state.bundle_def(ty)?; let const_ = if const_ty { "const " } else { "" }; definitions.add_definition_line(format_args!("wire {ident}: {const_}{ty_ident}")); for ( @@ -930,37 +979,38 @@ impl<'a> Exporter<'a> { "can't have bundle literal with flipped field -- this should have been caught in BundleLiteral::new_unchecked" ); let name = bundle_ns.borrow_mut().get(name); - let field_value = self.expr(Expr::canonical(field_value), definitions, const_ty); + let field_value = self.expr(Expr::canonical(field_value), definitions, const_ty)?; definitions.add_definition_line(format_args!("connect {ident}.{name}, {field_value}")); } if ty.fields().is_empty() { definitions.add_definition_line(format_args!("invalidate {ident}")); } - ident.to_string() + Ok(ident.to_string()) } fn uninit_expr( &mut self, expr: ops::Uninit, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { let ident = self.module.ns.make_new("_uninit_expr"); let ty = expr.ty(); - let ty_ident = self.type_state.ty(ty); + let ty_ident = self.type_state.ty(ty)?; let const_ = if const_ty { "const " } else { "" }; definitions.add_definition_line(format_args!("wire {ident}: {const_}{ty_ident}")); definitions.add_definition_line(format_args!("invalidate {ident}")); - ident.to_string() + Ok(ident.to_string()) } fn enum_literal_expr( &mut self, expr: ops::EnumLiteral, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { let variant_expr = expr .variant_value() - .map(|variant_value| self.expr(variant_value, definitions, const_ty)); + .map(|variant_value| self.expr(variant_value, definitions, const_ty)) + .transpose()?; self.enum_expr_impl(expr.ty(), expr.variant_name(), variant_expr) } fn expr_cast_bundle_to_bits( @@ -969,12 +1019,12 @@ impl<'a> Exporter<'a> { ty: Bundle, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { + ) -> Result { if ty.fields().is_empty() { - return "UInt<0>(0)".into(); + return Ok("UInt<0>(0)".into()); } if let [field] = *ty.fields() { - let 
field_ident = self.type_state.get_bundle_field(ty, field.name); + let field_ident = self.type_state.get_bundle_field(ty, field.name)?; return self.expr_cast_to_bits( format!("{value_str}.{field_ident}"), field.ty, @@ -993,23 +1043,23 @@ impl<'a> Exporter<'a> { ty: UInt[field_ty.bit_width()].canonical(), }, ))); - let (flattened_ty_ident, _) = self.type_state.bundle_def(flattened_bundle_ty); + let (flattened_ty_ident, _) = self.type_state.bundle_def(flattened_bundle_ty)?; let ident = self.module.ns.make_new("_cast_bundle_to_bits_expr"); definitions.add_definition_line(format_args!( "{extra_indent}wire {ident}: {flattened_ty_ident}" )); let mut cat_expr = None; for field in ty.fields() { - let field_ident = self.type_state.get_bundle_field(ty, field.name); + let field_ident = self.type_state.get_bundle_field(ty, field.name)?; let flattened_field_ident = self .type_state - .get_bundle_field(flattened_bundle_ty, field.name); + .get_bundle_field(flattened_bundle_ty, field.name)?; let field_bits = self.expr_cast_to_bits( format!("{value_str}.{field_ident}"), field.ty, definitions, extra_indent, - ); + )?; definitions.add_definition_line(format_args!( "{extra_indent}connect {ident}.{flattened_field_ident}, {field_bits}" )); @@ -1026,7 +1076,7 @@ impl<'a> Exporter<'a> { )); let cat_expr = cat_expr.expect("bundle already checked to have fields"); definitions.add_definition_line(format_args!("{extra_indent}connect {retval}, {cat_expr}")); - retval.to_string() + Ok(retval.to_string()) } fn expr_cast_enum_to_bits( &mut self, @@ -1034,9 +1084,9 @@ impl<'a> Exporter<'a> { ty: Enum, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { + ) -> Result { if ty.variants().is_empty() { - return "UInt<0>(0)".into(); + return Ok("UInt<0>(0)".into()); } let retval = self.module.ns.make_new("_cast_enum_to_bits_expr"); definitions.add_definition_line(format_args!( @@ -1053,7 +1103,7 @@ impl<'a> Exporter<'a> { .make_new(&format!("_cast_enum_to_bits_expr_{}", variant.name)); definitions.add_definition_line(format_args!( "{extra_indent}{}({variant_value}):", - self.type_state.get_enum_variant(ty, variant.name), + self.type_state.get_enum_variant(ty, variant.name)?, )); let _match_arm_indent = extra_indent.push(); let variant_bits = self.expr_cast_to_bits( @@ -1061,7 +1111,7 @@ impl<'a> Exporter<'a> { variant_ty, definitions, extra_indent, - ); + )?; definitions.add_definition_line(format_args!( "{extra_indent}connect {retval}, pad(cat({variant_bits}, UInt<{}>({variant_index})), {})", ty.discriminant_bit_width(), @@ -1070,7 +1120,7 @@ impl<'a> Exporter<'a> { } else { definitions.add_definition_line(format_args!( "{extra_indent}{}:", - self.type_state.get_enum_variant(ty, variant.name), + self.type_state.get_enum_variant(ty, variant.name)?, )); let _match_arm_indent = extra_indent.push(); definitions.add_definition_line(format_args!( @@ -1079,7 +1129,7 @@ impl<'a> Exporter<'a> { )); } } - retval.to_string() + Ok(retval.to_string()) } fn expr_cast_array_to_bits( &mut self, @@ -1087,9 +1137,9 @@ impl<'a> Exporter<'a> { ty: Array, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { + ) -> Result { if ty.is_empty() { - return "UInt<0>(0)".into(); + return Ok("UInt<0>(0)".into()); } if ty.len() == 1 { return self.expr_cast_to_bits( @@ -1112,7 +1162,7 @@ impl<'a> Exporter<'a> { ty.element(), definitions, extra_indent, - ); + )?; definitions.add_definition_line(format_args!( "{extra_indent}connect {ident}[{index}], {element_bits}" )); @@ -1129,7 +1179,7 @@ impl<'a> Exporter<'a> { )); let 
cat_expr = cat_expr.expect("array already checked to have elements"); definitions.add_definition_line(format_args!("{extra_indent}connect {retval}, {cat_expr}")); - retval.to_string() + Ok(retval.to_string()) } fn expr_cast_to_bits( &mut self, @@ -1137,7 +1187,7 @@ impl<'a> Exporter<'a> { ty: CanonicalType, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { + ) -> Result { match ty { CanonicalType::Bundle(ty) => { self.expr_cast_bundle_to_bits(value_str, ty, definitions, extra_indent) @@ -1149,13 +1199,16 @@ impl<'a> Exporter<'a> { self.expr_cast_array_to_bits(value_str, ty, definitions, extra_indent) } CanonicalType::UInt(_) | CanonicalType::SyncReset(_) | CanonicalType::Bool(_) => { - value_str + Ok(value_str) } CanonicalType::SInt(_) | CanonicalType::Clock(_) | CanonicalType::AsyncReset(_) - | CanonicalType::Reset(_) => format!("asUInt({value_str})"), - CanonicalType::PhantomConst(_) => "UInt<0>(0)".into(), + | CanonicalType::Reset(_) => Ok(format!("asUInt({value_str})")), + CanonicalType::PhantomConst(_) => Ok("UInt<0>(0)".into()), + CanonicalType::DynSimOnlyValueType(_) => { + Err(FirrtlError::SimOnlyValuesAreNotPermitted.into()) + } } } fn expr_cast_bits_to_bundle( @@ -1164,13 +1217,13 @@ impl<'a> Exporter<'a> { ty: Bundle, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { - let (ty_ident, _) = self.type_state.bundle_def(ty); + ) -> Result { + let (ty_ident, _) = self.type_state.bundle_def(ty)?; let retval = self.module.ns.make_new("_cast_bits_to_bundle_expr"); definitions.add_definition_line(format_args!("{extra_indent}wire {retval}: {ty_ident}")); if ty.fields().is_empty() { definitions.add_definition_line(format_args!("{extra_indent}invalidate {retval}")); - return retval.to_string(); + return Ok(retval.to_string()); } let flattened_bundle_ty = Bundle::new(Interned::from_iter(ty.fields().iter().map( |&BundleField { @@ -1183,7 +1236,7 @@ impl<'a> Exporter<'a> { ty: UInt[field_ty.bit_width()].canonical(), }, ))); - let (flattened_ty_ident, _) = self.type_state.bundle_def(flattened_bundle_ty); + let (flattened_ty_ident, _) = self.type_state.bundle_def(flattened_bundle_ty)?; let flattened_ident = self .module .ns @@ -1191,11 +1244,18 @@ impl<'a> Exporter<'a> { definitions.add_definition_line(format_args!( "{extra_indent}wire {flattened_ident}: {flattened_ty_ident}" )); - for (field, field_offset) in ty.fields().into_iter().zip(ty.field_offsets()) { + for ( + field, + OpaqueSimValueSize { + bit_width: field_offset, + sim_only_values_len: _, + }, + ) in ty.fields().into_iter().zip(ty.field_offsets()) + { let flattened_field_ident = self .type_state - .get_bundle_field(flattened_bundle_ty, field.name); - let field_ident = self.type_state.get_bundle_field(ty, field.name); + .get_bundle_field(flattened_bundle_ty, field.name)?; + let field_ident = self.type_state.get_bundle_field(ty, field.name)?; if let Some(field_bit_width_minus_one) = field.ty.bit_width().checked_sub(1usize) { definitions.add_definition_line(format_args!( "{extra_indent}connect {flattened_ident}.{flattened_field_ident}, bits({value_str}, {}, {field_offset})", @@ -1211,12 +1271,12 @@ impl<'a> Exporter<'a> { field.ty, definitions, extra_indent, - ); + )?; definitions.add_definition_line(format_args!( "{extra_indent}connect {retval}.{field_ident}, {field_value}" )); } - retval.to_string() + Ok(retval.to_string()) } fn expr_cast_bits_to_enum( &mut self, @@ -1224,19 +1284,19 @@ impl<'a> Exporter<'a> { ty: Enum, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { - 
let (ty_ident, enum_def) = self.type_state.enum_def(ty); + ) -> Result { + let (ty_ident, enum_def) = self.type_state.enum_def(ty)?; let retval = self.module.ns.make_new("_cast_bits_to_enum_expr"); definitions.add_definition_line(format_args!("{extra_indent}wire {retval}: {ty_ident}")); if ty.variants().is_empty() { definitions.add_definition_line(format_args!("{extra_indent}invalidate {retval}")); - return retval.to_string(); + return Ok(retval.to_string()); } if let [variant] = *ty.variants() { - let enum_variant = self.type_state.get_enum_variant(ty, variant.name); + let enum_variant = self.type_state.get_enum_variant(ty, variant.name)?; if let Some(variant_ty) = variant.ty { let variant_value = - self.expr_cast_bits_to(value_str, variant_ty, definitions, extra_indent); + self.expr_cast_bits_to(value_str, variant_ty, definitions, extra_indent)?; definitions.add_definition_line(format_args!( "{extra_indent}connect {retval}, {}({enum_variant}, {variant_value})", enum_def.body @@ -1247,7 +1307,7 @@ impl<'a> Exporter<'a> { enum_def.body )); } - return retval.to_string(); + return Ok(retval.to_string()); } let discriminant_bit_width = ty.discriminant_bit_width(); let body_bit_width = ty.type_properties().bit_width - discriminant_bit_width; @@ -1276,14 +1336,14 @@ impl<'a> Exporter<'a> { .add_definition_line(format_args!("{extra_indent}else when {when_cond}:")); } let when_pushed_indent = extra_indent.push(); - let enum_variant = self.type_state.get_enum_variant(ty, variant.name); + let enum_variant = self.type_state.get_enum_variant(ty, variant.name)?; if let Some(variant_ty) = variant.ty { let variant_value = self.expr_cast_bits_to( body_value.clone(), variant_ty, definitions, extra_indent, - ); + )?; definitions.add_definition_line(format_args!( "{extra_indent}connect {retval}, {}({enum_variant}, {variant_value})", enum_def.body @@ -1296,7 +1356,7 @@ impl<'a> Exporter<'a> { } drop(when_pushed_indent); } - retval.to_string() + Ok(retval.to_string()) } fn expr_cast_bits_to_array( &mut self, @@ -1304,14 +1364,14 @@ impl<'a> Exporter<'a> { ty: Array, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { + ) -> Result { let retval = self.module.ns.make_new("_cast_bits_to_array_expr"); - let array_ty = self.type_state.ty(ty); + let array_ty = self.type_state.ty(ty)?; definitions.add_definition_line(format_args!("{extra_indent}wire {retval}: {array_ty}")); let element_bit_width = ty.element().bit_width(); if ty.is_empty() || element_bit_width == 0 { definitions.add_definition_line(format_args!("{extra_indent}invalidate {retval}")); - return retval.to_string(); + return Ok(retval.to_string()); } let flattened_ident = self .module @@ -1332,12 +1392,12 @@ impl<'a> Exporter<'a> { ty.element(), definitions, extra_indent, - ); + )?; definitions.add_definition_line(format_args!( "{extra_indent}connect {retval}[{index}], {element_value}" )); } - retval.to_string() + Ok(retval.to_string()) } fn expr_cast_bits_to( &mut self, @@ -1345,7 +1405,7 @@ impl<'a> Exporter<'a> { ty: CanonicalType, definitions: &RcDefinitions, extra_indent: Indent<'_>, - ) -> String { + ) -> Result { match ty { CanonicalType::Bundle(ty) => { self.expr_cast_bits_to_bundle(value_str, ty, definitions, extra_indent) @@ -1356,18 +1416,21 @@ impl<'a> Exporter<'a> { CanonicalType::Array(ty) => { self.expr_cast_bits_to_array(value_str, ty, definitions, extra_indent) } - CanonicalType::UInt(_) => value_str, - CanonicalType::SInt(_) => format!("asSInt({value_str})"), - CanonicalType::Bool(_) => value_str, - 
CanonicalType::Clock(_) => format!("asClock({value_str})"), - CanonicalType::AsyncReset(_) => format!("asAsyncReset({value_str})"), - CanonicalType::SyncReset(_) => value_str, + CanonicalType::UInt(_) => Ok(value_str), + CanonicalType::SInt(_) => Ok(format!("asSInt({value_str})")), + CanonicalType::Bool(_) => Ok(value_str), + CanonicalType::Clock(_) => Ok(format!("asClock({value_str})")), + CanonicalType::AsyncReset(_) => Ok(format!("asAsyncReset({value_str})")), + CanonicalType::SyncReset(_) => Ok(value_str), CanonicalType::Reset(_) => unreachable!("Reset is not bit castable to"), CanonicalType::PhantomConst(_) => { let retval = self.module.ns.make_new("_cast_bits_to_phantom_const_expr"); definitions.add_definition_line(format_args!("{extra_indent}wire {retval}: {{}}")); definitions.add_definition_line(format_args!("{extra_indent}invalidate {retval}")); - return retval.to_string(); + return Ok(retval.to_string()); + } + CanonicalType::DynSimOnlyValueType(_) => { + Err(FirrtlError::SimOnlyValuesAreNotPermitted.into()) } } } @@ -1377,11 +1440,11 @@ impl<'a> Exporter<'a> { arg: Expr, definitions: &RcDefinitions, const_ty: bool, - ) -> String { - format!( + ) -> Result { + Ok(format!( "{func}({arg})", - arg = self.expr(Expr::canonical(arg), definitions, const_ty) - ) + arg = self.expr(Expr::canonical(arg), definitions, const_ty)?, + )) } fn expr_binary( &mut self, @@ -1390,23 +1453,23 @@ impl<'a> Exporter<'a> { rhs: Expr, definitions: &RcDefinitions, const_ty: bool, - ) -> String { - format!( + ) -> Result { + Ok(format!( "{func}({lhs}, {rhs})", - lhs = self.expr(Expr::canonical(lhs), definitions, const_ty), - rhs = self.expr(Expr::canonical(rhs), definitions, const_ty) - ) + lhs = self.expr(Expr::canonical(lhs), definitions, const_ty)?, + rhs = self.expr(Expr::canonical(rhs), definitions, const_ty)?, + )) } fn expr( &mut self, expr: Expr, definitions: &RcDefinitions, const_ty: bool, - ) -> String { + ) -> Result { match *Expr::expr_enum(expr) { - ExprEnum::UIntLiteral(literal) => self.uint_literal(&literal), - ExprEnum::SIntLiteral(literal) => self.sint_literal(&literal), - ExprEnum::BoolLiteral(literal) => self.bool_literal(literal), + ExprEnum::UIntLiteral(literal) => Ok(self.uint_literal(&literal)), + ExprEnum::SIntLiteral(literal) => Ok(self.sint_literal(&literal)), + ExprEnum::BoolLiteral(literal) => Ok(self.bool_literal(literal)), ExprEnum::PhantomConst(ty) => self.expr( UInt[0].zero().cast_bits_to(ty.canonical()), definitions, @@ -1495,34 +1558,26 @@ impl<'a> Exporter<'a> { ExprEnum::DynShrS(expr) => { self.expr_binary("dshr", expr.lhs(), expr.rhs(), definitions, const_ty) } - ExprEnum::FixedShlU(expr) => { - format!( - "shl({lhs}, {rhs})", - lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty), - rhs = expr.rhs(), - ) - } - ExprEnum::FixedShlS(expr) => { - format!( - "shl({lhs}, {rhs})", - lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty), - rhs = expr.rhs(), - ) - } - ExprEnum::FixedShrU(expr) => { - format!( - "shr({lhs}, {rhs})", - lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty), - rhs = expr.rhs(), - ) - } - ExprEnum::FixedShrS(expr) => { - format!( - "shr({lhs}, {rhs})", - lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty), - rhs = expr.rhs(), - ) - } + ExprEnum::FixedShlU(expr) => Ok(format!( + "shl({lhs}, {rhs})", + lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty)?, + rhs = expr.rhs(), + )), + ExprEnum::FixedShlS(expr) => Ok(format!( + "shl({lhs}, {rhs})", + lhs = 
self.expr(Expr::canonical(expr.lhs()), definitions, const_ty)?, + rhs = expr.rhs(), + )), + ExprEnum::FixedShrU(expr) => Ok(format!( + "shr({lhs}, {rhs})", + lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty)?, + rhs = expr.rhs(), + )), + ExprEnum::FixedShrS(expr) => Ok(format!( + "shr({lhs}, {rhs})", + lhs = self.expr(Expr::canonical(expr.lhs()), definitions, const_ty)?, + rhs = expr.rhs(), + )), ExprEnum::CmpLtU(expr) => { self.expr_binary("lt", expr.lhs(), expr.rhs(), definitions, const_ty) } @@ -1596,7 +1651,7 @@ impl<'a> Exporter<'a> { self.slice(expr.base(), expr.range(), definitions, const_ty) } ExprEnum::CastToBits(expr) => { - let value_str = self.expr(expr.arg(), definitions, const_ty); + let value_str = self.expr(expr.arg(), definitions, const_ty)?; self.expr_cast_to_bits( value_str, Expr::ty(expr.arg()), @@ -1608,7 +1663,7 @@ impl<'a> Exporter<'a> { ) } ExprEnum::CastBitsTo(expr) => { - let value_str = self.expr(Expr::canonical(expr.arg()), definitions, const_ty); + let value_str = self.expr(Expr::canonical(expr.arg()), definitions, const_ty)?; self.expr_cast_bits_to( value_str, expr.ty(), @@ -1719,56 +1774,57 @@ impl<'a> Exporter<'a> { self.expr_unary("xorr", expr.arg(), definitions, const_ty) } ExprEnum::FieldAccess(expr) => { - let mut out = self.expr(Expr::canonical(expr.base()), definitions, const_ty); + let mut out = self.expr(Expr::canonical(expr.base()), definitions, const_ty)?; let name = self .type_state - .get_bundle_field(Expr::ty(expr.base()), expr.field_name()); + .get_bundle_field(Expr::ty(expr.base()), expr.field_name())?; write!(out, ".{name}").unwrap(); - out + Ok(out) } - ExprEnum::VariantAccess(variant_access) => self + ExprEnum::VariantAccess(variant_access) => Ok(self .module .match_arm_values .get(&variant_access) .expect("VariantAccess must be in its corresponding match arm") - .to_string(), + .to_string()), ExprEnum::ArrayIndex(expr) => { - let mut out = self.expr(Expr::canonical(expr.base()), definitions, const_ty); + let mut out = self.expr(Expr::canonical(expr.base()), definitions, const_ty)?; write!(out, "[{}]", expr.element_index()).unwrap(); - out + Ok(out) } ExprEnum::DynArrayIndex(expr) => { - let mut out = self.expr(Expr::canonical(expr.base()), definitions, const_ty); - let index = self.expr(Expr::canonical(expr.element_index()), definitions, const_ty); + let mut out = self.expr(Expr::canonical(expr.base()), definitions, const_ty)?; + let index = + self.expr(Expr::canonical(expr.element_index()), definitions, const_ty)?; write!(out, "[{index}]").unwrap(); - out + Ok(out) } - ExprEnum::ModuleIO(expr) => self.module.ns.get(expr.name_id()).to_string(), + ExprEnum::ModuleIO(expr) => Ok(self.module.ns.get(expr.name_id()).to_string()), ExprEnum::Instance(expr) => { assert!(!const_ty, "not a constant"); - self.module.ns.get(expr.scoped_name().1).to_string() + Ok(self.module.ns.get(expr.scoped_name().1).to_string()) } ExprEnum::Wire(expr) => { assert!(!const_ty, "not a constant"); - self.module.ns.get(expr.scoped_name().1).to_string() + Ok(self.module.ns.get(expr.scoped_name().1).to_string()) } ExprEnum::Reg(expr) => { assert!(!const_ty, "not a constant"); - self.module.ns.get(expr.scoped_name().1).to_string() + Ok(self.module.ns.get(expr.scoped_name().1).to_string()) } ExprEnum::RegSync(expr) => { assert!(!const_ty, "not a constant"); - self.module.ns.get(expr.scoped_name().1).to_string() + Ok(self.module.ns.get(expr.scoped_name().1).to_string()) } ExprEnum::RegAsync(expr) => { assert!(!const_ty, "not a constant"); - 
self.module.ns.get(expr.scoped_name().1).to_string() + Ok(self.module.ns.get(expr.scoped_name().1).to_string()) } ExprEnum::MemPort(expr) => { assert!(!const_ty, "not a constant"); let mem_name = self.module.ns.get(expr.mem_name().1); let port_name = Ident::from(expr.port_name()); - format!("{mem_name}.{port_name}") + Ok(format!("{mem_name}.{port_name}")) } } } @@ -1778,7 +1834,7 @@ impl<'a> Exporter<'a> { memory_name: Ident, array_type: Array, initial_value: Interned, - ) -> Result<(), WrappedError> { + ) -> Result<()> { assert_eq!( initial_value.len(), array_type.type_properties().bit_width, @@ -1860,7 +1916,7 @@ impl<'a> Exporter<'a> { }, }) } - fn annotation_target_ref(&mut self, target: Interned) -> AnnotationTargetRef { + fn annotation_target_ref(&mut self, target: Interned) -> Result { match *target { Target::Base(base) => { let mut segments = vec![]; @@ -1878,17 +1934,17 @@ impl<'a> Exporter<'a> { TargetBase::Wire(v) => self.module.ns.get(v.name_id()), TargetBase::Instance(v) => self.module.ns.get(v.name_id()), }; - AnnotationTargetRef { base, segments } + Ok(AnnotationTargetRef { base, segments }) } Target::Child(child) => { - let mut retval = self.annotation_target_ref(child.parent()); + let mut retval = self.annotation_target_ref(child.parent())?; match *child.path_element() { TargetPathElement::BundleField(TargetPathBundleField { name }) => { retval.segments.push(AnnotationTargetRefSegment::Field { name: self.type_state.get_bundle_field( Bundle::from_canonical(child.parent().canonical_ty()), name, - ), + )?, }) } TargetPathElement::ArrayElement(TargetPathArrayElement { index, .. }) => retval @@ -1896,7 +1952,7 @@ impl<'a> Exporter<'a> { .push(AnnotationTargetRefSegment::Index { index }), TargetPathElement::DynArrayElement(_) => unreachable!(), } - retval + Ok(retval) } } } @@ -1905,9 +1961,9 @@ impl<'a> Exporter<'a> { base_module: Ident, submodules: Vec, annotations: &[crate::annotations::TargetedAnnotation], - ) { + ) -> Result<()> { for annotation in annotations { - let target_ref = Some(self.annotation_target_ref(annotation.target())); + let target_ref = Some(self.annotation_target_ref(annotation.target())?); self.annotation( AnnotationTargetPath { base_module, @@ -1917,8 +1973,9 @@ impl<'a> Exporter<'a> { annotation.annotation(), ); } + Ok(()) } - fn write_mem(&mut self, module_name: Ident, memory: Mem) -> Result { + fn write_mem(&mut self, module_name: Ident, memory: Mem) -> Result { let indent = self.indent; let name_id = memory.scoped_name().1; let source_location = memory.source_location(); @@ -1942,11 +1999,11 @@ impl<'a> Exporter<'a> { annotation, ); } - self.targeted_annotations(module_name, vec![], &memory.port_annotations()); + self.targeted_annotations(module_name, vec![], &memory.port_annotations())?; if let Some(initial_value) = initial_value { self.write_mem_init(module_name, name, array_type, initial_value)?; } - let data_type = self.type_state.ty(array_type.element()); + let data_type = self.type_state.ty(array_type.element())?; let mut body = String::new(); writeln!( body, @@ -1989,16 +2046,16 @@ impl<'a> Exporter<'a> { module_name: Ident, definitions: &RcDefinitions, body: &mut String, - ) { + ) -> Result<()> { let StmtReg { annotations, reg } = stmt_reg; let indent = self.indent; self.targeted_annotations(module_name, vec![], &annotations); let name = self.module.ns.get(reg.name_id()); - let ty = self.type_state.ty(reg.ty()); - let clk = self.expr(Expr::canonical(reg.clock_domain().clk), definitions, false); + let ty = self.type_state.ty(reg.ty())?; + let 
clk = self.expr(Expr::canonical(reg.clock_domain().clk), definitions, false)?; if let Some(init) = reg.init() { - let rst = self.expr(Expr::canonical(reg.clock_domain().rst), definitions, false); - let init = self.expr(init, definitions, false); + let rst = self.expr(Expr::canonical(reg.clock_domain().rst), definitions, false)?; + let init = self.expr(init, definitions, false)?; writeln!( body, "{indent}regreset {name}: {ty}, {clk}, {rst}, {init}{}", @@ -2013,6 +2070,7 @@ impl<'a> Exporter<'a> { ) .unwrap(); } + Ok(()) } fn block( &mut self, @@ -2020,7 +2078,7 @@ impl<'a> Exporter<'a> { block: Block, _block_indent: &PushIndent<'_>, definitions: Option, - ) -> Result { + ) -> Result { let indent = self.indent; let definitions = definitions.unwrap_or_default(); let mut body = String::new(); @@ -2046,8 +2104,8 @@ impl<'a> Exporter<'a> { ) .unwrap(); } - let lhs = self.expr(lhs, &definitions, false); - let rhs = self.expr(rhs, &definitions, false); + let lhs = self.expr(lhs, &definitions, false)?; + let rhs = self.expr(rhs, &definitions, false)?; writeln!( body, "{indent}connect {lhs}, {rhs}{}", @@ -2063,9 +2121,9 @@ impl<'a> Exporter<'a> { text, source_location, }) => { - let clk = self.expr(Expr::canonical(clk), &definitions, false); - let pred = self.expr(Expr::canonical(pred), &definitions, false); - let en = self.expr(Expr::canonical(en), &definitions, false); + let clk = self.expr(Expr::canonical(clk), &definitions, false)?; + let pred = self.expr(Expr::canonical(pred), &definitions, false)?; + let en = self.expr(Expr::canonical(en), &definitions, false)?; let kind = match kind { FormalKind::Assert => "assert", FormalKind::Assume => "assume", @@ -2090,7 +2148,7 @@ impl<'a> Exporter<'a> { let mut when = "when"; let mut pushed_indent; loop { - let cond_str = self.expr(Expr::canonical(cond), &definitions, false); + let cond_str = self.expr(Expr::canonical(cond), &definitions, false)?; writeln!( body, "{indent}{when} {cond_str}:{}", @@ -2132,7 +2190,7 @@ impl<'a> Exporter<'a> { writeln!( body, "{indent}match {}:{}", - self.expr(Expr::canonical(expr), &definitions, false), + self.expr(Expr::canonical(expr), &definitions, false)?, FileInfo::new(source_location), ) .unwrap(); @@ -2144,7 +2202,7 @@ impl<'a> Exporter<'a> { write!( body, "{indent}{}", - self.type_state.get_enum_variant(enum_ty, variant.name), + self.type_state.get_enum_variant(enum_ty, variant.name)?, ) .unwrap(); let variant_access = if variant.ty.is_some() { @@ -2176,7 +2234,7 @@ impl<'a> Exporter<'a> { Stmt::Declaration(StmtDeclaration::Wire(StmtWire { annotations, wire })) => { self.targeted_annotations(module_name, vec![], &annotations); let name = self.module.ns.get(wire.name_id()); - let ty = self.type_state.ty(wire.ty()); + let ty = self.type_state.ty(wire.ty())?; writeln!( body, "{indent}wire {name}: {ty}{}", @@ -2185,13 +2243,13 @@ impl<'a> Exporter<'a> { .unwrap(); } Stmt::Declaration(StmtDeclaration::Reg(stmt_reg)) => { - self.stmt_reg(stmt_reg, module_name, &definitions, &mut body); + self.stmt_reg(stmt_reg, module_name, &definitions, &mut body)?; } Stmt::Declaration(StmtDeclaration::RegSync(stmt_reg)) => { - self.stmt_reg(stmt_reg, module_name, &definitions, &mut body); + self.stmt_reg(stmt_reg, module_name, &definitions, &mut body)?; } Stmt::Declaration(StmtDeclaration::RegAsync(stmt_reg)) => { - self.stmt_reg(stmt_reg, module_name, &definitions, &mut body); + self.stmt_reg(stmt_reg, module_name, &definitions, &mut body)?; } Stmt::Declaration(StmtDeclaration::Instance(StmtInstance { annotations, @@ -2216,7 +2274,7 
@@ impl<'a> Exporter<'a> { } Ok(out) } - fn module(&mut self, module: Interned>) -> Result { + fn module(&mut self, module: Interned>) -> Result { self.module = ModuleState::default(); let indent = self.indent; let module_name = self.global_ns.get(module.name_id()); @@ -2239,7 +2297,7 @@ impl<'a> Exporter<'a> { { self.targeted_annotations(module_name, vec![], annotations); let name = self.module.ns.get(module_io.name_id()); - let ty = self.type_state.ty(module_io.ty()); + let ty = self.type_state.ty(module_io.ty())?; if module_io.is_input() { writeln!( body, @@ -2314,6 +2372,7 @@ pub trait FileBackendTrait { type Error: From; type Path: AsRef + fmt::Debug + ?Sized; type PathBuf: AsRef + fmt::Debug; + fn custom_error(&self, error: Box) -> Self::Error; fn path_to_string(&mut self, path: &Self::Path) -> Result; fn write_mem_init_file( &mut self, @@ -2333,6 +2392,10 @@ impl FileBackendTrait for Box { type Path = T::Path; type PathBuf = T::PathBuf; + fn custom_error(&self, error: Box) -> Self::Error { + (**self).custom_error(error) + } + fn path_to_string(&mut self, path: &Self::Path) -> Result { (**self).path_to_string(path) } @@ -2360,6 +2423,10 @@ impl FileBackendTrait for &'_ mut T { type Path = T::Path; type PathBuf = T::PathBuf; + fn custom_error(&self, error: Box) -> Self::Error { + (**self).custom_error(error) + } + fn path_to_string(&mut self, path: &Self::Path) -> Result { (**self).path_to_string(path) } @@ -2405,6 +2472,10 @@ impl FileBackendTrait for FileBackend { type Path = Path; type PathBuf = PathBuf; + fn custom_error(&self, error: Box) -> Self::Error { + io::Error::new(io::ErrorKind::Other, error) + } + fn path_to_string(&mut self, path: &Self::Path) -> Result { path.to_str() .map(String::from) @@ -2571,6 +2642,10 @@ impl FileBackendTrait for TestBackend { type Path = str; type PathBuf = String; + fn custom_error(&self, error: Box) -> Self::Error { + TestBackendError(error.to_string()) + } + fn path_to_string(&mut self, path: &Self::Path) -> Result { self.step_error_after(&path)?; Ok(path.to_owned()) diff --git a/crates/fayalite/src/int.rs b/crates/fayalite/src/int.rs index c491cdc..7fa77ce 100644 --- a/crates/fayalite/src/int.rs +++ b/crates/fayalite/src/int.rs @@ -11,10 +11,13 @@ use crate::{ intern::{Intern, Interned, Memoize}, sim::value::{SimValue, ToSimValueWithType}, source_location::SourceLocation, - ty::{CanonicalType, StaticType, Type, TypeProperties, impl_match_variant_as_self}, - util::{ConstBool, ConstUsize, GenericConstBool, GenericConstUsize, interned_bit}, + ty::{ + CanonicalType, OpaqueSimValueSize, OpaqueSimValueSlice, OpaqueSimValueWriter, + OpaqueSimValueWritten, StaticType, Type, TypeProperties, impl_match_variant_as_self, + }, + util::{ConstBool, ConstUsize, GenericConstBool, GenericConstUsize, interned_bit, slice_range}, }; -use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec, view::BitView}; +use bitvec::{bits, order::Lsb0, slice::BitSlice, vec::BitVec, view::BitView}; use num_bigint::{BigInt, BigUint, Sign}; use num_traits::{One, Signed, Zero}; use serde::{ @@ -26,7 +29,7 @@ use std::{ fmt, marker::PhantomData, num::NonZero, - ops::{Bound, Index, Not, Range, RangeBounds, RangeInclusive}, + ops::{Index, Not, Range, RangeBounds, RangeInclusive}, str::FromStr, sync::Arc, }; @@ -645,6 +648,7 @@ macro_rules! impl_int { is_storable: true, is_castable_from_bits: true, bit_width: self.width, + sim_only_values_len: 0, } } } @@ -678,19 +682,36 @@ macro_rules! 
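For reference, a standalone sketch of the `custom_error` hook this hunk adds to the file-backend trait: each backend converts an arbitrary boxed error into its own error type, and the Box<T> and &mut T impls simply forward to the inner backend. The exact trait-object bound on the boxed error is elided in the hunk, so the `Send + Sync` bound below is an assumption.

    use std::io;

    trait Backend {
        type Error;
        fn custom_error(
            &self,
            error: Box<dyn std::error::Error + Send + Sync>,
        ) -> Self::Error;
    }

    impl<T: Backend + ?Sized> Backend for Box<T> {
        type Error = T::Error;
        fn custom_error(
            &self,
            error: Box<dyn std::error::Error + Send + Sync>,
        ) -> Self::Error {
            (**self).custom_error(error)
        }
    }

    impl<'a, T: Backend + ?Sized> Backend for &'a mut T {
        type Error = T::Error;
        fn custom_error(
            &self,
            error: Box<dyn std::error::Error + Send + Sync>,
        ) -> Self::Error {
            (**self).custom_error(error)
        }
    }

    struct FsBackend;

    impl Backend for FsBackend {
        type Error = io::Error;
        fn custom_error(
            &self,
            error: Box<dyn std::error::Error + Send + Sync>,
        ) -> Self::Error {
            // io::Error can wrap any boxed error directly
            io::Error::new(io::ErrorKind::Other, error)
        }
    }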
impl_int { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), self.width()); - $value::new(Arc::new(bits.to_bitvec())) + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!( + opaque.size(), + OpaqueSimValueSize::from_bit_width(self.width()) + ); + $value::new(Arc::new(opaque.bits().to_bitvec())) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), self.width()); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!( + opaque.size(), + OpaqueSimValueSize::from_bit_width(self.width()) + ); assert_eq!(value.width(), self.width()); - value.bits_mut().copy_from_bitslice(bits); + value.bits_mut().copy_from_bitslice(opaque.bits()); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), self.width()); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!( + writer.size(), + OpaqueSimValueSize::from_bit_width(self.width()) + ); assert_eq!(value.width(), self.width()); - bits.copy_from_bitslice(value.bits()); + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice(value.bits())) } } @@ -898,6 +919,9 @@ macro_rules! impl_int { _phantom: PhantomData, } } + pub fn bitvec_mut(&mut self) -> &mut BitVec { + Arc::make_mut(&mut self.bits) + } } }; } @@ -1160,19 +1184,7 @@ pub trait IntType: Self::Dyn::new(width) } fn slice_index_to_range>(self, index: I) -> Range { - let width = self.width(); - let start = match index.start_bound() { - Bound::Included(start) => *start, - Bound::Excluded(start) => *start + 1, - Bound::Unbounded => 0, - }; - let end = match index.end_bound() { - Bound::Included(end) => *end + 1, - Bound::Excluded(end) => *end, - Bound::Unbounded => width, - }; - assert!(start <= end && end <= width, "slice range out-of-range"); - start..end + slice_range(index, self.width()) } fn slice_and_shift>(self, index: I) -> (UInt, usize) { let range = self.slice_index_to_range(index); @@ -1252,17 +1264,27 @@ impl Type for Bool { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), 1); - bits[0] + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(opaque.size(), OpaqueSimValueSize::from_bit_width(1)); + opaque.bits()[0] } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), 1); - *value = bits[0]; + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(opaque.size(), OpaqueSimValueSize::from_bit_width(1)); + *value = opaque.bits()[0]; } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), 1); - bits.set(0, *value); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!(writer.size(), OpaqueSimValueSize::from_bit_width(1)); + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice( + [bits![0], bits![1]][*value as usize], + )) } } @@ -1274,6 +1296,7 @@ impl StaticType for Bool { is_storable: true, is_castable_from_bits: true, bit_width: 
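For orientation, a standalone sketch of the writer-and-written-token shape the new `sim_value_to_opaque` signature follows: the callee receives a writer sized for the type and can only produce the returned token by actually filling the buffer. This models the general idea only; the crate's OpaqueSimValueWriter also carries sim-only values and richer fill methods.

    use std::marker::PhantomData;

    struct BitWriter<'a> {
        bits: &'a mut [bool],
    }

    /// Proof that a writer was consumed; callers cannot construct this directly.
    struct Written<'a>(PhantomData<&'a mut [bool]>);

    impl<'a> BitWriter<'a> {
        fn width(&self) -> usize {
            self.bits.len()
        }
        fn fill_from_slice(self, src: &[bool]) -> Written<'a> {
            self.bits.copy_from_slice(src); // panics on width mismatch
            Written(PhantomData)
        }
        fn fill_with_zeros(self) -> Written<'a> {
            self.bits.fill(false);
            Written(PhantomData)
        }
    }

    // A Bool-like type writes exactly one bit through the writer.
    fn bool_to_opaque<'a>(value: bool, writer: BitWriter<'a>) -> Written<'a> {
        assert_eq!(writer.width(), 1);
        writer.fill_from_slice(&[value])
    }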
1, + sim_only_values_len: 0, }; const MASK_TYPE_PROPERTIES: TypeProperties = Bool::TYPE_PROPERTIES; } diff --git a/crates/fayalite/src/int/uint_in_range.rs b/crates/fayalite/src/int/uint_in_range.rs index ae80a93..5ddd38c 100644 --- a/crates/fayalite/src/int/uint_in_range.rs +++ b/crates/fayalite/src/int/uint_in_range.rs @@ -12,9 +12,12 @@ use crate::{ phantom_const::PhantomConst, sim::value::{SimValue, SimValuePartialEq, ToSimValueWithType}, source_location::SourceLocation, - ty::{CanonicalType, StaticType, Type, TypeProperties, impl_match_variant_as_self}, + ty::{ + CanonicalType, OpaqueSimValueSlice, OpaqueSimValueWriter, OpaqueSimValueWritten, + StaticType, Type, TypeProperties, impl_match_variant_as_self, + }, }; -use bitvec::{order::Lsb0, slice::BitSlice, view::BitView}; +use bitvec::{order::Lsb0, view::BitView}; use serde::{ Deserialize, Deserializer, Serialize, Serializer, de::{Error, Visitor, value::UsizeDeserializer}, @@ -70,16 +73,24 @@ impl Type for UIntInRangeMaskType { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - Bool.sim_value_from_bits(bits) + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + Bool.sim_value_from_opaque(opaque) } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - Bool.sim_value_clone_from_bits(value, bits); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + Bool.sim_value_clone_from_opaque(value, opaque); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - Bool.sim_value_to_bits(value, bits); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + Bool.sim_value_to_opaque(value, writer) } } @@ -353,18 +364,30 @@ macro_rules! 
define_uint_in_range_type { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(opaque.size(), self.value.type_properties().size()); let mut retval = 0usize; - retval.view_bits_mut::()[..bits.len()].clone_from_bitslice(bits); + retval.view_bits_mut::()[..opaque.bit_width()] + .clone_from_bitslice(opaque.bits()); retval } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - *value = self.sim_value_from_bits(bits); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + *value = self.sim_value_from_opaque(opaque); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - bits.clone_from_bitslice(&value.view_bits::()[..bits.len()]); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice( + &value.view_bits::()[..self.value.width()], + )) } } diff --git a/crates/fayalite/src/memory.rs b/crates/fayalite/src/memory.rs index a146ac6..15789c8 100644 --- a/crates/fayalite/src/memory.rs +++ b/crates/fayalite/src/memory.rs @@ -1066,7 +1066,8 @@ pub fn splat_mask(ty: T, value: Expr) -> Expr> { | CanonicalType::SyncReset(_) | CanonicalType::Reset(_) | CanonicalType::Clock(_) - | CanonicalType::Enum(_) => Expr::from_canonical(Expr::canonical(value)), + | CanonicalType::Enum(_) + | CanonicalType::DynSimOnlyValueType(_) => Expr::from_canonical(Expr::canonical(value)), CanonicalType::Array(array) => Expr::from_canonical(Expr::canonical(repeat( splat_mask(array.element(), value), array.len(), diff --git a/crates/fayalite/src/module.rs b/crates/fayalite/src/module.rs index aaa9340..de54fb2 100644 --- a/crates/fayalite/src/module.rs +++ b/crates/fayalite/src/module.rs @@ -1524,7 +1524,8 @@ impl TargetState { | CanonicalType::Clock(_) | CanonicalType::AsyncReset(_) | CanonicalType::SyncReset(_) - | CanonicalType::Reset(_) => TargetStateInner::Single { + | CanonicalType::Reset(_) + | CanonicalType::DynSimOnlyValueType(_) => TargetStateInner::Single { declared_in_block, written_in_blocks: RefCell::default(), }, diff --git a/crates/fayalite/src/phantom_const.rs b/crates/fayalite/src/phantom_const.rs index 44b36ca..b852056 100644 --- a/crates/fayalite/src/phantom_const.rs +++ b/crates/fayalite/src/phantom_const.rs @@ -11,11 +11,11 @@ use crate::{ sim::value::{SimValue, SimValuePartialEq, ToSimValue, ToSimValueWithType}, source_location::SourceLocation, ty::{ - CanonicalType, StaticType, Type, TypeProperties, impl_match_variant_as_self, + CanonicalType, OpaqueSimValueSlice, OpaqueSimValueWriter, OpaqueSimValueWritten, + StaticType, Type, TypeProperties, impl_match_variant_as_self, serde_impls::{SerdeCanonicalType, SerdePhantomConst}, }, }; -use bitvec::slice::BitSlice; use serde::{ Deserialize, Deserializer, Serialize, Serializer, de::{DeserializeOwned, Error}, @@ -284,19 +284,27 @@ impl Type for PhantomConst { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert!(bits.is_empty()); + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert!(opaque.is_empty()); *self } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert!(bits.is_empty()); + fn sim_value_clone_from_opaque( + &self, + 
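As an aside, a standalone sketch of the usize-to-bits conversions the `UIntInRange` sim values rely on in this hunk, using bitvec's `view_bits`: only the low `width` bits of the usize are significant. Helper names are illustrative, not the crate's.

    use bitvec::{order::Lsb0, slice::BitSlice, view::BitView};

    fn usize_from_bits(bits: &BitSlice) -> usize {
        assert!(bits.len() <= usize::BITS as usize);
        let mut retval = 0usize;
        retval.view_bits_mut::<Lsb0>()[..bits.len()].clone_from_bitslice(bits);
        retval
    }

    fn usize_to_bits(value: usize, out: &mut BitSlice) {
        assert!(out.len() <= usize::BITS as usize);
        // bits of `value` above out.len() are silently dropped
        out.clone_from_bitslice(&value.view_bits::<Lsb0>()[..out.len()]);
    }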
value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert!(opaque.is_empty()); assert_eq!(*value, *self); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert!(bits.is_empty()); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { assert_eq!(*value, *self); + writer.fill_cloned_from_slice(OpaqueSimValueSlice::empty()) } } diff --git a/crates/fayalite/src/reset.rs b/crates/fayalite/src/reset.rs index f3392a2..5dff278 100644 --- a/crates/fayalite/src/reset.rs +++ b/crates/fayalite/src/reset.rs @@ -5,9 +5,12 @@ use crate::{ expr::{Expr, ToExpr, ops}, int::{Bool, SInt, UInt}, source_location::SourceLocation, - ty::{CanonicalType, StaticType, Type, TypeProperties, impl_match_variant_as_self}, + ty::{ + CanonicalType, OpaqueSimValueSize, OpaqueSimValueSlice, OpaqueSimValueWriter, + OpaqueSimValueWritten, StaticType, Type, TypeProperties, impl_match_variant_as_self, + }, }; -use bitvec::slice::BitSlice; +use bitvec::{bits, order::Lsb0}; mod sealed { pub trait ResetTypeSealed {} @@ -69,19 +72,29 @@ macro_rules! reset_type { retval } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), 1); - bits[0] + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(opaque.size(), OpaqueSimValueSize::from_bit_width(1)); + opaque.bits()[0] } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice) { - assert_eq!(bits.len(), 1); - *value = bits[0]; + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(opaque.size(), OpaqueSimValueSize::from_bit_width(1)); + *value = opaque.bits()[0]; } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), 1); - bits.set(0, *value); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!(writer.size(), OpaqueSimValueSize::from_bit_width(1)); + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_bitslice( + [bits![0], bits![1]][*value as usize], + )) } } @@ -102,6 +115,7 @@ macro_rules! 
reset_type { is_storable: false, is_castable_from_bits: $is_castable_from_bits, bit_width: 1, + sim_only_values_len: 0, }; const MASK_TYPE_PROPERTIES: TypeProperties = Bool::TYPE_PROPERTIES; } diff --git a/crates/fayalite/src/sim.rs b/crates/fayalite/src/sim.rs index d91427f..4062d9a 100644 --- a/crates/fayalite/src/sim.rs +++ b/crates/fayalite/src/sim.rs @@ -26,7 +26,7 @@ use crate::{ TypeLen, }, time::{SimDuration, SimInstant}, - value::SimValue, + value::{DynSimOnlyValue, DynSimOnlyValueType, SimValue}, }, util::{BitSliceWriteWithBase, DebugAsDisplay, HashMap, HashSet}, }; @@ -503,6 +503,11 @@ pub trait TraceWriter: fmt::Debug + 'static { variant_index: usize, ty: Enum, ) -> Result<(), Self::Error>; + fn set_signal_sim_only_value( + &mut self, + id: TraceScalarId, + value: &DynSimOnlyValue, + ) -> Result<(), Self::Error>; } pub struct DynTraceWriterDecls(Box); @@ -557,6 +562,11 @@ trait TraceWriterDynTrait: fmt::Debug + 'static { variant_index: usize, ty: Enum, ) -> std::io::Result<()>; + fn set_signal_sim_only_value_dyn( + &mut self, + id: TraceScalarId, + value: &DynSimOnlyValue, + ) -> std::io::Result<()>; } impl TraceWriterDynTrait for T { @@ -616,6 +626,13 @@ impl TraceWriterDynTrait for T { .map_err(err_into_io)?, ) } + fn set_signal_sim_only_value_dyn( + &mut self, + id: TraceScalarId, + value: &DynSimOnlyValue, + ) -> std::io::Result<()> { + Ok(TraceWriter::set_signal_sim_only_value(self, id, value).map_err(err_into_io)?) + } } pub struct DynTraceWriter(Box); @@ -680,6 +697,13 @@ impl TraceWriter for DynTraceWriter { self.0 .set_signal_enum_discriminant_dyn(id, variant_index, ty) } + fn set_signal_sim_only_value( + &mut self, + id: TraceScalarId, + value: &DynSimOnlyValue, + ) -> Result<(), Self::Error> { + self.0.set_signal_sim_only_value_dyn(id, value) + } } #[derive(Debug)] @@ -844,6 +868,10 @@ pub(crate) enum SimTraceKind { index: StatePartIndex, ty: Enum, }, + SimOnlyValue { + index: StatePartIndex, + ty: DynSimOnlyValueType, + }, } impl SimTraceKind { @@ -866,6 +894,10 @@ impl SimTraceKind { SimTraceKind::EnumDiscriminant { index: _, ty } => { BitVec::repeat(false, ty.discriminant_bit_width()) } + SimTraceKind::SimOnlyValue { index: _, ty: _ } => { + // use all ones as the default initializer to help catch uninitialized indexes + BitVec::repeat(true, SmallUInt::BITS as usize) + } } } } @@ -1416,6 +1448,8 @@ struct SimulationImpl { clocks_triggered: Interned<[StatePartIndex]>, breakpoints: Option, generator_waker: std::task::Waker, + sim_only_values: Vec>, + free_sim_only_value_indexes: Vec, } impl fmt::Debug for SimulationImpl { @@ -1440,6 +1474,8 @@ impl SimulationImpl { clocks_triggered, breakpoints: _, generator_waker: _, + sim_only_values, + free_sim_only_value_indexes, } = self; f.debug_struct("Simulation") .field("state", state) @@ -1453,6 +1489,8 @@ impl SimulationImpl { .field("trace_writers", trace_writers) .field("instant", instant) .field("clocks_triggered", clocks_triggered) + .field("sim_only_values", &SliceAsMapDebug(sim_only_values)) + .field("free_sim_only_value_indexes", free_sim_only_value_indexes) .finish_non_exhaustive() } fn new(compiled: Compiled) -> Self { @@ -1516,6 +1554,8 @@ impl SimulationImpl { clocks_triggered: compiled.clocks_triggered, breakpoints: None, generator_waker: Arc::new(GeneratorWaker).into(), + sim_only_values: vec![], + free_sim_only_value_indexes: vec![], } } fn write_traces( @@ -1586,6 +1626,18 @@ impl SimulationImpl { ty, )?; } + SimTraceKind::SimOnlyValue { .. 
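For reference, a standalone sketch of the slot-arena shape used for the new sim-only value storage below: values live in a Vec<Option<T>> so a small integer index can be stored in simulator state, freed slots are recycled through a free list, and Debug prints only the occupied slots as a map, mirroring SliceAsMapDebug. Method names here are illustrative.

    use std::fmt;

    struct SlotArena<T> {
        slots: Vec<Option<T>>,
        free: Vec<usize>,
    }

    impl<T> SlotArena<T> {
        fn new() -> Self {
            Self { slots: Vec::new(), free: Vec::new() }
        }
        fn insert(&mut self, value: T) -> usize {
            match self.free.pop() {
                Some(index) => {
                    debug_assert!(self.slots[index].is_none());
                    self.slots[index] = Some(value);
                    index
                }
                None => {
                    self.slots.push(Some(value));
                    self.slots.len() - 1
                }
            }
        }
        fn remove(&mut self, index: usize) -> Option<T> {
            let value = self.slots[index].take();
            if value.is_some() {
                self.free.push(index);
            }
            value
        }
        fn get(&self, index: usize) -> Option<&T> {
            self.slots.get(index)?.as_ref()
        }
    }

    impl<T: fmt::Debug> fmt::Debug for SlotArena<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_map()
                .entries(
                    self.slots
                        .iter()
                        .enumerate()
                        .filter_map(|(k, v)| Some((k, v.as_ref()?))),
                )
                .finish()
        }
    }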
} => { + let mut sim_only_value_index = [0; mem::size_of::()]; + sim_only_value_index.view_bits_mut::()[0..state.len()] + .clone_from_bitslice(state); + let sim_only_value_index = SmallUInt::from_le_bytes(sim_only_value_index); + trace_writer.set_signal_sim_only_value( + id, + self.sim_only_values[sim_only_value_index as usize] + .as_ref() + .expect("should be Some"), + )?; + } } } Ok(trace_writer) @@ -1635,7 +1687,8 @@ impl SimulationImpl { } SimTraceKind::SmallUInt { index, ty: _ } | SimTraceKind::SmallSInt { index, ty: _ } - | SimTraceKind::EnumDiscriminant { index, ty: _ } => { + | SimTraceKind::EnumDiscriminant { index, ty: _ } + | SimTraceKind::SimOnlyValue { index, ty: _ } => { let bytes = self.state.small_slots[index].to_le_bytes(); let bitslice = BitSlice::::from_slice(&bytes); let bitslice = &bitslice[..state.len()]; @@ -2050,6 +2103,7 @@ impl SimulationImpl { CanonicalType::Reset(_) => false, CanonicalType::Clock(_) => false, CanonicalType::PhantomConst(_) => unreachable!(), + CanonicalType::DynSimOnlyValueType(_) => false, }; let bit_indexes = start_bit_index..start_bit_index + compiled_value.layout.ty.bit_width(); @@ -2409,6 +2463,21 @@ impl fmt::Debug for SortedMapDebug<'_, K, V> { } } +struct SliceAsMapDebug<'a, T>(&'a [Option]); + +impl fmt::Debug for SliceAsMapDebug<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map() + .entries( + self.0 + .iter() + .enumerate() + .filter_map(|(k, v)| Some((k, v.as_ref()?))), + ) + .finish() + } +} + impl fmt::Debug for Simulation { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let Self { sim_impl, io } = self; diff --git a/crates/fayalite/src/sim/value.rs b/crates/fayalite/src/sim/value.rs index 70cb943..7214bb0 100644 --- a/crates/fayalite/src/sim/value.rs +++ b/crates/fayalite/src/sim/value.rs @@ -9,60 +9,82 @@ use crate::{ expr::{CastBitsTo, Expr, ToExpr}, int::{Bool, IntType, KnownSize, SInt, SIntType, SIntValue, Size, UInt, UIntType, UIntValue}, reset::{AsyncReset, Reset, SyncReset}, - ty::{CanonicalType, StaticType, Type}, + source_location::SourceLocation, + ty::{ + CanonicalType, OpaqueSimValue, OpaqueSimValueSize, OpaqueSimValueSlice, + OpaqueSimValueWriter, StaticType, Type, TypeProperties, impl_match_variant_as_self, + }, util::{ - ConstUsize, + ConstUsize, HashMap, alternating_cell::{AlternatingCell, AlternatingCellMethods}, }, }; use bitvec::{slice::BitSlice, vec::BitVec}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use hashbrown::hash_map::Entry; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error as _, ser::Error as _}; use std::{ - fmt, + borrow::Cow, + fmt::{self, Write}, + hash::{BuildHasher, Hash, Hasher, RandomState}, ops::{Deref, DerefMut}, - sync::Arc, + sync::{Arc, Mutex}, +}; + +pub(crate) mod sim_only_value_unsafe; + +pub use sim_only_value_unsafe::{ + DynSimOnlyValue, DynSimOnlyValueType, SimOnlyValue, SimOnlyValueTrait, SimOnlyValueType, }; #[derive(Copy, Clone, Eq, PartialEq)] enum ValidFlags { BothValid = 0, OnlyValueValid = 1, - OnlyBitsValid = 2, + OnlyOpaqueValid = 2, } #[derive(Clone)] struct SimValueInner { value: T::SimValue, - bits: UIntValue, + opaque: OpaqueSimValue, valid_flags: ValidFlags, ty: T, + sim_only_values_len: usize, } impl SimValueInner { - fn fill_bits(&mut self) { + fn size(&self) -> OpaqueSimValueSize { + OpaqueSimValueSize { + bit_width: self.opaque.bit_width(), + sim_only_values_len: self.sim_only_values_len, + } + } + fn fill_opaque(&mut self) { match self.valid_flags { - ValidFlags::BothValid | 
ValidFlags::OnlyBitsValid => {} + ValidFlags::BothValid | ValidFlags::OnlyOpaqueValid => {} ValidFlags::OnlyValueValid => { - self.ty.sim_value_to_bits(&self.value, self.bits.bits_mut()); + OpaqueSimValueWriter::rewrite_with(self.size(), &mut self.opaque, |writer| { + self.ty.sim_value_to_opaque(&self.value, writer) + }); self.valid_flags = ValidFlags::BothValid; } } } - fn into_bits(mut self) -> UIntValue { - self.fill_bits(); - self.bits + fn into_opaque(mut self) -> OpaqueSimValue { + self.fill_opaque(); + self.opaque } - fn bits_mut(&mut self) -> &mut UIntValue { - self.fill_bits(); - self.valid_flags = ValidFlags::OnlyBitsValid; - &mut self.bits + fn opaque_mut(&mut self) -> &mut OpaqueSimValue { + self.fill_opaque(); + self.valid_flags = ValidFlags::OnlyOpaqueValid; + &mut self.opaque } fn fill_value(&mut self) { match self.valid_flags { ValidFlags::BothValid | ValidFlags::OnlyValueValid => {} - ValidFlags::OnlyBitsValid => { + ValidFlags::OnlyOpaqueValid => { self.ty - .sim_value_clone_from_bits(&mut self.value, self.bits.bits()); + .sim_value_clone_from_opaque(&mut self.value, self.opaque.as_slice()); self.valid_flags = ValidFlags::BothValid; } } @@ -83,11 +105,13 @@ impl AlternatingCellMethods for SimValueInner { match self.valid_flags { ValidFlags::BothValid => return, ValidFlags::OnlyValueValid => { - self.ty.sim_value_to_bits(&self.value, self.bits.bits_mut()) + OpaqueSimValueWriter::rewrite_with(self.size(), &mut self.opaque, |writer| { + self.ty.sim_value_to_opaque(&self.value, writer) + }) } - ValidFlags::OnlyBitsValid => self + ValidFlags::OnlyOpaqueValid => self .ty - .sim_value_clone_from_bits(&mut self.value, self.bits.bits()), + .sim_value_clone_from_opaque(&mut self.value, self.opaque.as_slice()), } self.valid_flags = ValidFlags::BothValid; } @@ -143,13 +167,15 @@ impl Clone for SimValue { impl SimValue { #[track_caller] - pub fn from_bits(ty: T, bits: UIntValue) -> Self { - assert_eq!(ty.canonical().bit_width(), bits.width()); + pub fn from_opaque(ty: T, opaque: OpaqueSimValue) -> Self { + let size = ty.canonical().size(); + assert_eq!(size, opaque.size()); let inner = SimValueInner { - value: ty.sim_value_from_bits(bits.bits()), - bits, + value: ty.sim_value_from_opaque(opaque.as_slice()), + opaque, valid_flags: ValidFlags::BothValid, ty, + sim_only_values_len: size.sim_only_values_len, }; Self { inner: AlternatingCell::new_shared(inner), @@ -157,14 +183,30 @@ impl SimValue { } #[track_caller] pub fn from_bitslice(ty: T, bits: &BitSlice) -> Self { - Self::from_bits(ty, UIntValue::new(Arc::new(bits.to_bitvec()))) + Self::from_bitslice_and_sim_only_values(ty, bits, Vec::new()) + } + #[track_caller] + pub fn from_bitslice_and_sim_only_values( + ty: T, + bits: &BitSlice, + sim_only_values: Vec, + ) -> Self { + Self::from_opaque( + ty, + OpaqueSimValue::from_bitslice_and_sim_only_values(bits, sim_only_values), + ) } pub fn from_value(ty: T, value: T::SimValue) -> Self { + let type_properties = ty.canonical().type_properties(); let inner = SimValueInner { - bits: UIntValue::new_dyn(Arc::new(BitVec::repeat(false, ty.canonical().bit_width()))), + opaque: OpaqueSimValue::from_bits_and_sim_only_values( + UIntValue::new_dyn(Arc::new(BitVec::repeat(false, type_properties.bit_width))), + Vec::with_capacity(type_properties.sim_only_values_len), + ), value, valid_flags: ValidFlags::OnlyValueValid, ty, + sim_only_values_len: type_properties.sim_only_values_len, }; Self { inner: AlternatingCell::new_unique(inner), @@ -173,18 +215,24 @@ impl SimValue { pub fn ty(this: &Self) -> T { 
this.inner.share().ty } - pub fn into_bits(this: Self) -> UIntValue { - this.inner.into_inner().into_bits() + pub fn into_opaque(this: Self) -> OpaqueSimValue { + this.inner.into_inner().into_opaque() } - pub fn into_ty_and_bits(this: Self) -> (T, UIntValue) { + pub fn into_ty_and_opaque(this: Self) -> (T, OpaqueSimValue) { let inner = this.inner.into_inner(); - (inner.ty, inner.into_bits()) + (inner.ty, inner.into_opaque()) + } + pub fn opaque(this: &Self) -> &OpaqueSimValue { + &this.inner.share().opaque + } + pub fn opaque_mut(this: &mut Self) -> &mut OpaqueSimValue { + &mut this.inner.unique().opaque } pub fn bits(this: &Self) -> &UIntValue { - &this.inner.share().bits + Self::opaque(this).bits() } pub fn bits_mut(this: &mut Self) -> &mut UIntValue { - this.inner.unique().bits_mut() + Self::opaque_mut(this).bits_mut() } pub fn into_value(this: Self) -> T::SimValue { this.inner.into_inner().into_value() @@ -197,59 +245,59 @@ impl SimValue { } #[track_caller] pub fn from_canonical(v: SimValue) -> Self { - let (ty, bits) = SimValue::into_ty_and_bits(v); - Self::from_bits(T::from_canonical(ty), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + Self::from_opaque(T::from_canonical(ty), opaque) } pub fn into_canonical(this: Self) -> SimValue { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(ty.canonical(), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(ty.canonical(), opaque) } pub fn canonical(this: &Self) -> SimValue { - SimValue::from_bits(Self::ty(this).canonical(), Self::bits(this).clone()) + SimValue::from_opaque(Self::ty(this).canonical(), Self::opaque(this).clone()) } #[track_caller] pub fn from_dyn_int(v: SimValue) -> Self where T: IntType, { - let (ty, bits) = SimValue::into_ty_and_bits(v); - SimValue::from_bits(T::from_dyn_int(ty), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + SimValue::from_opaque(T::from_dyn_int(ty), opaque) } pub fn into_dyn_int(this: Self) -> SimValue where T: IntType, { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(ty.as_dyn_int(), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(ty.as_dyn_int(), opaque) } pub fn to_dyn_int(this: &Self) -> SimValue where T: IntType, { - SimValue::from_bits(Self::ty(this).as_dyn_int(), Self::bits(&this).clone()) + SimValue::from_opaque(Self::ty(this).as_dyn_int(), Self::opaque(&this).clone()) } #[track_caller] pub fn from_bundle(v: SimValue) -> Self where T: BundleType, { - let (ty, bits) = SimValue::into_ty_and_bits(v); - SimValue::from_bits(T::from_canonical(CanonicalType::Bundle(ty)), bits) + let (ty, opaque) = SimValue::into_ty_and_opaque(v); + SimValue::from_opaque(T::from_canonical(CanonicalType::Bundle(ty)), opaque) } pub fn into_bundle(this: Self) -> SimValue where T: BundleType, { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(Bundle::from_canonical(ty.canonical()), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(Bundle::from_canonical(ty.canonical()), opaque) } pub fn to_bundle(this: &Self) -> SimValue where T: BundleType, { - SimValue::from_bits( + SimValue::from_opaque( Bundle::from_canonical(Self::ty(this).canonical()), - Self::bits(&this).clone(), + Self::opaque(&this).clone(), ) } #[track_caller] @@ -257,23 +305,23 @@ impl SimValue { where T: EnumType, { - let (ty, bits) = SimValue::into_ty_and_bits(v); - SimValue::from_bits(T::from_canonical(CanonicalType::Enum(ty)), bits) + let (ty, opaque) = 
SimValue::into_ty_and_opaque(v); + SimValue::from_opaque(T::from_canonical(CanonicalType::Enum(ty)), opaque) } pub fn into_enum(this: Self) -> SimValue where T: EnumType, { - let (ty, bits) = Self::into_ty_and_bits(this); - SimValue::from_bits(Enum::from_canonical(ty.canonical()), bits) + let (ty, opaque) = Self::into_ty_and_opaque(this); + SimValue::from_opaque(Enum::from_canonical(ty.canonical()), opaque) } pub fn to_enum(this: &Self) -> SimValue where T: EnumType, { - SimValue::from_bits( + SimValue::from_opaque( Enum::from_canonical(Self::ty(this).canonical()), - Self::bits(&this).clone(), + Self::opaque(&this).clone(), ) } } @@ -308,7 +356,11 @@ impl ToExpr for SimValue { #[track_caller] fn to_expr(&self) -> Expr { let inner = self.inner.share(); - inner.bits.cast_bits_to(inner.ty) + assert_eq!( + inner.sim_only_values_len, 0, + "can't convert sim-only values to Expr" + ); + inner.opaque.bits().cast_bits_to(inner.ty) } } @@ -443,12 +495,15 @@ impl ToSimValueWithType for BitVec { #[track_caller] fn arc_into_sim_value_with_type(self: Arc, ty: T) -> SimValue { - SimValue::from_bits(ty, UIntValue::new_dyn(self)) + SimValue::from_opaque(ty, OpaqueSimValue::from_bits(UIntValue::new_dyn(self))) } #[track_caller] fn arc_to_sim_value_with_type(self: &Arc, ty: T) -> SimValue { - SimValue::from_bits(ty, UIntValue::new_dyn(self.clone())) + SimValue::from_opaque( + ty, + OpaqueSimValue::from_bits(UIntValue::new_dyn(self.clone())), + ) } } @@ -792,16 +847,18 @@ impl ToSimValueWithType for bool { | CanonicalType::Array(_) | CanonicalType::Enum(_) | CanonicalType::Bundle(_) - | CanonicalType::PhantomConst(_) => { + | CanonicalType::PhantomConst(_) + | CanonicalType::DynSimOnlyValueType(_) => { panic!("can't create SimValue from bool: expected value of type: {ty:?}"); } CanonicalType::Bool(_) | CanonicalType::AsyncReset(_) | CanonicalType::SyncReset(_) | CanonicalType::Reset(_) - | CanonicalType::Clock(_) => { - SimValue::from_bits(ty, UIntValue::new(Arc::new(BitVec::repeat(*self, 1)))) - } + | CanonicalType::Clock(_) => SimValue::from_opaque( + ty, + OpaqueSimValue::from_bits(UIntValue::new(Arc::new(BitVec::repeat(*self, 1)))), + ), } } } @@ -911,3 +968,330 @@ macro_rules! 
impl_to_sim_value_for_int_value { impl_to_sim_value_for_int_value!(UIntValue, UInt, UIntType); impl_to_sim_value_for_int_value!(SIntValue, SInt, SIntType); + +#[derive(Default)] +struct DynSimOnlyValueTypeSerdeTableRest { + from_serde: HashMap, + serde_id_random_state: RandomState, + buffer: String, +} + +impl DynSimOnlyValueTypeSerdeTableRest { + #[cold] + fn add_new(&mut self, ty: DynSimOnlyValueType) -> DynSimOnlyValueTypeSerdeId { + let mut try_number = 0u64; + let mut hasher = self.serde_id_random_state.build_hasher(); + // extract more bits of randomness from TypeId -- its Hash impl only hashes 64-bits + write!(self.buffer, "{:?}", ty.type_id()).expect("shouldn't ever fail"); + self.buffer.hash(&mut hasher); + loop { + let mut hasher = hasher.clone(); + try_number.hash(&mut hasher); + try_number += 1; + let retval = DynSimOnlyValueTypeSerdeId(std::array::from_fn(|i| { + let mut hasher = hasher.clone(); + i.hash(&mut hasher); + hasher.finish() as u32 + })); + match self.from_serde.entry(retval) { + Entry::Occupied(_) => continue, + Entry::Vacant(e) => { + e.insert(ty); + return retval; + } + } + } + } +} + +#[derive(Default)] +struct DynSimOnlyValueTypeSerdeTable { + to_serde: HashMap, + rest: DynSimOnlyValueTypeSerdeTableRest, +} + +static DYN_SIM_ONLY_VALUE_TYPE_SERDE_TABLE: Mutex> = + Mutex::new(None); + +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] +#[serde(transparent)] +struct DynSimOnlyValueTypeSerdeId([u32; 4]); + +impl From for DynSimOnlyValueTypeSerdeId { + fn from(ty: DynSimOnlyValueType) -> Self { + let mut locked = DYN_SIM_ONLY_VALUE_TYPE_SERDE_TABLE + .lock() + .expect("shouldn't be poison"); + let DynSimOnlyValueTypeSerdeTable { to_serde, rest } = locked.get_or_insert_default(); + match to_serde.entry(ty) { + Entry::Occupied(occupied_entry) => *occupied_entry.get(), + Entry::Vacant(vacant_entry) => *vacant_entry.insert(rest.add_new(ty)), + } + } +} + +impl DynSimOnlyValueTypeSerdeId { + fn ty(self) -> Option { + let locked = DYN_SIM_ONLY_VALUE_TYPE_SERDE_TABLE + .lock() + .expect("shouldn't be poison"); + Some(*locked.as_ref()?.rest.from_serde.get(&self)?) 
+ } +} + +#[derive(Clone, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)] +struct DynSimOnlyValueTypeSerde<'a> { + random_id: DynSimOnlyValueTypeSerdeId, + #[serde(borrow)] + type_name: Cow<'a, str>, +} + +impl Serialize for DynSimOnlyValueType { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + DynSimOnlyValueTypeSerde { + random_id: (*self).into(), + type_name: Cow::Borrowed(self.type_name()), + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for DynSimOnlyValueType { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let deserialized = DynSimOnlyValueTypeSerde::deserialize(deserializer)?; + let retval = deserialized + .random_id + .ty() + .filter(|ty| ty.type_name() == deserialized.type_name); + retval.ok_or_else(|| D::Error::custom("doesn't match any DynSimOnlyValueType that was serialized this time this program was run")) + } +} + +impl DynSimOnlyValueType { + pub const fn type_properties(self) -> TypeProperties { + TypeProperties { + is_passive: true, + is_storable: true, + is_castable_from_bits: false, + bit_width: 0, + sim_only_values_len: 1, + } + } + pub fn can_connect(self, other: Self) -> bool { + self == other + } +} + +impl Type for DynSimOnlyValueType { + type BaseType = DynSimOnlyValueType; + type MaskType = Bool; + type SimValue = DynSimOnlyValue; + + impl_match_variant_as_self!(); + + fn mask_type(&self) -> Self::MaskType { + Bool + } + + fn canonical(&self) -> CanonicalType { + CanonicalType::DynSimOnlyValueType(*self) + } + + fn from_canonical(canonical_type: CanonicalType) -> Self { + let CanonicalType::DynSimOnlyValueType(v) = canonical_type else { + panic!("expected DynSimOnlyValueType"); + }; + v + } + + fn source_location() -> SourceLocation { + SourceLocation::builtin() + } + + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(opaque.size(), self.type_properties().size()); + opaque.sim_only_values()[0].clone() + } + + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(opaque.size(), self.type_properties().size()); + value.clone_from(&opaque.sim_only_values()[0]); + } + + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> crate::ty::OpaqueSimValueWritten<'w> { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_parts( + BitSlice::empty(), + std::array::from_ref(value), + )) + } +} + +impl Type for SimOnlyValueType { + type BaseType = DynSimOnlyValueType; + type MaskType = Bool; + type SimValue = SimOnlyValue; + + impl_match_variant_as_self!(); + + fn mask_type(&self) -> Self::MaskType { + Bool + } + + fn canonical(&self) -> CanonicalType { + DynSimOnlyValueType::from(*self).canonical() + } + + fn from_canonical(canonical_type: CanonicalType) -> Self { + DynSimOnlyValueType::from_canonical(canonical_type) + .downcast() + .expect("got wrong SimOnlyValueType") + } + + fn source_location() -> SourceLocation { + SourceLocation::builtin() + } + + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(Self::TYPE_PROPERTIES.size(), opaque.size()); + SimOnlyValue::new( + opaque.sim_only_values()[0] + .downcast_ref::() + .expect("type mismatch") + .clone(), + ) + } + + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(Self::TYPE_PROPERTIES.size(), opaque.size()); + (**value).clone_from( + 
&opaque.sim_only_values()[0] + .downcast_ref::() + .expect("type mismatch"), + ) + } + + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> crate::ty::OpaqueSimValueWritten<'w> { + SimOnlyValue::with_dyn_ref(value, |value| { + writer.fill_cloned_from_slice(OpaqueSimValueSlice::from_parts( + BitSlice::empty(), + std::array::from_ref(value), + )) + }) + } +} + +impl StaticType for SimOnlyValueType { + const TYPE: Self = Self::new(); + + const MASK_TYPE: Self::MaskType = Bool; + + const TYPE_PROPERTIES: TypeProperties = DynSimOnlyValueType::of::().type_properties(); + + const MASK_TYPE_PROPERTIES: TypeProperties = Bool::TYPE_PROPERTIES; +} + +impl fmt::Debug for SimOnlyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Self::with_dyn_ref(self, |this| fmt::Debug::fmt(this, f)) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename = "SimOnlyValue")] +struct SerdeSimOnlyValue<'a> { + ty: DynSimOnlyValueType, + #[serde(borrow)] + value: Cow<'a, str>, +} + +impl Serialize for DynSimOnlyValue { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + SerdeSimOnlyValue { + ty: self.ty(), + value: Cow::Owned(self.serialize_to_json_string().map_err(S::Error::custom)?), + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for DynSimOnlyValue { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let SerdeSimOnlyValue { ty, value } = Deserialize::deserialize(deserializer)?; + ty.deserialize_from_json_string(&value) + .map_err(D::Error::custom) + } +} + +impl ToSimValueWithType for DynSimOnlyValue { + #[track_caller] + fn to_sim_value_with_type(&self, ty: DynSimOnlyValueType) -> SimValue { + assert_eq!(self.ty(), ty, "mismatched type"); + SimValue::from_value(ty, self.clone()) + } + #[track_caller] + fn into_sim_value_with_type(self, ty: DynSimOnlyValueType) -> SimValue { + assert_eq!(self.ty(), ty, "mismatched type"); + SimValue::from_value(ty, self) + } +} + +impl ToSimValueWithType> for SimOnlyValue { + fn to_sim_value_with_type(&self, ty: SimOnlyValueType) -> SimValue> { + SimValue::from_value(ty, self.clone()) + } + fn into_sim_value_with_type(self, ty: SimOnlyValueType) -> SimValue> { + SimValue::from_value(ty, self) + } +} + +impl ToSimValue for DynSimOnlyValue { + type Type = DynSimOnlyValueType; + + fn to_sim_value(&self) -> SimValue { + SimValue::from_value(self.ty(), self.clone()) + } + + fn into_sim_value(self) -> SimValue { + SimValue::from_value(self.ty(), self) + } +} + +impl ToSimValue for SimOnlyValue { + type Type = SimOnlyValueType; + + fn to_sim_value(&self) -> SimValue { + SimValue::from_value(Default::default(), self.clone()) + } + + fn into_sim_value(self) -> SimValue { + SimValue::from_value(Default::default(), self) + } +} diff --git a/crates/fayalite/src/sim/value/sim_only_value_unsafe.rs b/crates/fayalite/src/sim/value/sim_only_value_unsafe.rs new file mode 100644 index 0000000..59134c1 --- /dev/null +++ b/crates/fayalite/src/sim/value/sim_only_value_unsafe.rs @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: LGPL-3.0-or-later +// See Notices.txt for copyright information + +//! 
`unsafe` parts of [`DynSimOnlyValue`] + +use serde::{Serialize, de::DeserializeOwned}; +use std::{ + alloc::{Layout, alloc, dealloc, handle_alloc_error}, + any::TypeId, + fmt, + hash::{Hash, Hasher}, + marker::PhantomData, + mem::ManuallyDrop, + ptr::{self, NonNull}, +}; + +struct SimOnlyValueVTable { + layout: Layout, + // TODO: replace with TypeId once TypeId::of is const-stable + type_id: fn() -> TypeId, + type_name: fn() -> &'static str, + drop_in_place: unsafe fn(this: NonNull<()>), + eq: unsafe fn(this: NonNull<()>, other: NonNull<()>) -> bool, + hash: unsafe fn(this: NonNull<()>, hasher: &mut dyn Hasher), + debug_fmt: unsafe fn(this: NonNull<()>, f: &mut fmt::Formatter<'_>) -> fmt::Result, + serialize_to_json_string: unsafe fn(this: NonNull<()>) -> serde_json::Result, + deserialize_into_uninit_from_json_string: + unsafe fn(this: NonNull<()>, json_str: &str) -> serde_json::Result<()>, + clone_into_uninit: unsafe fn(target: NonNull<()>, src: NonNull<()>), + clone_from: unsafe fn(this: NonNull<()>, src: NonNull<()>), +} + +pub trait SimOnlyValueTrait: + 'static + Eq + Hash + fmt::Debug + Serialize + DeserializeOwned + Clone +{ +} + +impl SimOnlyValueTrait + for T +{ +} + +unsafe trait GetSimOnlyValueVTable: SimOnlyValueTrait { + const VTABLE: &'static SimOnlyValueVTable; +} + +unsafe impl GetSimOnlyValueVTable for T { + const VTABLE: &'static SimOnlyValueVTable = &SimOnlyValueVTable { + layout: Layout::new::(), + type_id: TypeId::of::, + type_name: std::any::type_name::, + drop_in_place: |this| unsafe { + this.cast::().drop_in_place(); + }, + eq: |this, other| unsafe { this.cast::().as_ref() == other.cast::().as_ref() }, + hash: |this, mut hasher| unsafe { this.cast::().as_ref().hash(&mut hasher) }, + debug_fmt: |this, f| unsafe { fmt::Debug::fmt(this.cast::().as_ref(), f) }, + serialize_to_json_string: |this| unsafe { + serde_json::to_string(this.cast::().as_ref()) + }, + deserialize_into_uninit_from_json_string: |this, json_str| unsafe { + serde_json::from_str(json_str).map(|v| this.cast::().write(v)) + }, + clone_into_uninit: |target, src| unsafe { + target + .cast::() + .write(Clone::clone(src.cast::().as_ref())); + }, + clone_from: |this, src| unsafe { + Clone::clone_from(this.cast::().as_mut(), src.cast::().as_ref()); + }, + }; +} + +#[derive(Copy, Clone)] +pub struct DynSimOnlyValueType { + vtable: &'static SimOnlyValueVTable, +} + +struct DynSimOnlyValueUninit { + ty: DynSimOnlyValueType, + value: NonNull<()>, +} + +impl DynSimOnlyValueUninit { + fn new(ty: DynSimOnlyValueType) -> Self { + let layout = ty.vtable.layout; + let value = if layout.size() == 0 { + ptr::without_provenance_mut(layout.align()) + } else { + unsafe { alloc(layout).cast() } + }; + let Some(value) = NonNull::new(value) else { + handle_alloc_error(layout) + }; + Self { ty, value } + } + unsafe fn assume_init(self) -> DynSimOnlyValue { + let this = ManuallyDrop::new(self); + DynSimOnlyValue { + ty: this.ty, + value: this.value, + } + } +} + +impl Drop for DynSimOnlyValueUninit { + fn drop(&mut self) { + let layout = self.ty.vtable.layout; + if layout.size() != 0 { + unsafe { + dealloc(self.value.as_ptr().cast(), layout); + } + } + } +} + +impl DynSimOnlyValueType { + pub const fn of() -> Self { + Self { + vtable: ::VTABLE, + } + } + pub fn type_id(self) -> TypeId { + (self.vtable.type_id)() + } + pub fn type_name(self) -> &'static str { + (self.vtable.type_name)() + } + pub fn is(self) -> bool { + self.type_id() == TypeId::of::() + } + pub fn downcast(self) -> Option> { + 
self.is::().then_some(SimOnlyValueType::default()) + } + pub fn deserialize_from_json_string( + self, + json_str: &str, + ) -> serde_json::Result { + let retval = DynSimOnlyValueUninit::new(self); + unsafe { + (self.vtable.deserialize_into_uninit_from_json_string)(retval.value, json_str)?; + Ok(retval.assume_init()) + } + } +} + +impl PartialEq for DynSimOnlyValueType { + fn eq(&self, other: &Self) -> bool { + if ptr::eq(self.vtable, other.vtable) { + true + } else if self.vtable.layout != other.vtable.layout { + false + } else { + (self.vtable.type_id)() == (other.vtable.type_id)() + } + } +} + +impl Eq for DynSimOnlyValueType {} + +impl Hash for DynSimOnlyValueType { + fn hash(&self, state: &mut H) { + (self.vtable.type_id)().hash(state); + } +} + +impl fmt::Debug for DynSimOnlyValueType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SimOnlyValueType<{}>", (self.vtable.type_name)()) + } +} + +impl From> for DynSimOnlyValueType { + fn from(value: SimOnlyValueType) -> Self { + let SimOnlyValueType(PhantomData) = value; + Self { + vtable: ::VTABLE, + } + } +} + +#[derive(Clone, Eq, PartialEq, Hash, Debug)] +pub struct SimOnlyValueType(PhantomData T>); + +impl SimOnlyValueType { + pub const fn new() -> Self { + Self(PhantomData) + } +} + +impl Copy for SimOnlyValueType {} + +impl Default for SimOnlyValueType { + fn default() -> Self { + Self::new() + } +} + +#[derive(Clone, Eq, PartialEq, Hash, Default, PartialOrd, Ord)] +pub struct SimOnlyValue(Box); + +impl SimOnlyValue { + pub fn with_dyn_ref R, R>(&self, f: F) -> R { + let dyn_ref = ManuallyDrop::new(DynSimOnlyValue { + ty: SimOnlyValueType::::default().into(), + value: NonNull::::from_ref(&self.0).cast(), + }); + f(&dyn_ref) + } + pub fn from_box(v: Box) -> Self { + Self(v) + } + pub fn new(v: T) -> Self { + Self(Box::new(v)) + } + pub fn into_inner(this: Self) -> T { + *this.0 + } + pub fn into_inner_box(this: Self) -> Box { + this.0 + } + pub fn into_dyn(this: Self) -> DynSimOnlyValue { + DynSimOnlyValue::from(this) + } +} + +impl std::ops::Deref for SimOnlyValue { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for SimOnlyValue { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +pub struct DynSimOnlyValue { + ty: DynSimOnlyValueType, + value: NonNull<()>, +} + +struct DebugDynSimOnlyValueInner<'a>(&'a DynSimOnlyValue); + +impl<'a> fmt::Debug for DebugDynSimOnlyValueInner<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + unsafe { (self.0.ty.vtable.debug_fmt)(self.0.value, f) } + } +} + +impl fmt::Debug for DynSimOnlyValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SimOnlyValue<{}>", self.ty.type_name())?; + f.debug_tuple("") + .field(&DebugDynSimOnlyValueInner(self)) + .finish() + } +} + +impl PartialEq for DynSimOnlyValue { + fn eq(&self, other: &Self) -> bool { + self.ty == other.ty && unsafe { (self.ty.vtable.eq)(self.value, other.value) } + } +} + +impl Eq for DynSimOnlyValue {} + +impl Hash for DynSimOnlyValue { + fn hash(&self, state: &mut H) { + self.ty.hash(state); + unsafe { (self.ty.vtable.hash)(self.value, state) }; + } +} + +impl Clone for DynSimOnlyValue { + fn clone(&self) -> Self { + let retval = DynSimOnlyValueUninit::new(self.ty); + unsafe { + (self.ty.vtable.clone_into_uninit)(retval.value, self.value); + retval.assume_init() + } + } + fn clone_from(&mut self, source: &Self) { + if self.ty == source.ty { + unsafe { (self.ty.vtable.clone_from)(self.value, 
source.value) }; + } else { + *self = source.clone(); + } + } +} + +impl Drop for DynSimOnlyValue { + fn drop(&mut self) { + unsafe { + ptr::read(self).drop_in_place_and_keep_alloc(); + } + } +} + +impl From> for DynSimOnlyValue { + fn from(value: SimOnlyValue) -> Self { + unsafe { + Self { + ty: SimOnlyValueType::::default().into(), + value: NonNull::new_unchecked(Box::into_raw(value.0)).cast::<()>(), + } + } + } +} + +impl DynSimOnlyValue { + pub fn ty(&self) -> DynSimOnlyValueType { + self.ty + } + pub fn type_id(&self) -> TypeId { + self.ty.type_id() + } + pub fn is(&self) -> bool { + self.ty.is::() + } + pub fn downcast(self) -> Result, DynSimOnlyValue> { + let Some(_) = self.ty.downcast::() else { + return Err(self); + }; + Ok(SimOnlyValue(unsafe { + Box::from_raw(ManuallyDrop::new(self).value.as_ptr().cast::()) + })) + } + pub fn downcast_ref(&self) -> Option<&T> { + self.ty + .downcast::() + .map(|_| unsafe { &*self.value.as_ptr().cast::() }) + } + pub fn downcast_mut(&mut self) -> Option<&mut T> { + self.ty + .downcast::() + .map(|_| unsafe { &mut *self.value.as_ptr().cast::() }) + } + pub fn serialize_to_json_string(&self) -> serde_json::Result { + unsafe { (self.ty.vtable.serialize_to_json_string)(self.value) } + } + fn forget_and_keep_alloc(self) -> DynSimOnlyValueUninit { + let this = ManuallyDrop::new(self); + DynSimOnlyValueUninit { + ty: this.ty, + value: this.value, + } + } + fn drop_in_place_and_keep_alloc(self) -> DynSimOnlyValueUninit { + let retval = self.forget_and_keep_alloc(); + unsafe { (retval.ty.vtable.drop_in_place)(retval.value) }; + retval + } +} diff --git a/crates/fayalite/src/sim/vcd.rs b/crates/fayalite/src/sim/vcd.rs index 4a2b564..8918db0 100644 --- a/crates/fayalite/src/sim/vcd.rs +++ b/crates/fayalite/src/sim/vcd.rs @@ -13,6 +13,7 @@ use crate::{ TraceModuleIO, TraceReg, TraceSInt, TraceScalar, TraceScalarId, TraceScope, TraceSyncReset, TraceUInt, TraceWire, TraceWriter, TraceWriterDecls, time::{SimDuration, SimInstant}, + value::DynSimOnlyValue, }, util::HashMap, }; @@ -1061,6 +1062,14 @@ impl TraceWriter for VcdWriter { ) -> Result<(), Self::Error> { write_enum_discriminant_value_change(&mut self.writer, variant_index, ty, id.as_usize()) } + + fn set_signal_sim_only_value( + &mut self, + id: TraceScalarId, + value: &DynSimOnlyValue, + ) -> Result<(), Self::Error> { + write_string_value_change(&mut self.writer, format_args!("{value:?}"), id) + } } impl fmt::Debug for VcdWriter { diff --git a/crates/fayalite/src/ty.rs b/crates/fayalite/src/ty.rs index 787869d..90b39e8 100644 --- a/crates/fayalite/src/ty.rs +++ b/crates/fayalite/src/ty.rs @@ -11,13 +11,21 @@ use crate::{ intern::{Intern, Interned}, phantom_const::PhantomConst, reset::{AsyncReset, Reset, SyncReset}, - sim::value::{SimValue, ToSimValueWithType}, + sim::value::{DynSimOnlyValue, DynSimOnlyValueType, SimValue, ToSimValueWithType}, source_location::SourceLocation, - util::ConstUsize, + util::{ConstUsize, slice_range, try_slice_range}, }; -use bitvec::slice::BitSlice; +use bitvec::{slice::BitSlice, vec::BitVec}; use serde::{Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned}; -use std::{fmt, hash::Hash, iter::FusedIterator, ops::Index, sync::Arc}; +use std::{ + fmt, + hash::Hash, + iter::{FusedIterator, Sum}, + marker::PhantomData, + mem, + ops::{Add, AddAssign, Bound, Index, Mul, MulAssign, Range, Sub, SubAssign}, + sync::Arc, +}; pub(crate) mod serde_impls; @@ -28,6 +36,23 @@ pub struct TypeProperties { pub is_storable: bool, pub is_castable_from_bits: bool, pub 
bit_width: usize, + pub sim_only_values_len: usize, +} + +impl TypeProperties { + pub const fn size(self) -> OpaqueSimValueSize { + let Self { + is_passive: _, + is_storable: _, + is_castable_from_bits: _, + bit_width, + sim_only_values_len, + } = self; + OpaqueSimValueSize { + bit_width, + sim_only_values_len, + } + } } #[derive(Copy, Clone, Hash, PartialEq, Eq)] @@ -43,6 +68,7 @@ pub enum CanonicalType { Reset(Reset), Clock(Clock), PhantomConst(PhantomConst), + DynSimOnlyValueType(DynSimOnlyValueType), } impl fmt::Debug for CanonicalType { @@ -59,6 +85,7 @@ impl fmt::Debug for CanonicalType { Self::Reset(v) => v.fmt(f), Self::Clock(v) => v.fmt(f), Self::PhantomConst(v) => v.fmt(f), + Self::DynSimOnlyValueType(v) => v.fmt(f), } } } @@ -95,6 +122,7 @@ impl CanonicalType { CanonicalType::Reset(v) => v.type_properties(), CanonicalType::Clock(v) => v.type_properties(), CanonicalType::PhantomConst(v) => v.type_properties(), + CanonicalType::DynSimOnlyValueType(v) => v.type_properties(), } } pub fn is_passive(self) -> bool { @@ -109,6 +137,12 @@ impl CanonicalType { pub fn bit_width(self) -> usize { self.type_properties().bit_width } + pub fn sim_only_values_len(self) -> usize { + self.type_properties().sim_only_values_len + } + pub fn size(self) -> OpaqueSimValueSize { + self.type_properties().size() + } pub fn can_connect(self, rhs: Self) -> bool { match self { CanonicalType::UInt(lhs) => { @@ -177,6 +211,12 @@ impl CanonicalType { }; lhs.can_connect(rhs) } + CanonicalType::DynSimOnlyValueType(lhs) => { + let CanonicalType::DynSimOnlyValueType(rhs) = rhs else { + return false; + }; + lhs.can_connect(rhs) + } } } pub(crate) fn as_serde_unexpected_str(self) -> &'static str { @@ -287,6 +327,7 @@ impl_base_type!(SyncReset); impl_base_type!(Reset); impl_base_type!(Clock); impl_base_type!(PhantomConst); +impl_base_type!(DynSimOnlyValueType); impl_base_type_serde!(Bool, "a Bool"); impl_base_type_serde!(Enum, "an Enum"); @@ -348,9 +389,17 @@ pub trait Type: fn canonical(&self) -> CanonicalType; fn from_canonical(canonical_type: CanonicalType) -> Self; fn source_location() -> SourceLocation; - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue; - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: &BitSlice); - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice); + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue; + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w>; } pub trait BaseType: @@ -405,6 +454,7 @@ impl Type for CanonicalType { CanonicalType::Reset(v) => v.mask_type().canonical(), CanonicalType::Clock(v) => v.mask_type().canonical(), CanonicalType::PhantomConst(v) => v.mask_type().canonical(), + CanonicalType::DynSimOnlyValueType(v) => v.mask_type().canonical(), } } fn canonical(&self) -> CanonicalType { @@ -416,28 +466,288 @@ impl Type for CanonicalType { fn source_location() -> SourceLocation { SourceLocation::builtin() } - fn sim_value_from_bits(&self, bits: &BitSlice) -> Self::SimValue { - assert_eq!(bits.len(), self.bit_width()); - OpaqueSimValue::from_bitslice(bits) + fn sim_value_from_opaque(&self, opaque: OpaqueSimValueSlice<'_>) -> Self::SimValue { + assert_eq!(self.type_properties().size(), opaque.size()); + opaque.to_owned() } - fn sim_value_clone_from_bits(&self, value: &mut Self::SimValue, bits: 
&BitSlice) { - assert_eq!(bits.len(), self.bit_width()); - assert_eq!(value.bit_width(), self.bit_width()); - value.bits_mut().bits_mut().copy_from_bitslice(bits); + fn sim_value_clone_from_opaque( + &self, + value: &mut Self::SimValue, + opaque: OpaqueSimValueSlice<'_>, + ) { + assert_eq!(self.type_properties().size(), opaque.size()); + assert_eq!(value.size(), opaque.size()); + value.clone_from_slice(opaque); } - fn sim_value_to_bits(&self, value: &Self::SimValue, bits: &mut BitSlice) { - assert_eq!(bits.len(), self.bit_width()); - assert_eq!(value.bit_width(), self.bit_width()); - bits.copy_from_bitslice(value.bits().bits()); + fn sim_value_to_opaque<'w>( + &self, + value: &Self::SimValue, + writer: OpaqueSimValueWriter<'w>, + ) -> OpaqueSimValueWritten<'w> { + assert_eq!(self.type_properties().size(), writer.size()); + assert_eq!(value.size(), writer.size()); + writer.fill_cloned_from_slice(value.as_slice()) + } +} + +#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize, Default)] +#[non_exhaustive] +pub struct OpaqueSimValueSizeRange { + pub bit_width: Range, + pub sim_only_values_len: Range, +} + +impl OpaqueSimValueSizeRange { + pub fn start(&self) -> OpaqueSimValueSize { + OpaqueSimValueSize { + bit_width: self.bit_width.start, + sim_only_values_len: self.sim_only_values_len.start, + } + } + pub fn end(&self) -> OpaqueSimValueSize { + OpaqueSimValueSize { + bit_width: self.bit_width.end, + sim_only_values_len: self.sim_only_values_len.end, + } + } + pub fn is_empty(&self) -> bool { + let Self { + bit_width, + sim_only_values_len, + } = self; + bit_width.is_empty() && sim_only_values_len.is_empty() + } +} + +impl From> for OpaqueSimValueSizeRange { + fn from(value: Range) -> Self { + Self { + bit_width: value.start.bit_width..value.end.bit_width, + sim_only_values_len: value.start.sim_only_values_len..value.end.sim_only_values_len, + } + } +} + +impl From for Range { + fn from(value: OpaqueSimValueSizeRange) -> Self { + value.start()..value.end() + } +} + +pub trait OpaqueSimValueSizeRangeBounds { + fn start_bound(&self) -> Bound; + fn end_bound(&self) -> Bound; +} + +impl OpaqueSimValueSizeRangeBounds for OpaqueSimValueSizeRange { + fn start_bound(&self) -> Bound { + Bound::Included(self.start()) + } + + fn end_bound(&self) -> Bound { + Bound::Excluded(self.end()) + } +} + +impl> OpaqueSimValueSizeRangeBounds for T { + fn start_bound(&self) -> Bound { + std::ops::RangeBounds::start_bound(self).cloned() + } + fn end_bound(&self) -> Bound { + std::ops::RangeBounds::end_bound(self).cloned() + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize, Default)] +#[non_exhaustive] +pub struct OpaqueSimValueSize { + pub bit_width: usize, + pub sim_only_values_len: usize, +} + +impl OpaqueSimValueSize { + pub const fn from_bit_width(bit_width: usize) -> Self { + Self::from_bit_width_and_sim_only_values_len(bit_width, 0) + } + pub const fn from_bit_width_and_sim_only_values_len( + bit_width: usize, + sim_only_values_len: usize, + ) -> Self { + Self { + bit_width, + sim_only_values_len, + } + } + pub const fn empty() -> Self { + Self { + bit_width: 0, + sim_only_values_len: 0, + } + } + pub const fn is_empty(self) -> bool { + let Self { + bit_width, + sim_only_values_len, + } = self; + bit_width == 0 && sim_only_values_len == 0 + } + pub const fn checked_mul(self, factor: usize) -> Option { + let Some(bit_width) = self.bit_width.checked_mul(factor) else { + return None; + }; + let Some(sim_only_values_len) = self.sim_only_values_len.checked_mul(factor) 
else { + return None; + }; + Some(Self { + bit_width, + sim_only_values_len, + }) + } + pub const fn checked_add(self, rhs: Self) -> Option { + let Some(bit_width) = self.bit_width.checked_add(rhs.bit_width) else { + return None; + }; + let Some(sim_only_values_len) = self + .sim_only_values_len + .checked_add(rhs.sim_only_values_len) + else { + return None; + }; + Some(Self { + bit_width, + sim_only_values_len, + }) + } + pub const fn checked_sub(self, rhs: Self) -> Option { + let Some(bit_width) = self.bit_width.checked_sub(rhs.bit_width) else { + return None; + }; + let Some(sim_only_values_len) = self + .sim_only_values_len + .checked_sub(rhs.sim_only_values_len) + else { + return None; + }; + Some(Self { + bit_width, + sim_only_values_len, + }) + } + pub fn try_slice_range( + self, + range: R, + ) -> Option { + let start = range.start_bound(); + let end = range.end_bound(); + let bit_width = try_slice_range( + (start.map(|v| v.bit_width), end.map(|v| v.bit_width)), + self.bit_width, + )?; + let sim_only_values_len = try_slice_range( + ( + start.map(|v| v.sim_only_values_len), + end.map(|v| v.sim_only_values_len), + ), + self.sim_only_values_len, + )?; + Some(OpaqueSimValueSizeRange { + bit_width, + sim_only_values_len, + }) + } + pub fn slice_range( + self, + range: R, + ) -> OpaqueSimValueSizeRange { + self.try_slice_range(range).expect("range out of bounds") + } +} + +impl Mul for OpaqueSimValueSize { + type Output = OpaqueSimValueSize; + + fn mul(self, rhs: usize) -> Self::Output { + self.checked_mul(rhs).expect("multiplication overflowed") + } +} + +impl Mul for usize { + type Output = OpaqueSimValueSize; + + fn mul(self, rhs: OpaqueSimValueSize) -> Self::Output { + rhs.checked_mul(self).expect("multiplication overflowed") + } +} + +impl Add for OpaqueSimValueSize { + type Output = OpaqueSimValueSize; + + fn add(self, rhs: OpaqueSimValueSize) -> Self::Output { + rhs.checked_add(self).expect("addition overflowed") + } +} + +impl Sub for OpaqueSimValueSize { + type Output = OpaqueSimValueSize; + + fn sub(self, rhs: OpaqueSimValueSize) -> Self::Output { + rhs.checked_sub(self).expect("subtraction underflowed") + } +} + +impl MulAssign for OpaqueSimValueSize { + fn mul_assign(&mut self, rhs: usize) { + *self = *self * rhs; + } +} + +impl AddAssign for OpaqueSimValueSize { + fn add_assign(&mut self, rhs: OpaqueSimValueSize) { + *self = *self + rhs; + } +} + +impl SubAssign for OpaqueSimValueSize { + fn sub_assign(&mut self, rhs: OpaqueSimValueSize) { + *self = *self - rhs; + } +} + +impl Sum for OpaqueSimValueSize { + fn sum>(iter: I) -> Self { + iter.fold(OpaqueSimValueSize::empty(), Add::add) } } #[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] pub struct OpaqueSimValue { bits: UIntValue, + #[serde(skip_serializing_if = "Vec::is_empty", default)] + sim_only_values: Vec, } impl OpaqueSimValue { + pub fn empty() -> Self { + Self { + bits: UIntValue::new(Default::default()), + sim_only_values: Vec::new(), + } + } + pub fn with_capacity(capacity: OpaqueSimValueSize) -> Self { + Self { + bits: UIntValue::new(Arc::new(BitVec::with_capacity(capacity.bit_width))), + sim_only_values: Vec::with_capacity(capacity.sim_only_values_len), + } + } + pub fn size(&self) -> OpaqueSimValueSize { + OpaqueSimValueSize { + bit_width: self.bits.width(), + sim_only_values_len: self.sim_only_values.len(), + } + } + pub fn is_empty(&self) -> bool { + self.size().is_empty() + } pub fn bit_width(&self) -> usize { self.bits.width() } @@ -451,11 +761,109 @@ impl OpaqueSimValue { self.bits } pub 
fn from_bits(bits: UIntValue) -> Self { - Self { bits } + Self { + bits, + sim_only_values: Vec::new(), + } } pub fn from_bitslice(v: &BitSlice) -> Self { + Self::from_bitslice_and_sim_only_values(v, Vec::new()) + } + pub fn from_bitslice_and_sim_only_values( + bits: &BitSlice, + sim_only_values: Vec, + ) -> Self { Self { - bits: UIntValue::new(Arc::new(v.to_bitvec())), + bits: UIntValue::new(Arc::new(bits.to_bitvec())), + sim_only_values, + } + } + pub fn from_bits_and_sim_only_values( + bits: UIntValue, + sim_only_values: Vec, + ) -> Self { + Self { + bits, + sim_only_values, + } + } + pub fn into_parts(self) -> (UIntValue, Vec) { + let Self { + bits, + sim_only_values, + } = self; + (bits, sim_only_values) + } + pub fn parts_mut(&mut self) -> (&mut UIntValue, &mut Vec) { + let Self { + bits, + sim_only_values, + } = self; + (bits, sim_only_values) + } + pub fn sim_only_values(&self) -> &[DynSimOnlyValue] { + &self.sim_only_values + } + pub fn sim_only_values_mut(&mut self) -> &mut Vec { + &mut self.sim_only_values + } + pub fn as_slice(&self) -> OpaqueSimValueSlice<'_> { + OpaqueSimValueSlice { + bits: self.bits.bits(), + sim_only_values: &self.sim_only_values, + } + } + pub fn slice(&self, range: R) -> OpaqueSimValueSlice<'_> { + self.as_slice().slice(range) + } + pub fn rewrite_with(&mut self, target_size: OpaqueSimValueSize, f: F) + where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + OpaqueSimValueWriter::rewrite_with(target_size, self, f); + } + pub fn clone_from_slice(&mut self, slice: OpaqueSimValueSlice<'_>) { + let OpaqueSimValueSlice { + bits, + sim_only_values, + } = slice; + self.bits.bits_mut().copy_from_bitslice(bits); + self.sim_only_values.clone_from_slice(sim_only_values); + } + pub fn extend_from_slice(&mut self, slice: OpaqueSimValueSlice<'_>) { + let OpaqueSimValueSlice { + bits, + sim_only_values, + } = slice; + self.bits.bitvec_mut().extend_from_bitslice(bits); + self.sim_only_values.extend_from_slice(sim_only_values); + } +} + +impl<'a> Extend> for OpaqueSimValue { + fn extend>>(&mut self, iter: T) { + let Self { + bits, + sim_only_values, + } = self; + let bits = bits.bitvec_mut(); + for slice in iter { + bits.extend_from_bitslice(slice.bits); + sim_only_values.extend_from_slice(slice.sim_only_values); + } + } +} + +impl Extend for OpaqueSimValue { + fn extend>(&mut self, iter: T) { + let Self { + bits, + sim_only_values, + } = self; + let bits = bits.bitvec_mut(); + for value in iter { + bits.extend_from_bitslice(value.bits().bits()); + sim_only_values.extend_from_slice(value.sim_only_values()); } } } @@ -469,6 +877,207 @@ impl> ToSimValueWithType for OpaqueSimValu } } +#[derive(Copy, Clone, Debug)] +pub struct OpaqueSimValueSlice<'a> { + bits: &'a BitSlice, + sim_only_values: &'a [DynSimOnlyValue], +} + +impl<'a> Default for OpaqueSimValueSlice<'a> { + fn default() -> Self { + Self::empty() + } +} + +impl<'a> OpaqueSimValueSlice<'a> { + pub fn from_parts(bits: &'a BitSlice, sim_only_values: &'a [DynSimOnlyValue]) -> Self { + Self { + bits, + sim_only_values, + } + } + pub fn from_bitslice(bits: &'a BitSlice) -> Self { + Self::from_parts(bits, &[]) + } + pub fn empty() -> Self { + Self { + bits: BitSlice::empty(), + sim_only_values: &[], + } + } + pub fn size(self) -> OpaqueSimValueSize { + OpaqueSimValueSize { + bit_width: self.bit_width(), + sim_only_values_len: self.sim_only_values_len(), + } + } + pub fn is_empty(self) -> bool { + self.size().is_empty() + } + pub fn bit_width(self) -> usize { + 
self.bits.len() + } + pub fn bits(self) -> &'a BitSlice { + self.bits + } + pub fn sim_only_values(self) -> &'a [DynSimOnlyValue] { + self.sim_only_values + } + pub fn sim_only_values_len(self) -> usize { + self.sim_only_values.len() + } + pub fn to_owned(self) -> OpaqueSimValue { + OpaqueSimValue::from_bitslice_and_sim_only_values(self.bits, self.sim_only_values.to_vec()) + } + pub fn slice(self, range: R) -> OpaqueSimValueSlice<'a> { + let start = range.start_bound(); + let end = range.end_bound(); + let bits_range = slice_range( + (start.map(|v| v.bit_width), end.map(|v| v.bit_width)), + self.bit_width(), + ); + let sim_only_values_range = slice_range( + (start.map(|v| v.bit_width), end.map(|v| v.bit_width)), + self.sim_only_values_len(), + ); + Self { + bits: &self.bits[bits_range], + sim_only_values: &self.sim_only_values[sim_only_values_range], + } + } + pub fn split_at(self, index: OpaqueSimValueSize) -> (Self, Self) { + let bits = self.bits.split_at(index.bit_width); + let sim_only_values = self.sim_only_values.split_at(index.sim_only_values_len); + ( + Self { + bits: bits.0, + sim_only_values: sim_only_values.0, + }, + Self { + bits: bits.1, + sim_only_values: sim_only_values.1, + }, + ) + } +} + +#[derive(Debug)] +pub struct OpaqueSimValueWriter<'a> { + bits: &'a mut BitSlice, + sim_only_values: &'a mut Vec, + sim_only_values_range: std::ops::Range, +} + +#[derive(Debug)] +pub struct OpaqueSimValueWritten<'a> { + _phantom: PhantomData<&'a ()>, +} + +impl<'a> OpaqueSimValueWriter<'a> { + pub fn rewrite_with(target_size: OpaqueSimValueSize, value: &mut OpaqueSimValue, f: F) + where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + let OpaqueSimValueWritten { + _phantom: PhantomData, + } = f(OpaqueSimValueWriter::rewrite_helper(target_size, value)); + } + pub(crate) fn rewrite_helper( + target_size: OpaqueSimValueSize, + value: &'a mut OpaqueSimValue, + ) -> Self { + let (bits, sim_only_values) = value.parts_mut(); + let OpaqueSimValueSize { + bit_width, + sim_only_values_len, + } = target_size; + let bits = bits.bitvec_mut(); + bits.resize(bit_width, false); + sim_only_values.truncate(sim_only_values_len); + sim_only_values.reserve_exact(sim_only_values_len - sim_only_values.len()); + Self { + bits, + sim_only_values, + sim_only_values_range: 0..sim_only_values_len, + } + } + pub fn size(&self) -> OpaqueSimValueSize { + OpaqueSimValueSize { + bit_width: self.bit_width(), + sim_only_values_len: self.sim_only_values_len(), + } + } + pub fn bit_width(&self) -> usize { + self.bits.len() + } + pub fn sim_only_values_len(&self) -> usize { + self.sim_only_values_range.len() + } + pub fn is_empty(&self) -> bool { + self.size().is_empty() + } + pub fn fill_cloned_from_slice( + self, + slice: OpaqueSimValueSlice<'_>, + ) -> OpaqueSimValueWritten<'a> { + assert_eq!(self.size(), slice.size()); + let Self { + bits, + sim_only_values, + sim_only_values_range, + } = self; + bits.copy_from_bitslice(slice.bits); + let (clone_from_src, clone_src) = slice.sim_only_values.split_at( + (sim_only_values.len() - sim_only_values_range.start).min(slice.sim_only_values.len()), + ); + sim_only_values[sim_only_values_range][..clone_from_src.len()] + .clone_from_slice(clone_from_src); + sim_only_values.extend_from_slice(clone_src); + OpaqueSimValueWritten { + _phantom: PhantomData, + } + } + pub fn fill_with_zeros(self) -> OpaqueSimValueWritten<'a> { + assert_eq!( + self.size(), + OpaqueSimValueSize::from_bit_width(self.bit_width()), + "can't fill things 
other than bits with zeros", + ); + let Self { + bits, + sim_only_values, + sim_only_values_range, + } = self; + bits.fill(false); + assert_eq!(sim_only_values.len(), sim_only_values_range.end); + OpaqueSimValueWritten { + _phantom: PhantomData, + } + } + pub fn fill_prefix_with(&mut self, prefix_size: OpaqueSimValueSize, f: F) + where + F: for<'b> FnOnce(OpaqueSimValueWriter<'b>) -> OpaqueSimValueWritten<'b>, // 'b is used as a brand + { + let OpaqueSimValueSize { + bit_width, + sim_only_values_len, + } = prefix_size; + assert!(bit_width <= self.bit_width()); + assert!(sim_only_values_len <= self.sim_only_values_len()); + let next_start = self.sim_only_values_range.start + sim_only_values_len; + let OpaqueSimValueWritten { + _phantom: PhantomData, + } = f(OpaqueSimValueWriter { + bits: &mut self.bits[..bit_width], + sim_only_values: self.sim_only_values, + sim_only_values_range: self.sim_only_values_range.start..next_start, + }); + assert!(self.sim_only_values.len() >= next_start); + self.bits = &mut mem::take(&mut self.bits)[bit_width..]; + self.sim_only_values_range.start = next_start; + } +} + pub trait StaticType: Type + Default { const TYPE: Self; const MASK_TYPE: Self::MaskType; diff --git a/crates/fayalite/src/ty/serde_impls.rs b/crates/fayalite/src/ty/serde_impls.rs index 2ea4362..1af2287 100644 --- a/crates/fayalite/src/ty/serde_impls.rs +++ b/crates/fayalite/src/ty/serde_impls.rs @@ -11,6 +11,7 @@ use crate::{ phantom_const::{PhantomConstCanonicalValue, PhantomConstValue}, prelude::PhantomConst, reset::{AsyncReset, Reset, SyncReset}, + sim::value::DynSimOnlyValueType, ty::{BaseType, CanonicalType}, }; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -63,6 +64,7 @@ pub(crate) enum SerdeCanonicalType< Reset, Clock, PhantomConst(ThePhantomConst), + DynSimOnlyValueType(DynSimOnlyValueType), } impl SerdeCanonicalType { @@ -79,6 +81,7 @@ impl SerdeCanonicalType "a Reset", Self::Clock => "a Clock", Self::PhantomConst(_) => "a PhantomConst", + Self::DynSimOnlyValueType(_) => "a SimOnlyValue", } } } @@ -105,6 +108,7 @@ impl From for SerdeCanonicalType { CanonicalType::Reset(Reset {}) => Self::Reset, CanonicalType::Clock(Clock {}) => Self::Clock, CanonicalType::PhantomConst(ty) => Self::PhantomConst(SerdePhantomConst(ty.get())), + CanonicalType::DynSimOnlyValueType(ty) => Self::DynSimOnlyValueType(ty), } } } @@ -125,6 +129,7 @@ impl From for CanonicalType { SerdeCanonicalType::PhantomConst(value) => { Self::PhantomConst(PhantomConst::new(value.0)) } + SerdeCanonicalType::DynSimOnlyValueType(value) => Self::DynSimOnlyValueType(value), } } } diff --git a/crates/fayalite/src/util.rs b/crates/fayalite/src/util.rs index 4670a1f..f3d2c7c 100644 --- a/crates/fayalite/src/util.rs +++ b/crates/fayalite/src/util.rs @@ -36,7 +36,7 @@ pub use scoped_ref::ScopedRef; #[doc(inline)] pub use misc::{ BitSliceWriteWithBase, DebugAsDisplay, DebugAsRawString, MakeMutSlice, RcWriter, interned_bit, - iter_eq_by, + iter_eq_by, slice_range, try_slice_range, }; pub mod job_server; diff --git a/crates/fayalite/src/util/misc.rs b/crates/fayalite/src/util/misc.rs index 99b7343..d70605b 100644 --- a/crates/fayalite/src/util/misc.rs +++ b/crates/fayalite/src/util/misc.rs @@ -5,6 +5,7 @@ use bitvec::{bits, order::Lsb0, slice::BitSlice, view::BitView}; use std::{ cell::Cell, fmt::{self, Debug, Write}, + ops::{Bound, Range, RangeBounds}, rc::Rc, sync::{Arc, OnceLock}, }; @@ -209,3 +210,21 @@ impl std::io::Write for RcWriter { Ok(()) } } + +pub fn try_slice_range>(range: R, size: usize) -> Option> { + let 
start = match range.start_bound() { + Bound::Included(start) => *start, + Bound::Excluded(start) => start.checked_add(1)?, + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(end) => end.checked_add(1)?, + Bound::Excluded(end) => *end, + Bound::Unbounded => size, + }; + (start <= end && end <= size).then_some(start..end) +} + +pub fn slice_range<R: RangeBounds<usize>>(range: R, size: usize) -> Range<usize> { + try_slice_range(range, size).expect("range out of bounds") +}
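
Illustrative usage sketch (not part of the patch): the snippet below exercises the new sim-only value API added above. `Counter`, `demo`, and the exact `fayalite::...`/`bitvec::...` import paths are assumptions made for the example; the `SimOnlyValue`, `DynSimOnlyValue`, `DynSimOnlyValueType`, `OpaqueSimValue`, and `OpaqueSimValueSize` items are the ones introduced in this series.

use bitvec::vec::BitVec;
use fayalite::sim::value::{DynSimOnlyValue, DynSimOnlyValueType, SimOnlyValue, SimOnlyValueType};
use fayalite::ty::{OpaqueSimValue, OpaqueSimValueSize};
use serde::{Deserialize, Serialize};

// Hypothetical user-defined payload; the derives match the bounds required by
// the blanket `SimOnlyValueTrait` impl (Clone + Eq + Hash + Debug + Serialize +
// DeserializeOwned + 'static).
#[derive(Clone, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
struct Counter {
    count: u64,
}

fn demo() {
    // a typed sim-only value and its zero-sized type marker
    let value: SimOnlyValue<Counter> = SimOnlyValue::new(Counter { count: 3 });
    let ty: SimOnlyValueType<Counter> = SimOnlyValueType::new();

    // erase the concrete type; equality, hashing, Debug formatting and JSON
    // (de)serialization all go through the per-type vtable
    let dyn_value: DynSimOnlyValue = SimOnlyValue::into_dyn(value);
    assert_eq!(dyn_value.ty(), DynSimOnlyValueType::from(ty));
    assert!(dyn_value.is::<Counter>());
    assert_eq!(
        dyn_value
            .downcast_ref::<Counter>()
            .expect("type matches")
            .count,
        3
    );

    // an opaque sim value now carries bits plus a list of sim-only values,
    // and its size tracks both components
    let bits = BitVec::repeat(false, 4);
    let opaque = OpaqueSimValue::from_bitslice_and_sim_only_values(&bits, vec![dyn_value]);
    assert_eq!(
        opaque.size(),
        OpaqueSimValueSize::from_bit_width_and_sim_only_values_len(4, 1)
    );
}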