diff --git a/crates/cpu/src/main_memory_and_io.rs b/crates/cpu/src/main_memory_and_io.rs index af35662..8e236ae 100644 --- a/crates/cpu/src/main_memory_and_io.rs +++ b/crates/cpu/src/main_memory_and_io.rs @@ -2,8 +2,18 @@ // See Notices.txt for copyright information use crate::{config::CpuConfig, next_pc::FETCH_BLOCK_ID_WIDTH, util::array_vec::ArrayVec}; -use fayalite::{prelude::*, util::ready_valid::ReadyValid}; -use std::num::{NonZeroU64, NonZeroUsize, Wrapping}; +use fayalite::{ + bundle::BundleType, + expr::Valueless, + int::{UIntInRangeInclusiveType, UIntInRangeType}, + intern::{Intern, Interned}, + prelude::*, + util::ready_valid::{ReadyValid, queue}, +}; +use std::{ + fmt, + num::{NonZeroU64, NonZeroUsize, Wrapping}, +}; pub mod simple_uart; @@ -187,3 +197,418 @@ pub struct MemoryInterface> { pub next_op_ids: HdlOption, MemoryInterfaceQueueCapacity>>, pub config: C, } + +pub const fn memory_interface_always_error_config( + base_config: MemoryInterfaceConfig, +) -> MemoryInterfaceConfig { + let MemoryInterfaceConfig { + log2_bus_width_in_bytes, + queue_capacity: _, + op_id_width, + address_range: _, + } = base_config; + MemoryInterfaceConfig { + log2_bus_width_in_bytes, + queue_capacity: const { NonZeroUsize::new(1).unwrap() }, + op_id_width, + address_range: AddressRange::Full, + } +} + +#[hdl_module] +pub fn memory_interface_always_error(config: PhantomConst) { + assert_eq!( + *config.get(), + memory_interface_always_error_config(*config.get()), + ); + #[hdl] + let input_interface: MemoryInterface> = + m.input(MemoryInterface[config]); + + connect( + input_interface.next_op_ids, + input_interface.ty().next_op_ids.HdlNone(), + ); + connect(input_interface.start.ready, input_interface.finish.ready); + connect( + input_interface.finish.data, + input_interface.ty().finish.data.HdlNone(), + ); + #[hdl] + if let HdlSome(_) = input_interface.start.data { + connect( + input_interface.finish.data, + HdlSome( + #[hdl] + MemoryOperationFinish::<_> { + kind: 
MemoryOperationFinishKind.Error(MemoryOperationErrorKind.Generic()), + read_data: repeat(0u8, MemoryInterfaceBusWidthInBytes[config]), + config, + }, + ), + ); + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct MemoryInterfacesBundleFieldPath(pub Interned<[Interned]>); + +impl MemoryInterfacesBundleFieldPath { + pub fn from_slice(path: &[Interned]) -> Self { + Self(path.intern()) + } +} + +impl fmt::Display for MemoryInterfacesBundleFieldPath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0.is_empty() { + return f.write_str(""); + } + for (i, name) in self.0.iter().enumerate() { + if i != 0 { + f.write_str(".")?; + } + if name.is_empty() || name.contains(|ch: char| !ch.is_ascii_alphanumeric() && ch != '_') + { + write!(f, "{name:?}")?; + } else { + f.write_str(name)?; + } + } + Ok(()) + } +} + +impl fmt::Debug for MemoryInterfacesBundleFieldPath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct MemoryInterfacesBundleField< + T: ValueType>>, +> { + pub path: MemoryInterfacesBundleFieldPath, + pub value: T, +} + +#[derive(Clone, Debug)] +pub struct MemoryInterfacesBundleProperties< + T: ValueType>>, +> { + first_full_interface: Option, + fields: Vec>, +} + +impl>>> + MemoryInterfacesBundleProperties +{ + pub fn first_full_interface(&self) -> Option { + self.first_full_interface + } + pub fn fields(&self) -> &[MemoryInterfacesBundleField] { + &self.fields + } + pub fn into_fields(self) -> Vec> { + self.fields + } + #[track_caller] + pub fn from_fields(fields: Vec>) -> Self { + let mut first_full_interface = None; + for (index, field) in fields.iter().enumerate() { + let ty = field.value.ty(); + assert_eq!( + ty, MemoryInterface[ty.config], + "inconsistent field type: {}", + field.path, + ); + if let None = first_full_interface + && let AddressRange::Full = ty.config.get().address_range + { + first_full_interface = 
Some(index);
            }
        }
        Self {
            first_full_interface,
            fields,
        }
    }

    /// Walks `bundle` recursively, appending every discovered memory
    /// interface (with its dotted path) to `fields`.
    ///
    /// A sub-bundle is treated as a leaf `MemoryInterface` when it has any
    /// flipped field and a field named `config`; otherwise every field must
    /// be another such bundle (recursed into) or a `PhantomConst` (skipped),
    /// and must not be `#[hdl(flip)]`ped.
    ///
    /// The closures abstract over traversing `Valueless` types vs. `Expr`s:
    /// `get_field` projects a field, `interface_from_bundle` converts a leaf
    /// bundle into the stored interface value, and `bundle_from_canonical`
    /// re-wraps a canonical field for recursion.
    /// TODO(review): generic bounds mangled in extraction
    /// (`B: ValueType,` / `C: ValueType,`); restore from upstream.
    #[track_caller]
    fn get_fields<
        B: ValueType,
        C: ValueType,
    >(
        fields: &mut Vec>,
        path_prefix: &mut Vec>,
        bundle: B,
        get_field: impl Copy + Fn(&B, Interned) -> C,
        interface_from_bundle: impl Copy + Fn(B) -> T,
        bundle_from_canonical: impl Copy + Fn(C) -> B,
    ) {
        let bundle_fields = bundle.ty().fields();
        // Heuristic for "this bundle IS a MemoryInterface": it has flipped
        // fields (handshake directions) and a field named `config`.
        if bundle_fields.iter().any(|f| f.flipped)
            && bundle_fields.iter().any(|f| *f.name == *"config")
        {
            let value = interface_from_bundle(bundle);
            fields.push(MemoryInterfacesBundleField {
                path: MemoryInterfacesBundleFieldPath::from_slice(path_prefix),
                value,
            });
        } else {
            for f in &bundle_fields {
                assert!(
                    !f.flipped,
                    "field must not have #[hdl(flip)]: {}",
                    MemoryInterfacesBundleFieldPath::from_slice(path_prefix),
                );
                let field = get_field(&bundle, f.name);
                match field.ty() {
                    CanonicalType::Bundle(_) => {
                        // Recurse with this field's name pushed onto the path.
                        path_prefix.push(f.name);
                        Self::get_fields(
                            fields,
                            path_prefix,
                            bundle_from_canonical(field),
                            get_field,
                            interface_from_bundle,
                            bundle_from_canonical,
                        );
                        path_prefix.pop();
                    }
                    CanonicalType::PhantomConst(_) => continue,
                    _ => panic!(
                        "field type must be either a MemoryInterfacesBundle or a PhantomConst: {}",
                        MemoryInterfacesBundleFieldPath::from_slice(path_prefix),
                    ),
                }
            }
        }
    }
}

/// `Self` is a bundle where either:
/// * `Self` is a [`MemoryInterface>`]
/// * each field is a [`MemoryInterfacesBundle`] or a [`PhantomConst`] and none of the fields have `#[hdl(flip)]`
pub trait MemoryInterfacesBundle: BundleType {
    /// Type-level traversal: collects the interface fields of `self`'s type
    /// without needing a value, using `Valueless` wrappers throughout.
    #[track_caller]
    fn properties_valueless(
        self,
        mut path_prefix: Vec>,
    ) -> MemoryInterfacesBundleProperties<
        Valueless>>,
    > {
        let mut fields = Vec::new();
        MemoryInterfacesBundleProperties::get_fields(
            &mut fields,
            &mut path_prefix,
            Valueless::new(Bundle::new(self.fields())),
            |&bundle, name| {
                Valueless::new(
                    bundle
                        .ty()
                        .field_by_name(name)
                        .expect("field is known to exist")
                        .ty,
                )
            },
            |bundle| Valueless::new(MemoryInterface::from_canonical(bundle.ty().canonical())),
            |canonical| Valueless::new(Bundle::from_canonical(canonical.ty())),
        );
        MemoryInterfacesBundleProperties::from_fields(fields)
    }
    /// Expression-level traversal: collects the interface fields of an actual
    /// expression of this bundle type, as connectable `Expr`s.
    #[track_caller]
    fn properties_expr(
        this: impl ToExpr,
        mut path_prefix: Vec>,
    ) -> MemoryInterfacesBundleProperties>>>
    {
        let mut fields = Vec::new();
        MemoryInterfacesBundleProperties::get_fields(
            &mut fields,
            &mut path_prefix,
            Expr::as_bundle(this.to_expr()),
            |&bundle, name| Expr::field(bundle, &name),
            Expr::from_bundle,
            Expr::from_canonical,
        );
        MemoryInterfacesBundleProperties::from_fields(fields)
    }
}

/// Adaptor that fans a single input memory interface out to a bundle of
/// output interfaces, routing each operation to the matching interface.
/// Unfinished: see the `todo!()`s in the dispatch/finish datapath.
#[hdl_module]
pub fn memory_interface_adaptor(
    input_interface_config: PhantomConst,
    output_interfaces_ty: OutputInterfaces,
) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let input_interface: MemoryInterface> =
        m.input(MemoryInterface[input_interface_config]);
    #[hdl]
    let output_interfaces: OutputInterfaces = m.output(output_interfaces_ty);
    // Flatten the output bundle into an ordered list of interface fields.
    let mut output_interfaces = MemoryInterfacesBundle::properties_expr(
        output_interfaces,
        vec!["output_interfaces".intern()],
    );
    if let Some(index) = output_interfaces.first_full_interface() {
        // A full-range interface swallows every address, so any interface
        // after it would be dead; require it to be last.
        assert!(
            index == output_interfaces.fields().len() - 1,
            "all fields after {path} will never be used since {path} comes before and handles all addresses",
            path = output_interfaces.fields()[index].path,
        );
    } else {
        // No catch-all present: append an always-error responder so every
        // address is handled (unmatched ops fail instead of hanging).
        #[hdl]
        let always_error = instance(memory_interface_always_error(PhantomConst::new_sized(
            memory_interface_always_error_config(*input_interface_config.get()),
        )));
        let mut fields = output_interfaces.into_fields();
        fields.push(MemoryInterfacesBundleField {
            path: MemoryInterfacesBundleFieldPath::from_slice(&[
                "always_error".intern(),
                "input_interface".intern(),
            ]),
            value: always_error.input_interface,
        });
        output_interfaces = MemoryInterfacesBundleProperties::from_fields(fields);
}

    /// Emits an if / else-if / else chain over all output interfaces:
    /// `visit_selected(i, field_i)` runs with its statements scoped so they
    /// only take effect when `selected_output_interface == i`; the last
    /// interface is the unconditional `else` arm (it is either the full-range
    /// interface or the appended always-error catch-all).
    ///
    /// NOTE(review): each iteration overwrites `else_scope`, dropping the
    /// previous else-scope guard *after* the nested `if_`/`else_` guards were
    /// created — confirm fayalite's scope guards tolerate this drop order.
    /// TODO(review): parameter generics mangled in extraction.
    fn visit_selected_output_interface(
        output_interfaces: &MemoryInterfacesBundleProperties<
            Expr>>,
        >,
        selected_output_interface: impl ToExpr, DynSize>>,
        mut visit_selected: impl FnMut(
            usize,
            MemoryInterfacesBundleField>>>,
        ),
    ) {
        let selected_output_interface = selected_output_interface.to_expr();
        let mut else_scope = None;
        for (i, &interface) in output_interfaces.fields().iter().enumerate() {
            if i == output_interfaces.fields().len() - 1 {
                // just else, no else if
                visit_selected(i, interface);
            } else {
                let if_scope = fayalite::module::if_(selected_output_interface.cmp_eq(i));
                visit_selected(i, interface);
                else_scope = Some(if_scope.else_());
            }
        }
        drop(else_scope);
    }

    /// Per-operation bookkeeping carried through the adaptor: the original
    /// start payload, the output interface it was routed to, and
    /// byte-granular start/finish progress counters.
    /// TODO(review): generic parameter lists mangled in extraction.
    #[hdl(no_static)]
    struct Op, OutputInterfaceCount: Size> {
        start: MemoryOperationStart,
        output_interface_index: UIntInRangeType, OutputInterfaceCount>,
        started_bytes: UIntInRangeInclusiveType, MemoryInterfaceBusWidthInBytes>,
        finished_bytes: UIntInRangeInclusiveType, MemoryInterfaceBusWidthInBytes>,
    }

    let op_ty = Op[input_interface_config][output_interfaces.fields().len()];
    let option_op_ty = HdlOption[op_ty];

    // Small staging queue between the input `start` handshake and dispatch.
    #[hdl]
    let not_started_op_queue = instance(queue(
        op_ty,
        const { NonZeroUsize::new(2).unwrap() },
        false,
        true,
    ));

    connect(not_started_op_queue.cd, cd);
    connect(
        not_started_op_queue.inp.data,
        HdlOption::map(input_interface.start.data, |start| {
            #[hdl]
            let output_interface_index = wire(op_ty.output_interface_index);
            // Default route: the last interface (the catch-all). Presumably
            // address decode is meant to override this — module is unfinished.
            connect(
                output_interface_index,
                (output_interfaces.fields().len() - 1).cast_to(output_interface_index.ty()),
            );
            #[hdl]
            Op::<_, _> {
                start,
                output_interface_index,
                started_bytes: 0u8.cast_to(op_ty.started_bytes),
                finished_bytes: 0u8.cast_to(op_ty.finished_bytes),
            }
        }),
    );
    connect(input_interface.start.ready, not_started_op_queue.inp.ready);

    // Holds the op currently being issued to an output interface, if any.
    #[hdl]
    let starting_op_reg = reg_builder().clock_domain(cd).reset(option_op_ty.HdlNone());

    #[hdl]
    let starting_op_in = wire(option_op_ty);

    // Prefer the in-flight op; only pop the queue when the register is empty.
    #[hdl]
    if let HdlSome(_) = starting_op_reg {
        connect(starting_op_in, starting_op_reg);
        connect(not_started_op_queue.out.ready, false);
    } else {
        connect(starting_op_in, not_started_op_queue.out.data);
        connect(not_started_op_queue.out.ready, true);
    }

    // Default: no output interface sees a start request this cycle.
    for output_interface in output_interfaces.fields() {
        connect(
            output_interface.value.start.data,
            output_interface.value.ty().start.data.HdlNone(),
        );
    }

    #[hdl]
    if let HdlSome(starting_op_in) = starting_op_in {
        #[hdl]
        let Op::<_, _> {
            start,
            output_interface_index,
            started_bytes,
            finished_bytes,
        } = starting_op_in;
        #[hdl]
        let MemoryOperationStart::<_> {
            kind,
            addr,
            write_data,
            rw_mask,
            op_id,
            config,
        } = start;
        visit_selected_output_interface(
            &output_interfaces,
            output_interface_index,
            |_, output_interface| {
                // WIP: only `kind` is forwarded so far; addr / write_data /
                // rw_mask / op_id / config still need to be driven.
                connect(
                    output_interface.value.start.data,
                    #[hdl]
                    MemoryOperationStart::<_> { kind },
                );
                // NOTE(review): `output_interface.value.start` is a
                // ready/valid bundle, not a Bool — this condition looks like
                // a WIP placeholder. The body was a bare `todo()` (no such
                // function in scope); fixed to the `todo!` macro.
                #[hdl]
                if output_interface.value.start {
                    todo!();
                }
                todo!();
            },
        );
    } else {
        connect(starting_op_reg, option_op_ty.HdlNone());
    }

    // Ops that have been issued and await completion. Not yet wired up.
    #[hdl]
    let started_op_queue = instance(queue(
        op_ty,
        input_interface_config.get().queue_capacity,
        false,
        false,
    ));

    #[hdl]
    let finishing_op_reg = reg_builder().clock_domain(cd).reset(option_op_ty.HdlNone());

    // Remaining datapath (finish routing, op-id tracking) not yet implemented.
    todo!()
}