diff --git a/crates/cpu/src/main_memory_and_io.rs b/crates/cpu/src/main_memory_and_io.rs index af35662..61857a5 100644 --- a/crates/cpu/src/main_memory_and_io.rs +++ b/crates/cpu/src/main_memory_and_io.rs @@ -2,8 +2,23 @@ // See Notices.txt for copyright information use crate::{config::CpuConfig, next_pc::FETCH_BLOCK_ID_WIDTH, util::array_vec::ArrayVec}; -use fayalite::{prelude::*, util::ready_valid::ReadyValid}; -use std::num::{NonZeroU64, NonZeroUsize, Wrapping}; +use fayalite::{ + bundle::BundleType, + expr::Valueless, + int::{UIntInRangeInclusiveType, UIntInRangeType}, + intern::{Intern, Interned}, + module::wire_with_loc, + prelude::*, + util::{ + prefix_sum, + ready_valid::{ReadyValid, queue}, + }, +}; +use std::{ + cmp::Ordering, + fmt, + num::{NonZeroU64, NonZeroUsize, Wrapping}, +}; pub mod simple_uart; @@ -180,6 +195,7 @@ pub struct MemoryOperationFinish> { #[hdl(no_static)] pub struct MemoryInterface> { pub start: ReadyValid>, + /// started operations must finish in the same order they started in #[hdl(flip)] pub finish: ReadyValid>, /// for debugging @@ -187,3 +203,981 @@ pub struct MemoryInterface> { pub next_op_ids: HdlOption, MemoryInterfaceQueueCapacity>>, pub config: C, } + +pub const fn memory_interface_always_error_config( + base_config: MemoryInterfaceConfig, +) -> MemoryInterfaceConfig { + let MemoryInterfaceConfig { + log2_bus_width_in_bytes, + queue_capacity: _, + op_id_width, + address_range: _, + } = base_config; + MemoryInterfaceConfig { + log2_bus_width_in_bytes, + queue_capacity: const { NonZeroUsize::new(1).unwrap() }, + op_id_width, + address_range: AddressRange::Full, + } +} + +#[hdl_module] +pub fn memory_interface_always_error(config: PhantomConst) { + assert_eq!( + *config.get(), + memory_interface_always_error_config(*config.get()), + ); + #[hdl] + let input_interface: MemoryInterface> = + m.input(MemoryInterface[config]); + + connect( + input_interface.next_op_ids, + input_interface.ty().next_op_ids.HdlNone(), + ); + 
connect(input_interface.start.ready, input_interface.finish.ready); + connect( + input_interface.finish.data, + input_interface.ty().finish.data.HdlNone(), + ); + #[hdl] + if let HdlSome(_) = input_interface.start.data { + connect( + input_interface.finish.data, + HdlSome( + #[hdl] + MemoryOperationFinish::<_> { + kind: MemoryOperationFinishKind.Error(MemoryOperationErrorKind.Generic()), + read_data: repeat(0u8, MemoryInterfaceBusWidthInBytes[config]), + config, + }, + ), + ); + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct MemoryInterfacesBundleFieldPath(pub Interned<[Interned]>); + +impl MemoryInterfacesBundleFieldPath { + pub fn from_slice(path: &[Interned]) -> Self { + Self(path.intern()) + } +} + +impl fmt::Display for MemoryInterfacesBundleFieldPath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0.is_empty() { + return f.write_str(""); + } + for (i, name) in self.0.iter().enumerate() { + if i != 0 { + f.write_str(".")?; + } + if name.is_empty() || name.contains(|ch: char| !ch.is_ascii_alphanumeric() && ch != '_') + { + write!(f, "{name:?}")?; + } else { + f.write_str(name)?; + } + } + Ok(()) + } +} + +impl fmt::Debug for MemoryInterfacesBundleFieldPath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct MemoryInterfacesBundleField< + T: ValueType>>, +> { + pub path: MemoryInterfacesBundleFieldPath, + pub value: T, +} + +#[derive(Clone, Debug)] +pub struct MemoryInterfacesBundleProperties< + T: ValueType>>, +> { + first_full_interface: Option, + fields: Vec>, +} + +impl>>> + MemoryInterfacesBundleProperties +{ + pub fn first_full_interface(&self) -> Option { + self.first_full_interface + } + pub fn fields(&self) -> &[MemoryInterfacesBundleField] { + &self.fields + } + pub fn into_fields(self) -> Vec> { + self.fields + } + #[track_caller] + pub fn from_fields(fields: Vec>) -> Self { + let mut 
first_full_interface = None; + for (index, field) in fields.iter().enumerate() { + let ty = field.value.ty(); + assert_eq!( + ty, MemoryInterface[ty.config], + "inconsistent field type: {}", + field.path, + ); + if let None = first_full_interface + && let AddressRange::Full = ty.config.get().address_range + { + first_full_interface = Some(index); + } + } + Self { + first_full_interface, + fields, + } + } + #[track_caller] + fn get_fields< + B: ValueType, + C: ValueType, + >( + fields: &mut Vec>, + path_prefix: &mut Vec>, + bundle: B, + get_field: impl Copy + Fn(&B, Interned) -> C, + interface_from_bundle: impl Copy + Fn(B) -> T, + bundle_from_canonical: impl Copy + Fn(C) -> B, + ) { + let bundle_fields = bundle.ty().fields(); + if bundle_fields.iter().any(|f| f.flipped) + && bundle_fields.iter().any(|f| *f.name == *"config") + { + let value = interface_from_bundle(bundle); + fields.push(MemoryInterfacesBundleField { + path: MemoryInterfacesBundleFieldPath::from_slice(path_prefix), + value, + }); + } else { + for f in &bundle_fields { + assert!( + !f.flipped, + "field must not have #[hdl(flip)]: {}", + MemoryInterfacesBundleFieldPath::from_slice(path_prefix), + ); + let field = get_field(&bundle, f.name); + match field.ty() { + CanonicalType::Bundle(_) => { + path_prefix.push(f.name); + Self::get_fields( + fields, + path_prefix, + bundle_from_canonical(field), + get_field, + interface_from_bundle, + bundle_from_canonical, + ); + path_prefix.pop(); + } + CanonicalType::PhantomConst(_) => continue, + _ => panic!( + "field type must be either a MemoryInterfacesBundle or a PhantomConst: {}", + MemoryInterfacesBundleFieldPath::from_slice(path_prefix), + ), + } + } + } + } +} + +/// `Self` is a bundle where either: +/// * `Self` is a [`MemoryInterface>`] +/// * each field is a [`MemoryInterfacesBundle`] or a [`PhantomConst`] and none of the fields have `#[hdl(flip)]` +pub trait MemoryInterfacesBundle: BundleType { + #[track_caller] + fn properties_valueless( + self, + mut 
path_prefix: Vec>, + ) -> MemoryInterfacesBundleProperties< + Valueless>>, + > { + let mut fields = Vec::new(); + MemoryInterfacesBundleProperties::get_fields( + &mut fields, + &mut path_prefix, + Valueless::new(Bundle::new(self.fields())), + |&bundle, name| { + Valueless::new( + bundle + .ty() + .field_by_name(name) + .expect("field is known to exist") + .ty, + ) + }, + |bundle| Valueless::new(MemoryInterface::from_canonical(bundle.ty().canonical())), + |canonical| Valueless::new(Bundle::from_canonical(canonical.ty())), + ); + MemoryInterfacesBundleProperties::from_fields(fields) + } + #[track_caller] + fn properties_expr( + this: impl ToExpr, + mut path_prefix: Vec>, + ) -> MemoryInterfacesBundleProperties>>> + { + let mut fields = Vec::new(); + MemoryInterfacesBundleProperties::get_fields( + &mut fields, + &mut path_prefix, + Expr::as_bundle(this.to_expr()), + |&bundle, name| Expr::field(bundle, &name), + Expr::from_bundle, + Expr::from_canonical, + ); + MemoryInterfacesBundleProperties::from_fields(fields) + } +} + +pub fn memory_interface_resize_adaptor_no_split_input_config( + output_config: PhantomConst, + input_log2_bus_width_in_bytes: u8, +) -> PhantomConst { + let MemoryInterfaceConfig { + log2_bus_width_in_bytes: _, + queue_capacity, + op_id_width, + address_range, + } = *output_config.get(); + PhantomConst::new_sized(MemoryInterfaceConfig { + log2_bus_width_in_bytes: input_log2_bus_width_in_bytes, + queue_capacity, + op_id_width, + address_range, + }) +} + +#[hdl] +fn get_shrink_no_split_output_start( + input_start: Expr>>, + output_config: PhantomConst, +) -> Expr>>> { + #[hdl] + let MemoryOperationStart::<_> { + kind, + addr, + write_data, + rw_mask, + op_id, + config: _, + } = input_start; + let input_config = input_start.ty().config; + let output_bus_width_in_bytes = output_config.get().bus_width_in_bytes(); + let input_bus_width_in_bytes = input_config.get().bus_width_in_bytes(); + assert_eq!(input_bus_width_in_bytes % output_bus_width_in_bytes, 
0); + + #[hdl(no_static)] + struct OutputStartData> { + addr: UInt<64>, + write_data: ArrayType, MemoryInterfaceBusWidthInBytes>, + rw_mask: ArrayType>, + config: C, + } + + #[hdl(no_static)] + struct ReduceState { + at_least_one: Bool, + more_than_one: Bool, + output_start_data: T, + } + + let output_start_data_ty = OutputStartData[output_config]; + let reduce_state_ty = ReduceState[output_start_data_ty]; + + let Some((reduce_state, _)) = prefix_sum::reduce( + rw_mask + .chunks(output_bus_width_in_bytes) + .zip(write_data.chunks(output_bus_width_in_bytes)) + .enumerate() + .map(|(index, (rw_mask_chunk, write_data_chunk))| { + let reduce_state = wire_with_loc( + &format!("reduce_state_{index}"), + SourceLocation::caller(), + reduce_state_ty, + ); + connect( + reduce_state.at_least_one, + rw_mask_chunk.cast_to_bits().any_one_bits(), + ); + connect(reduce_state.more_than_one, false); + connect( + reduce_state.output_start_data, + UInt[output_start_data_ty.canonical().bit_width()] + .zero() + .cast_bits_to(output_start_data_ty), + ); + #[hdl] + if reduce_state.at_least_one { + #[hdl] + let OutputStartData::<_> { + addr: output_addr, + write_data: output_write_data, + rw_mask: output_rw_mask, + config: _, + } = reduce_state.output_start_data; + let start_byte = index * output_bus_width_in_bytes; + let byte_range = start_byte..start_byte + output_bus_width_in_bytes; + connect_any(output_addr, addr | start_byte); + for ((i, output_write_data), output_rw_mask) in + byte_range.zip(output_write_data).zip(output_rw_mask) + { + connect(output_write_data, write_data[i]); + connect(output_rw_mask, rw_mask[i]); + } + } + (reduce_state, format!("{index}")) + }), + |(l, l_str), (r, r_str)| -> (Expr>, String) { + let out_str = l_str + "_" + &*r_str; + let reduce_state = wire_with_loc( + &format!("reduce_state_{out_str}"), + SourceLocation::caller(), + reduce_state_ty, + ); + connect( + reduce_state, + #[hdl] + ReduceState::<_> { + at_least_one: l.at_least_one | r.at_least_one, + 
more_than_one: l.more_than_one + | r.more_than_one + | (l.at_least_one & r.at_least_one), + output_start_data: (l.output_start_data.cast_to_bits() + | r.output_start_data.cast_to_bits()) + .cast_bits_to(output_start_data_ty), + }, + ); + (reduce_state, out_str) + }, + ) else { + unreachable!("known to be non-empty"); + }; + #[hdl] + let ReduceState::<_> { + at_least_one, + more_than_one, + output_start_data, + } = reduce_state; + + #[hdl] + let shrink_no_split_output_start = wire(HdlOption[MemoryOperationStart[output_config]]); + + #[hdl] + if more_than_one { + connect( + shrink_no_split_output_start, + shrink_no_split_output_start.ty().HdlNone(), + ); + } else { + #[hdl] + let OutputStartData::<_> { + addr: output_start_data_addr, + write_data, + rw_mask, + config: _, + } = output_start_data; + #[hdl] + let output_addr = wire(); + #[hdl] + if at_least_one { + connect(output_addr, output_start_data_addr); + } else { + connect(output_addr, addr); + } + connect( + shrink_no_split_output_start, + HdlSome( + #[hdl] + MemoryOperationStart::<_> { + kind, + addr: output_addr, + write_data, + rw_mask, + op_id, + config: output_config, + }, + ), + ); + } + + shrink_no_split_output_start +} + +#[hdl_module] +fn memory_interface_shrink_adaptor_no_split( + output_config: PhantomConst, + input_log2_bus_width_in_bytes: u8, +) { + let queue_capacity = output_config.get().queue_capacity; + let output_bus_width_in_bytes = output_config.get().bus_width_in_bytes(); + let input_config = memory_interface_resize_adaptor_no_split_input_config( + output_config, + input_log2_bus_width_in_bytes, + ); + let input_bus_width_in_bytes = input_config.get().bus_width_in_bytes(); + assert_eq!(input_bus_width_in_bytes % output_bus_width_in_bytes, 0); + + #[hdl] + let cd: ClockDomain = m.input(); + #[hdl] + let input_interface: MemoryInterface> = + m.input(MemoryInterface[input_config]); + #[hdl] + let output_interface: MemoryInterface> = + m.output(MemoryInterface[output_config]); + + #[hdl] + 
struct QueueEntry { + is_error: Bool, + } + + #[hdl] + let queue = instance(queue(QueueEntry, queue_capacity, false, false)); + + connect(queue.cd, cd); + + #[hdl] + if !queue.inp.ready { + connect(input_interface.start.ready, false); + connect( + output_interface.start.data, + output_interface.ty().start.data.HdlNone(), + ); + connect(queue.inp.data, queue.ty().inp.data.HdlNone()); + } else { + connect(input_interface.start.ready, output_interface.start.ready); + #[hdl] + if let HdlSome(input_start) = input_interface.start.data { + let output_start = get_shrink_no_split_output_start(input_start, output_config); + connect(output_interface.start.data, output_start); + #[hdl] + if let HdlSome(_) = output_start { + #[hdl] + if input_interface.start.ready { + connect( + queue.inp.data, + HdlSome( + #[hdl] + QueueEntry { is_error: false }, + ), + ); + } else { + connect(queue.inp.data, HdlNone()); + } + } else { + connect( + queue.inp.data, + HdlSome( + #[hdl] + QueueEntry { is_error: true }, + ), + ); + } + } else { + connect( + output_interface.start.data, + output_interface.ty().start.data.HdlNone(), + ); + connect(queue.inp.data, queue.ty().inp.data.HdlNone()); + } + } + + connect(queue.out.ready, input_interface.finish.ready); + + #[hdl] + if let HdlSome(queue_out) = queue.out.data { + #[hdl] + let QueueEntry { is_error } = queue_out; + #[hdl] + if is_error { + connect( + input_interface.finish.data, + HdlSome( + #[hdl] + MemoryOperationFinish::<_> { + kind: MemoryOperationFinishKind.Error(MemoryOperationErrorKind.Generic()), + read_data: repeat(0u8, input_bus_width_in_bytes), + config: output_config, + }, + ), + ); + connect(output_interface.finish.ready, false); + } else { + connect(output_interface.finish.ready, input_interface.finish.ready); + #[hdl] + if let HdlSome(output_finish) = output_interface.finish.data { + #[hdl] + let MemoryOperationFinish::<_> { + kind, + read_data, + config: _, + } = output_finish; + #[hdl] + let input_finish = 
wire(input_interface.ty().finish.data.HdlSome); + connect(input_finish.config, input_finish.ty().config); + connect(input_finish.kind, kind); + for (l, r) in input_finish + .read_data + .into_iter() + .zip(read_data.into_iter().cycle()) + { + connect(l, r); + } + connect(input_interface.finish.data, HdlSome(input_finish)); + } else { + connect( + input_interface.finish.data, + input_interface.ty().finish.data.HdlNone(), + ); + } + } + } else { + connect(output_interface.finish.ready, false); + connect( + input_interface.finish.data, + input_interface.ty().finish.data.HdlNone(), + ); + } +} + +#[hdl_module] +pub fn memory_interface_resize_adaptor_no_split( + output_config: PhantomConst, + input_log2_bus_width_in_bytes: u8, +) { + let input_config = memory_interface_resize_adaptor_no_split_input_config( + output_config, + input_log2_bus_width_in_bytes, + ); + #[hdl] + let cd: ClockDomain = m.input(); + #[hdl] + let input_interface: MemoryInterface> = + m.input(MemoryInterface[input_config]); + #[hdl] + let output_interface: MemoryInterface> = + m.output(MemoryInterface[output_config]); + match output_config + .get() + .log2_bus_width_in_bytes + .cmp(&input_log2_bus_width_in_bytes) + { + Ordering::Less => { + #[hdl] + let shrink_adaptor = instance(memory_interface_shrink_adaptor_no_split( + output_config, + input_log2_bus_width_in_bytes, + )); + connect(shrink_adaptor.cd, cd); + connect(shrink_adaptor.input_interface, input_interface); + connect(output_interface, shrink_adaptor.output_interface); + } + Ordering::Equal => { + connect(output_interface, input_interface); + return; + } + Ordering::Greater => todo!( + "connecting a input memory interface to a output memory interface with larger bus width" + ), + } +} + +fn memory_interface_resize_adaptor_input_config( + output_config: PhantomConst, + input_log2_bus_width_in_bytes: u8, + max_input_queue_capacity: Option, +) -> PhantomConst { + let output_config = *output_config.get(); + let MemoryInterfaceConfig { + 
log2_bus_width_in_bytes: _, + queue_capacity: output_queue_capacity, + op_id_width, + address_range, + } = output_config; + let mut input_config = MemoryInterfaceConfig { + log2_bus_width_in_bytes: input_log2_bus_width_in_bytes, + queue_capacity: output_queue_capacity, + op_id_width, + address_range, + }; + if let Some(ratio) = + NonZeroUsize::new(input_config.bus_width_in_bytes() / output_config.bus_width_in_bytes()) + { + input_config.queue_capacity = output_queue_capacity.div_ceil(ratio) + } + if let Some(max_input_queue_capacity) = max_input_queue_capacity + && max_input_queue_capacity < input_config.queue_capacity + { + input_config.queue_capacity = max_input_queue_capacity; + } + PhantomConst::new_sized(input_config) +} + +#[hdl_module] +fn memory_interface_resize_adaptor( + input_config: PhantomConst, + output_config: PhantomConst, +) { + let expected_input_config = memory_interface_resize_adaptor_input_config( + output_config, + input_config.get().log2_bus_width_in_bytes, + Some(input_config.get().queue_capacity), + ); + assert_eq!(input_config, expected_input_config); + #[hdl] + let cd: ClockDomain = m.input(); + #[hdl] + let input_interface: MemoryInterface> = + m.input(MemoryInterface[input_config]); + #[hdl] + let output_interface: MemoryInterface> = + m.output(MemoryInterface[output_config]); + + let log2_split_count = output_config + .get() + .log2_bus_width_in_bytes + .saturating_sub(input_config.get().log2_bus_width_in_bytes); + let split_count = 1usize.strict_shl(log2_split_count.into()); + let log2_split_count = usize::from(log2_split_count); + + connect( + input_interface.next_op_ids, + input_interface.ty().next_op_ids.HdlNone(), + ); + + #[hdl(no_static)] + struct Entry, SplitCount: Size> { + addr: UInt<64>, + active_split_chunks: ArrayType, + config: C, + } + + #[hdl] + let queue = instance(queue( + Entry[input_config][split_count], + input_config.get().queue_capacity, + false, + false, + )); + + connect(queue.cd, cd); + + #[hdl(no_static)] + 
struct IncompleteStartState, SplitCount: Size> { + start: MemoryOperationStart, + active_split_chunks: ArrayType, + split_chunks_left: ArrayType, + } + + #[hdl] + let incomplete_start_state_reg = reg_builder() + .clock_domain(cd) + .reset(HdlOption[IncompleteStartState[input_config][split_count]].HdlNone()); + + connect(input_interface.start.ready, queue.inp.ready); + connect( + output_interface.start.data, + output_interface.ty().start.data.HdlNone(), + ); + + #[hdl] + let prev_incomplete_start_state = wire(incomplete_start_state_reg.ty()); + + #[hdl] + if let HdlSome(start) = input_interface.start.data { + #[hdl] + let start_active_split_chunks = wire(Array[Bool][split_count]); + + #[hdl] + if start.rw_mask.cast_to_bits().all_zero_bits() { + // if rw_mask is all false, set start_active_split_chunks to [true, false, false, false, ...] so we make at least one transaction on output_interface + connect(start_active_split_chunks, repeat(false, split_count)); + connect(start_active_split_chunks[0], true); + } else { + // otherwise, split rw_mask into chunks of size at most output_config.get().bus_width_in_bytes() + for (split_chunk, rw_mask_chunk) in start_active_split_chunks.into_iter().zip( + start + .rw_mask + .chunks(output_config.get().bus_width_in_bytes()), + ) { + connect(split_chunk, rw_mask_chunk.cast_to_bits().any_one_bits()); + } + } + + connect( + prev_incomplete_start_state, + HdlSome( + #[hdl] + IncompleteStartState::<_, _> { + start, + active_split_chunks: start_active_split_chunks, + split_chunks_left: start_active_split_chunks, + }, + ), + ); + } else { + connect( + prev_incomplete_start_state, + prev_incomplete_start_state.ty().HdlNone(), + ); + } + + #[hdl] + if let HdlSome(_) = incomplete_start_state_reg { + connect(input_interface.start.ready, false); + connect(prev_incomplete_start_state, incomplete_start_state_reg); + } + + connect( + incomplete_start_state_reg, + incomplete_start_state_reg.ty().HdlNone(), + ); + + #[hdl] + if let 
HdlSome(incomplete_start_state) = prev_incomplete_start_state { + #[hdl] + let IncompleteStartState::<_, _> { + start, + active_split_chunks, + split_chunks_left, + } = incomplete_start_state; + // we know at least one element of split_chunks_left is true + #[hdl] + let start_split_chunk_selected = wire(UInt[log2_split_count]); + + // use last-connect semantics to get the index of the first element of split_chunks_left that's true + for (i, split_chunk_left) in split_chunks_left.into_iter().enumerate().rev() { + if i == split_count - 1 { + connect_any(start_split_chunk_selected, i); + continue; + } + #[hdl] + if split_chunk_left { + connect_any(start_split_chunk_selected, i); + } + } + + let output_data_shift = todo!(); + + connect( + output_interface.start.data, + HdlSome( + #[hdl] + MemoryOperationStart::<_> { + kind: start.kind, + addr: start.addr + + start_split_chunk_selected * todo!("chunk size") + + todo!("mask addr"), + write_data, + rw_mask, + op_id, + config, + }, + ), + ); + } + + #[hdl(no_static)] + struct IncompleteFinishState, SplitCount: Size> { + addr: UInt<64>, + finish: MemoryOperationFinish, + split_chunks_left: ArrayType, + } + + #[hdl] + let incomplete_finish_state_reg = reg_builder() + .clock_domain(cd) + .reset(HdlOption[IncompleteFinishState[input_config][split_count]].HdlNone()); + + todo!(); +} + +#[hdl_module] +pub fn memory_interface_adaptor( + input_interface_config: PhantomConst, + output_interfaces_ty: OutputInterfaces, +) { + #[hdl] + let cd: ClockDomain = m.input(); + #[hdl] + let input_interface: MemoryInterface> = + m.input(MemoryInterface[input_interface_config]); + #[hdl] + let output_interfaces: OutputInterfaces = m.output(output_interfaces_ty); + let mut output_interfaces = MemoryInterfacesBundle::properties_expr( + output_interfaces, + vec!["output_interfaces".intern()], + ); + if let Some(index) = output_interfaces.first_full_interface() { + assert!( + index == output_interfaces.fields().len() - 1, + "all fields after {path} 
will never be used since {path} comes before and handles all addresses", + path = output_interfaces.fields[index].path, + ); + } else { + #[hdl] + let always_error = instance(memory_interface_always_error(PhantomConst::new_sized( + memory_interface_always_error_config(*input_interface_config.get()), + ))); + let mut fields = output_interfaces.into_fields(); + fields.push(MemoryInterfacesBundleField { + path: MemoryInterfacesBundleFieldPath::from_slice(&[ + "always_error".intern(), + "input_interface".intern(), + ]), + value: always_error.input_interface, + }); + output_interfaces = MemoryInterfacesBundleProperties::from_fields(fields); + } + + fn visit_selected_output_interface( + output_interfaces: &MemoryInterfacesBundleProperties< + Expr>>, + >, + selected_output_interface: impl ToExpr, DynSize>>, + mut visit_selected: impl FnMut( + usize, + MemoryInterfacesBundleField>>>, + ), + ) { + let selected_output_interface = selected_output_interface.to_expr(); + let mut else_scope = None; + for (i, &interface) in output_interfaces.fields().iter().enumerate() { + if i == output_interfaces.fields().len() - 1 { + // just else, no else if + visit_selected(i, interface); + } else { + let if_scope = fayalite::module::if_(selected_output_interface.cmp_eq(i)); + visit_selected(i, interface); + else_scope = Some(if_scope.else_()); + } + } + drop(else_scope); + } + + #[hdl(no_static)] + struct Op, OutputInterfaceCount: Size> { + start: MemoryOperationStart, + output_interface_index: UIntInRangeType, OutputInterfaceCount>, + started_bytes: UIntInRangeInclusiveType, MemoryInterfaceBusWidthInBytes>, + finished_bytes: UIntInRangeInclusiveType, MemoryInterfaceBusWidthInBytes>, + } + + let op_ty = Op[input_interface_config][output_interfaces.fields().len()]; + let option_op_ty = HdlOption[op_ty]; + + #[hdl] + let not_started_op_queue = instance(queue( + op_ty, + const { NonZeroUsize::new(2).unwrap() }, + false, + true, + )); + + connect(not_started_op_queue.cd, cd); + connect( + 
not_started_op_queue.inp.data,
        HdlOption::map(input_interface.start.data, |start| {
            #[hdl]
            let output_interface_index = wire(op_ty.output_interface_index);
            connect(
                output_interface_index,
                (output_interfaces.fields().len() - 1).cast_to(output_interface_index.ty()),
            );
            #[hdl]
            Op::<_, _> {
                start,
                output_interface_index,
                started_bytes: 0u8.cast_to(op_ty.started_bytes),
                finished_bytes: 0u8.cast_to(op_ty.finished_bytes),
            }
        }),
    );
    connect(input_interface.start.ready, not_started_op_queue.inp.ready);

    #[hdl]
    let starting_op_reg = reg_builder().clock_domain(cd).reset(option_op_ty.HdlNone());

    #[hdl]
    let starting_op_in = wire(option_op_ty);

    #[hdl]
    if let HdlSome(_) = starting_op_reg {
        connect(starting_op_in, starting_op_reg);
        connect(not_started_op_queue.out.ready, false);
    } else {
        connect(starting_op_in, not_started_op_queue.out.data);
        connect(not_started_op_queue.out.ready, true);
    }

    for output_interface in output_interfaces.fields() {
        connect(
            output_interface.value.start.data,
            output_interface.value.ty().start.data.HdlNone(),
        );
    }

    #[hdl]
    if let HdlSome(starting_op_in) = starting_op_in {
        #[hdl]
        let Op::<_, _> {
            start,
            output_interface_index,
            started_bytes,
            finished_bytes,
        } = starting_op_in;
        #[hdl]
        let MemoryOperationStart::<_> {
            kind,
            addr,
            write_data,
            rw_mask,
            op_id,
            config,
        } = start;
        visit_selected_output_interface(
            &output_interfaces,
            output_interface_index,
            |_, output_interface| {
                connect(
                    output_interface.value.start.data,
                    #[hdl]
                    MemoryOperationStart::<_> { kind },
                );
                #[hdl]
                if output_interface.value.start {
                    todo!();
                }
                todo!();
            },
        );
    } else {
        connect(starting_op_reg, option_op_ty.HdlNone());
    }

    #[hdl]
    let started_op_queue = instance(queue(
        op_ty,
        input_interface_config.get().queue_capacity,
        false,
        false,
    ));

    #[hdl]
    let finishing_op_reg = 
reg_builder().clock_domain(cd).reset(option_op_ty.HdlNone()); + + todo!() +}