WIP: adding memory_interface_adapter_no_split

This commit is contained in:
Jacob Lifshay 2026-03-10 21:23:47 -07:00
parent 3080ea4ce2
commit 08aafe370e
Signed by: programmerjake
SSH key fingerprint: SHA256:HnFTLGpSm4Q4Fj502oCFisjZSoakwEuTsJJMSke63RQ
5 changed files with 1597 additions and 13 deletions

View file

@ -2,12 +2,27 @@
// See Notices.txt for copyright information
use crate::{config::CpuConfig, next_pc::FETCH_BLOCK_ID_WIDTH, util::array_vec::ArrayVec};
use fayalite::{prelude::*, util::ready_valid::ReadyValid};
use std::num::{NonZeroU64, NonZeroUsize, Wrapping};
use fayalite::{
bundle::BundleType,
expr::Valueless,
int::UIntInRangeType,
intern::{Intern, Interned},
module::{instance_with_loc, wire_with_loc},
prelude::*,
util::{
prefix_sum,
ready_valid::{ReadyValid, queue},
},
};
use std::{
cmp::Ordering,
fmt,
num::{NonZeroU64, NonZeroUsize, Wrapping},
};
pub mod simple_uart;
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, serde::Serialize, serde::Deserialize)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
pub enum AddressRange {
Full,
Limited {
@ -16,6 +31,19 @@ pub enum AddressRange {
},
}
impl fmt::Debug for AddressRange {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Full => write!(f, "Full"),
Self::Limited { start, size } => f
.debug_struct("Limited")
.field("start", &fmt::from_fn(|f| write!(f, "{start:#x}")))
.field("size", &fmt::from_fn(|f| write!(f, "{size:#x}")))
.finish(),
}
}
}
impl AddressRange {
pub const fn from_wrapping_range_inclusive(range: std::ops::RangeInclusive<u64>) -> Self {
let start = Wrapping(*range.start());
@ -118,6 +146,21 @@ impl MemoryInterfaceConfig {
address_range: AddressRange::Full,
}
}
/// Creates a [`MemoryInterfaceConfig`] from its fields.
///
/// # Panics
///
/// Panics if `queue_capacity` is `0`.
#[track_caller]
pub const fn new(
    log2_bus_width_in_bytes: u8,
    queue_capacity: usize,
    op_id_width: usize,
    address_range: AddressRange,
) -> Self {
    // validate the capacity up-front so the panic points at the caller
    let queue_capacity = match NonZeroUsize::new(queue_capacity) {
        Some(v) => v,
        None => panic!("queue capacity must be at least 1"),
    };
    Self {
        log2_bus_width_in_bytes,
        queue_capacity,
        op_id_width,
        address_range,
    }
}
/// bus width in bytes, i.e. `1 << self.log2_bus_width_in_bytes`.
/// uses `strict_shl` so an out-of-range shift panics even in release builds
/// instead of silently wrapping.
pub const fn bus_width_in_bytes(&self) -> usize {
    1usize.strict_shl(self.log2_bus_width_in_bytes as u32)
}
@ -180,6 +223,7 @@ pub struct MemoryOperationFinish<C: PhantomConstGet<MemoryInterfaceConfig>> {
#[hdl(no_static)]
pub struct MemoryInterface<C: PhantomConstGet<MemoryInterfaceConfig>> {
pub start: ReadyValid<MemoryOperationStart<C>>,
/// started operations must finish in the same order they started in
#[hdl(flip)]
pub finish: ReadyValid<MemoryOperationFinish<C>>,
/// for debugging
@ -187,3 +231,922 @@ pub struct MemoryInterface<C: PhantomConstGet<MemoryInterfaceConfig>> {
pub next_op_ids: HdlOption<ArrayVec<MemoryInterfaceOpId<C>, MemoryInterfaceQueueCapacity<C>>>,
pub config: C,
}
/// Returns the config used by [`memory_interface_always_error()`]: `base_config`
/// with the queue capacity forced to 1 and the address range forced to
/// [`AddressRange::Full`]; bus width and op-id width are kept.
pub const fn memory_interface_always_error_config(
    base_config: MemoryInterfaceConfig,
) -> MemoryInterfaceConfig {
    MemoryInterfaceConfig {
        log2_bus_width_in_bytes: base_config.log2_bus_width_in_bytes,
        // one op in flight at a time is enough since every op errors immediately
        queue_capacity: const { NonZeroUsize::new(1).unwrap() },
        op_id_width: base_config.op_id_width,
        address_range: AddressRange::Full,
    }
}
/// Module that accepts every [`MemoryInterface`] operation and immediately
/// answers it with a generic error; used as a catch-all when no real output
/// interface matches an address.
#[hdl_module]
pub fn memory_interface_always_error(config: PhantomConst<MemoryInterfaceConfig>) {
    // the config must already be normalized by memory_interface_always_error_config
    // (queue capacity 1, AddressRange::Full)
    assert_eq!(
        *config.get(),
        memory_interface_always_error_config(*config.get()),
    );
    #[hdl]
    let input_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.input(MemoryInterface[config]);
    // no debug tracking of in-flight op ids
    connect(
        input_interface.next_op_ids,
        input_interface.ty().next_op_ids.HdlNone(),
    );
    // an op is accepted exactly when its (error) finish can be delivered in the
    // same cycle, so nothing ever needs to be buffered
    connect(input_interface.start.ready, input_interface.finish.ready);
    connect(
        input_interface.finish.data,
        input_interface.ty().finish.data.HdlNone(),
    );
    #[hdl]
    if let HdlSome(_) = input_interface.start.data {
        // combinationally finish every presented op with a generic error and
        // all-zero read data
        connect(
            input_interface.finish.data,
            HdlSome(
                #[hdl]
                MemoryOperationFinish::<_> {
                    kind: MemoryOperationFinishKind.Error(MemoryOperationErrorKind.Generic()),
                    read_data: repeat(0u8, MemoryInterfaceBusWidthInBytes[config]),
                    config,
                },
            ),
        );
    }
}
/// A `.`-separated field path naming one [`MemoryInterface`] inside a
/// [`MemoryInterfacesBundle`], e.g. `output_interfaces.uart`; used in
/// diagnostics and generated instance names.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct MemoryInterfacesBundleFieldPath(pub Interned<[Interned<str>]>);
impl MemoryInterfacesBundleFieldPath {
    /// Interns `path` as a field path.
    pub fn from_slice(path: &[Interned<str>]) -> Self {
        Self(path.intern())
    }
}
impl fmt::Display for MemoryInterfacesBundleFieldPath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0.is_empty() {
return f.write_str("<empty path>");
}
for (i, name) in self.0.iter().enumerate() {
if i != 0 {
f.write_str(".")?;
}
if name.is_empty() || name.contains(|ch: char| !ch.is_ascii_alphanumeric() && ch != '_')
{
write!(f, "{name:?}")?;
} else {
f.write_str(name)?;
}
}
Ok(())
}
}
/// `Debug` output is identical to the [`fmt::Display`] output.
impl fmt::Debug for MemoryInterfacesBundleFieldPath {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{self}")
    }
}
/// One flattened field of a [`MemoryInterfacesBundle`]: a [`MemoryInterface`]
/// value (either a `Valueless` type or an `Expr`) paired with the bundle path
/// where it was found.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct MemoryInterfacesBundleField<
    T: ValueType<Type = MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
> {
    // where in the bundle this interface lives; used for error messages and
    // generated instance names
    pub path: MemoryInterfacesBundleFieldPath,
    // the interface itself
    pub value: T,
}
/// Flattened view of a [`MemoryInterfacesBundle`]: every contained
/// [`MemoryInterface`] field, plus which field (if any) is the first one whose
/// address range is [`AddressRange::Full`].
#[derive(Clone, Debug)]
pub struct MemoryInterfacesBundleProperties<
    T: ValueType<Type = MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
> {
    // index into `fields` of the first field with AddressRange::Full
    first_full_interface: Option<usize>,
    fields: Vec<MemoryInterfacesBundleField<T>>,
}
impl<T: ValueType<Type = MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>>
    MemoryInterfacesBundleProperties<T>
{
    /// index of the first field whose config uses [`AddressRange::Full`], if any
    pub fn first_full_interface(&self) -> Option<usize> {
        self.first_full_interface
    }
    /// all flattened interface fields, in traversal order
    pub fn fields(&self) -> &[MemoryInterfacesBundleField<T>] {
        &self.fields
    }
    /// consumes `self`, returning the flattened interface fields
    pub fn into_fields(self) -> Vec<MemoryInterfacesBundleField<T>> {
        self.fields
    }
    /// Builds properties from `fields`, validating that every field really is
    /// a `MemoryInterface` of its own config and recording the first field
    /// with [`AddressRange::Full`].
    ///
    /// # Panics
    ///
    /// Panics if any field's type isn't `MemoryInterface[its own config]`.
    #[track_caller]
    pub fn from_fields(fields: Vec<MemoryInterfacesBundleField<T>>) -> Self {
        let mut first_full_interface = None;
        for (index, field) in fields.iter().enumerate() {
            let ty = field.value.ty();
            assert_eq!(
                ty, MemoryInterface[ty.config],
                "inconsistent field type: {}",
                field.path,
            );
            // remember only the first Full interface
            if let None = first_full_interface
                && let AddressRange::Full = ty.config.get().address_range
            {
                first_full_interface = Some(index);
            }
        }
        Self {
            first_full_interface,
            fields,
        }
    }
    /// Recursively walks `bundle`, pushing every contained [`MemoryInterface`]
    /// (with its path) onto `fields`.
    ///
    /// A sub-bundle is treated as a leaf `MemoryInterface` when it has any
    /// flipped field and a field named `config` (a structural heuristic for
    /// the `MemoryInterface` bundle shape); otherwise each of its fields must
    /// be an un-flipped bundle (recursed into) or a `PhantomConst` (skipped).
    ///
    /// The closure parameters abstract over traversing `Valueless` types vs
    /// `Expr` values.
    #[track_caller]
    fn get_fields<
        B: ValueType<Type = Bundle, ValueCategory = T::ValueCategory>,
        C: ValueType<Type = CanonicalType, ValueCategory = T::ValueCategory>,
    >(
        fields: &mut Vec<MemoryInterfacesBundleField<T>>,
        path_prefix: &mut Vec<Interned<str>>,
        bundle: B,
        get_field: impl Copy + Fn(&B, Interned<str>) -> C,
        interface_from_bundle: impl Copy + Fn(B) -> T,
        bundle_from_canonical: impl Copy + Fn(C) -> B,
    ) {
        let bundle_fields = bundle.ty().fields();
        if bundle_fields.iter().any(|f| f.flipped)
            && bundle_fields.iter().any(|f| *f.name == *"config")
        {
            // leaf: this bundle is itself a MemoryInterface
            let value = interface_from_bundle(bundle);
            fields.push(MemoryInterfacesBundleField {
                path: MemoryInterfacesBundleFieldPath::from_slice(path_prefix),
                value,
            });
        } else {
            for f in &bundle_fields {
                assert!(
                    !f.flipped,
                    "field must not have #[hdl(flip)]: {}",
                    MemoryInterfacesBundleFieldPath::from_slice(path_prefix),
                );
                let field = get_field(&bundle, f.name);
                match field.ty() {
                    CanonicalType::Bundle(_) => {
                        // recurse with the field name appended to the path
                        path_prefix.push(f.name);
                        Self::get_fields(
                            fields,
                            path_prefix,
                            bundle_from_canonical(field),
                            get_field,
                            interface_from_bundle,
                            bundle_from_canonical,
                        );
                        path_prefix.pop();
                    }
                    CanonicalType::PhantomConst(_) => continue,
                    _ => panic!(
                        "field type must be either a MemoryInterfacesBundle or a PhantomConst: {}",
                        MemoryInterfacesBundleFieldPath::from_slice(path_prefix),
                    ),
                }
            }
        }
    }
}
/// `Self` is a bundle where either:
/// * `Self` is a [`MemoryInterface<PhantomConst<MemoryInterfaceConfig>>`]
/// * each field is a [`MemoryInterfacesBundle`] or a [`PhantomConst`] and none of the fields have `#[hdl(flip)]`
pub trait MemoryInterfacesBundle: BundleType {
    /// Flattens this bundle *type* (no value needed) into the list of
    /// contained [`MemoryInterface`] fields, rooted at `path_prefix`.
    #[track_caller]
    fn properties_valueless(
        self,
        mut path_prefix: Vec<Interned<str>>,
    ) -> MemoryInterfacesBundleProperties<
        Valueless<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
    > {
        let mut fields = Vec::new();
        MemoryInterfacesBundleProperties::get_fields(
            &mut fields,
            &mut path_prefix,
            Valueless::new(Bundle::new(self.fields())),
            // field accessor: look the field's type up by name
            |&bundle, name| {
                Valueless::new(
                    bundle
                        .ty()
                        .field_by_name(name)
                        .expect("field is known to exist")
                        .ty,
                )
            },
            |bundle| Valueless::new(MemoryInterface::from_canonical(bundle.ty().canonical())),
            |canonical| Valueless::new(Bundle::from_canonical(canonical.ty())),
        );
        MemoryInterfacesBundleProperties::from_fields(fields)
    }
    /// Flattens a bundle *expression* into the contained [`MemoryInterface`]
    /// field expressions, rooted at `path_prefix`.
    #[track_caller]
    fn properties_expr(
        this: impl ToExpr<Type = Self>,
        mut path_prefix: Vec<Interned<str>>,
    ) -> MemoryInterfacesBundleProperties<Expr<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>>
    {
        let mut fields = Vec::new();
        MemoryInterfacesBundleProperties::get_fields(
            &mut fields,
            &mut path_prefix,
            Expr::as_bundle(this.to_expr()),
            |&bundle, name| Expr::field(bundle, &name),
            Expr::from_bundle,
            Expr::from_canonical,
        );
        MemoryInterfacesBundleProperties::from_fields(fields)
    }
}
// a plain `Bundle` is accepted; its structure is checked at runtime by `get_fields`
impl MemoryInterfacesBundle for Bundle {}
/// get the input's [`MemoryInterfaceConfig`] for [`memory_interface_resize_adapter_no_split()`]:
/// the output config with its bus width replaced by `input_log2_bus_width_in_bytes`
/// (queue capacity, op-id width, and address range are kept).
pub fn memory_interface_resize_adapter_no_split_input_config(
    output_config: PhantomConst<MemoryInterfaceConfig>,
    input_log2_bus_width_in_bytes: u8,
) -> PhantomConst<MemoryInterfaceConfig> {
    let config = MemoryInterfaceConfig {
        log2_bus_width_in_bytes: input_log2_bus_width_in_bytes,
        ..*output_config.get()
    };
    PhantomConst::new_sized(config)
}
/// Computes the output-side start operation for the shrink adapter, without
/// splitting: returns `HdlSome(start)` when all active bytes of `input_start`
/// fall inside a single output-bus-sized chunk (or when no bytes are active),
/// and `HdlNone` when the op would need more than one output operation.
#[hdl]
fn get_shrink_no_split_output_start(
    input_start: Expr<MemoryOperationStart<PhantomConst<MemoryInterfaceConfig>>>,
    output_config: PhantomConst<MemoryInterfaceConfig>,
) -> Expr<HdlOption<MemoryOperationStart<PhantomConst<MemoryInterfaceConfig>>>> {
    #[hdl]
    let MemoryOperationStart::<_> {
        kind,
        addr,
        write_data,
        rw_mask,
        op_id,
        config: _,
    } = input_start;
    let input_config = input_start.ty().config;
    let output_bus_width_in_bytes = output_config.get().bus_width_in_bytes();
    let input_bus_width_in_bytes = input_config.get().bus_width_in_bytes();
    // shrink-only: the input bus must be a whole multiple of the output bus
    assert_eq!(input_bus_width_in_bytes % output_bus_width_in_bytes, 0);
    // the output-bus-sized slice of the start op contributed by one chunk
    #[hdl(no_static)]
    struct OutputStartData<C: PhantomConstGet<MemoryInterfaceConfig>> {
        addr: UInt<64>,
        write_data: ArrayType<UInt<8>, MemoryInterfaceBusWidthInBytes<C>>,
        rw_mask: ArrayType<Bool, MemoryInterfaceBusWidthInBytes<C>>,
        config: C,
    }
    // reduction state: whether >= 1 / > 1 chunks have active bytes, plus the
    // bitwise OR of the active chunks' contributions
    #[hdl(no_static)]
    struct ReduceState<T> {
        at_least_one: Bool,
        more_than_one: Bool,
        output_start_data: T,
    }
    let output_start_data_ty = OutputStartData[output_config];
    let reduce_state_ty = ReduceState[output_start_data_ty];
    // balanced reduction over all output-bus-sized chunks of the input op
    let Some((reduce_state, _)) = prefix_sum::reduce(
        rw_mask
            .chunks(output_bus_width_in_bytes)
            .enumerate()
            .map(|(index, rw_mask_chunk)| {
                // leaf: the reduce state for one chunk
                let reduce_state = wire_with_loc(
                    &format!("reduce_state_{index}"),
                    SourceLocation::caller(),
                    reduce_state_ty,
                );
                connect(
                    reduce_state.at_least_one,
                    rw_mask_chunk.cast_to_bits().any_one_bits(),
                );
                connect(reduce_state.more_than_one, false);
                // default contribution is all-zero so inactive chunks are
                // no-ops under the OR used in the reduction step
                connect(
                    reduce_state.output_start_data,
                    UInt[output_start_data_ty.canonical().bit_width()]
                        .zero()
                        .cast_bits_to(output_start_data_ty),
                );
                #[hdl]
                if reduce_state.at_least_one {
                    #[hdl]
                    let OutputStartData::<_> {
                        addr: output_addr,
                        write_data: output_write_data,
                        rw_mask: output_rw_mask,
                        config: _,
                    } = reduce_state.output_start_data;
                    let start_byte = index * output_bus_width_in_bytes;
                    let byte_range = start_byte..start_byte + output_bus_width_in_bytes;
                    // chunk offsets are aligned, so OR-ing the offset into the
                    // (chunk-aligned) base address forms the chunk's address
                    connect_any(output_addr, addr | start_byte);
                    // copy this chunk's bytes/mask into the output lanes
                    for ((i, output_write_data), output_rw_mask) in
                        byte_range.zip(output_write_data).zip(output_rw_mask)
                    {
                        connect(output_write_data, write_data[i]);
                        connect(output_rw_mask, rw_mask[i]);
                    }
                }
                (reduce_state, format!("{index}"))
            }),
        // reduction step: merge two subtrees' states
        |(l, l_str), (r, r_str)| -> (Expr<ReduceState<_>>, String) {
            let out_str = l_str + "_" + &*r_str;
            let reduce_state = wire_with_loc(
                &format!("reduce_state_{out_str}"),
                SourceLocation::caller(),
                reduce_state_ty,
            );
            connect(
                reduce_state,
                #[hdl]
                ReduceState::<_> {
                    at_least_one: l.at_least_one | r.at_least_one,
                    more_than_one: l.more_than_one
                        | r.more_than_one
                        | (l.at_least_one & r.at_least_one),
                    // OR is safe: the merged data is only used when exactly one
                    // side is non-zero (more_than_one rejects the other case)
                    output_start_data: (l.output_start_data.cast_to_bits()
                        | r.output_start_data.cast_to_bits())
                    .cast_bits_to(output_start_data_ty),
                },
            );
            (reduce_state, out_str)
        },
    ) else {
        unreachable!("known to be non-empty");
    };
    #[hdl]
    let ReduceState::<_> {
        at_least_one,
        more_than_one,
        output_start_data,
    } = reduce_state;
    #[hdl]
    let shrink_no_split_output_start = wire(HdlOption[MemoryOperationStart[output_config]]);
    #[hdl]
    if more_than_one {
        // active bytes span more than one chunk: the op would need splitting
        connect(
            shrink_no_split_output_start,
            shrink_no_split_output_start.ty().HdlNone(),
        );
    } else {
        #[hdl]
        let OutputStartData::<_> {
            addr: output_start_data_addr,
            write_data,
            rw_mask,
            config: _,
        } = output_start_data;
        #[hdl]
        let output_addr = wire();
        #[hdl]
        if at_least_one {
            connect(output_addr, output_start_data_addr);
        } else {
            // no active bytes at all: just pass the original address through
            connect(output_addr, addr);
        }
        connect(
            shrink_no_split_output_start,
            HdlSome(
                #[hdl]
                MemoryOperationStart::<_> {
                    kind,
                    addr: output_addr,
                    write_data,
                    rw_mask,
                    op_id,
                    config: output_config,
                },
            ),
        );
    }
    shrink_no_split_output_start
}
/// Adaptor to a memory interface with a smaller bus width.
/// Operations return errors if they would need to be split into more than one operation on the output.
#[hdl_module]
fn memory_interface_shrink_adapter_no_split(
    output_config: PhantomConst<MemoryInterfaceConfig>,
    input_log2_bus_width_in_bytes: u8,
) {
    let queue_capacity = output_config.get().queue_capacity;
    let output_bus_width_in_bytes = output_config.get().bus_width_in_bytes();
    let input_config = memory_interface_resize_adapter_no_split_input_config(
        output_config,
        input_log2_bus_width_in_bytes,
    );
    let input_bus_width_in_bytes = input_config.get().bus_width_in_bytes();
    // shrink-only: the input bus must be a whole multiple of the output bus
    assert_eq!(input_bus_width_in_bytes % output_bus_width_in_bytes, 0);
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let input_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.input(MemoryInterface[input_config]);
    #[hdl]
    let output_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.output(MemoryInterface[output_config]);
    // like memory_interface_always_error, we don't provide debug op ids
    // (fix: this was previously left undriven)
    connect(
        input_interface.next_op_ids,
        input_interface.ty().next_op_ids.HdlNone(),
    );
    /// bookkeeping for one accepted op: whether it was rejected (would need
    /// splitting) and so must finish with an error instead of an output finish
    #[hdl]
    struct QueueEntry {
        is_error: Bool,
    }
    // in-order queue pairing each accepted op with its eventual finish
    #[hdl]
    let queue = instance(queue(QueueEntry, queue_capacity, false, false));
    connect(queue.cd, cd);
    #[hdl]
    if !queue.inp.ready {
        // bookkeeping queue is full: stall the input, start nothing
        connect(input_interface.start.ready, false);
        connect(
            output_interface.start.data,
            output_interface.ty().start.data.HdlNone(),
        );
        connect(queue.inp.data, queue.ty().inp.data.HdlNone());
    } else {
        connect(input_interface.start.ready, output_interface.start.ready);
        #[hdl]
        if let HdlSome(input_start) = input_interface.start.data {
            let output_start = get_shrink_no_split_output_start(input_start, output_config);
            connect(output_interface.start.data, output_start);
            #[hdl]
            if let HdlSome(_) = output_start {
                // op fits in one output op: record a success entry exactly when
                // the start transfer happens (input ready == output ready here)
                #[hdl]
                if input_interface.start.ready {
                    connect(
                        queue.inp.data,
                        HdlSome(
                            #[hdl]
                            QueueEntry { is_error: false },
                        ),
                    );
                } else {
                    connect(queue.inp.data, HdlNone());
                }
            } else {
                // op would need splitting: nothing is sent to the output, so
                // consume the input op ourselves (queue.inp.ready is known true
                // in this branch) and record exactly one error entry.
                // fix: previously the error entry was pushed while the input op
                // stayed tied to output.start.ready, which could push duplicate
                // error entries without ever consuming the op
                connect(input_interface.start.ready, true);
                connect(
                    queue.inp.data,
                    HdlSome(
                        #[hdl]
                        QueueEntry { is_error: true },
                    ),
                );
            }
        } else {
            connect(
                output_interface.start.data,
                output_interface.ty().start.data.HdlNone(),
            );
            connect(queue.inp.data, queue.ty().inp.data.HdlNone());
        }
    }
    // finish side: a bookkeeping entry is popped exactly when its finish is
    // delivered to the input
    connect(queue.out.ready, input_interface.finish.ready);
    #[hdl]
    if let HdlSome(queue_out) = queue.out.data {
        #[hdl]
        let QueueEntry { is_error } = queue_out;
        #[hdl]
        if is_error {
            // rejected op: synthesize a generic error finish; the output never
            // saw this op, so leave its finish channel alone.
            // fix: the finish goes to the input interface, so it must carry
            // input_config (read_data is already input-bus-wide); this
            // previously used output_config, a type mismatch
            connect(
                input_interface.finish.data,
                HdlSome(
                    #[hdl]
                    MemoryOperationFinish::<_> {
                        kind: MemoryOperationFinishKind.Error(MemoryOperationErrorKind.Generic()),
                        read_data: repeat(0u8, input_bus_width_in_bytes),
                        config: input_config,
                    },
                ),
            );
            connect(output_interface.finish.ready, false);
        } else {
            connect(output_interface.finish.ready, input_interface.finish.ready);
            #[hdl]
            if let HdlSome(output_finish) = output_interface.finish.data {
                #[hdl]
                let MemoryOperationFinish::<_> {
                    kind,
                    read_data,
                    config: _,
                } = output_finish;
                #[hdl]
                let input_finish = wire(input_interface.ty().finish.data.HdlSome);
                connect(input_finish.config, input_finish.ty().config);
                connect(input_finish.kind, kind);
                // replicate the narrow output data across the wider input bus;
                // the lanes the caller cares about were selected by rw_mask
                for (l, r) in input_finish
                    .read_data
                    .into_iter()
                    .zip(read_data.into_iter().cycle())
                {
                    connect(l, r);
                }
                connect(input_interface.finish.data, HdlSome(input_finish));
            } else {
                // the output hasn't produced this op's finish yet: present
                // nothing to the input and keep the bookkeeping entry queued.
                // fix: previously queue.out.ready stayed tied to
                // input.finish.ready here, silently dropping the entry
                connect(queue.out.ready, false);
                connect(
                    input_interface.finish.data,
                    input_interface.ty().finish.data.HdlNone(),
                );
            }
        }
    } else {
        connect(output_interface.finish.ready, false);
        connect(
            input_interface.finish.data,
            input_interface.ty().finish.data.HdlNone(),
        );
    }
}
/// Adaptor to a memory interface with a different bus width.
/// Operations return errors if they would need to be split into more than one operation on the output.
#[hdl_module]
pub fn memory_interface_resize_adapter_no_split(
    output_config: PhantomConst<MemoryInterfaceConfig>,
    input_log2_bus_width_in_bytes: u8,
) {
    let input_config = memory_interface_resize_adapter_no_split_input_config(
        output_config,
        input_log2_bus_width_in_bytes,
    );
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let input_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.input(MemoryInterface[input_config]);
    #[hdl]
    let output_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.output(MemoryInterface[output_config]);
    // choose the implementation based on how the bus widths compare
    match output_config
        .get()
        .log2_bus_width_in_bytes
        .cmp(&input_log2_bus_width_in_bytes)
    {
        Ordering::Less => {
            // output bus is narrower: delegate to the shrink adapter
            #[hdl]
            let shrink_adapter = instance(memory_interface_shrink_adapter_no_split(
                output_config,
                input_log2_bus_width_in_bytes,
            ));
            connect(shrink_adapter.cd, cd);
            connect(shrink_adapter.input_interface, input_interface);
            connect(output_interface, shrink_adapter.output_interface);
        }
        Ordering::Equal => {
            // same width: plain pass-through
            connect(output_interface, input_interface);
            return;
        }
        // widening is not implemented yet
        Ordering::Greater => todo!(
            "connecting a input memory interface to a output memory interface with larger bus width"
        ),
    }
}
/// Adapt an input [`MemoryInterface`] to a collection of output [`MemoryInterface`]s (see [`MemoryInterfacesBundle`] for how that works).
/// The output interface used for each operation is the first output interface that matches [`MemoryInterfaceConfig::address_range`].
/// If none match, then the operation returns an error.
/// Operations return errors if they would need to be split into more than one operation on the output.
#[hdl_module]
pub fn memory_interface_adapter_no_split<OutputInterfaces: Type + MemoryInterfacesBundle>(
    input_config: PhantomConst<MemoryInterfaceConfig>,
    output_interfaces_ty: OutputInterfaces,
) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let input_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.input(MemoryInterface[input_config]);
    #[hdl]
    let output_interfaces: OutputInterfaces = m.output(output_interfaces_ty);
    // like memory_interface_always_error, we don't provide debug op ids
    // (fix: this was previously left undriven)
    connect(
        input_interface.next_op_ids,
        input_interface.ty().next_op_ids.HdlNone(),
    );
    // flatten the output bundle into (path, interface) fields
    let mut output_interfaces = MemoryInterfacesBundle::properties_expr(
        output_interfaces,
        vec!["output_interfaces".intern()],
    );
    if let Some(index) = output_interfaces.first_full_interface() {
        assert!(
            index == output_interfaces.fields().len() - 1,
            "all fields after {path} will never be used since {path} comes before and handles all addresses",
            path = output_interfaces.fields[index].path,
        );
    } else {
        // no catch-all output: append an always-error interface so every
        // address is handled and the last interface has AddressRange::Full
        #[hdl]
        let always_error = instance(memory_interface_always_error(PhantomConst::new_sized(
            memory_interface_always_error_config(*input_config.get()),
        )));
        let mut fields = output_interfaces.into_fields();
        fields.push(MemoryInterfacesBundleField {
            path: MemoryInterfacesBundleFieldPath::from_slice(&[
                "always_error".intern(),
                "input_interface".intern(),
            ]),
            value: always_error.input_interface,
        });
        output_interfaces = MemoryInterfacesBundleProperties::from_fields(fields);
    }
    // every output's address range must be aligned to the input bus width so
    // an un-split input op can never straddle a range boundary
    for field in output_interfaces.fields() {
        let MemoryInterfaceConfig {
            log2_bus_width_in_bytes: _,
            queue_capacity: _,
            op_id_width: _,
            address_range,
        } = *field.value.ty().config.get();
        let start = address_range.start().0;
        let size_or_zero = address_range.size().map_or(0, |v| v.get());
        let input_bus_width_in_bytes = input_config.get().bus_width_in_bytes() as u64;
        assert!(
            start.is_multiple_of(input_bus_width_in_bytes)
                && size_or_zero.is_multiple_of(input_bus_width_in_bytes),
            "output interface's address_range must start and end on a multiple of the input interface's bus width in bytes:\n\
            {}: address_range={address_range:?} input_bus_width_in_bytes={input_bus_width_in_bytes:#x} ({input_bus_width_in_bytes})",
            field.path,
        );
    }
    // give every output field the input's bus width and op-id width by
    // inserting resize adapters where needed
    output_interfaces = MemoryInterfacesBundleProperties::from_fields(
        output_interfaces
            .into_fields()
            .into_iter()
            .map(|MemoryInterfacesBundleField { path, value }| {
                let output_config = value.ty().config;
                let MemoryInterfaceConfig {
                    log2_bus_width_in_bytes: _,
                    queue_capacity,
                    op_id_width: _,
                    address_range,
                } = *output_config.get();
                let MemoryInterfaceConfig {
                    log2_bus_width_in_bytes: input_log2_bus_width_in_bytes,
                    queue_capacity: _,
                    op_id_width,
                    address_range: _,
                } = *input_config.get();
                // the config the routing logic below needs to see
                let expected_config = MemoryInterfaceConfig {
                    log2_bus_width_in_bytes: input_log2_bus_width_in_bytes,
                    queue_capacity,
                    op_id_width,
                    address_range,
                };
                // fix: this condition was inverted (`!=`), which skipped the
                // adapter exactly when one was needed and inserted a useless
                // pass-through adapter when configs already matched
                if expected_config == *output_config.get() {
                    // already the right shape: no resize adapter needed
                    return MemoryInterfacesBundleField { path, value };
                }
                let resize_adapter = instance_with_loc(
                    &format!("resize_adapter_{path}")
                        .replace(|ch: char| !ch.is_ascii_alphanumeric(), "_"),
                    memory_interface_resize_adapter_no_split(
                        output_config,
                        input_log2_bus_width_in_bytes,
                    ),
                    SourceLocation::caller(),
                );
                connect(resize_adapter.cd, cd);
                connect(value, resize_adapter.output_interface);
                let value = resize_adapter.input_interface;
                // the resize adapter can't change queue capacity or op-id
                // width, so reject output interfaces it can't fully adapt
                assert_eq!(
                    expected_config,
                    *value.ty().config.get(),
                    "can't adapt output interface: {path}",
                );
                MemoryInterfacesBundleField {
                    path,
                    value: resize_adapter.input_interface,
                }
            })
            .collect(),
    );
    /// runs `visit_selected` on every field, each inside its arm of an
    /// if/else-if chain on `selected_output_interface`, so only the selected
    /// field's connects take effect (the last field is the unconditional else)
    fn visit_selected_output_interface(
        output_interfaces: &MemoryInterfacesBundleProperties<
            Expr<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
        >,
        selected_output_interface: impl ToExpr<Type = UIntInRangeType<ConstUsize<0>, DynSize>>,
        mut visit_selected: impl FnMut(
            usize,
            MemoryInterfacesBundleField<Expr<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>>,
        ),
    ) {
        let selected_output_interface = selected_output_interface.to_expr();
        let mut else_scope = None;
        for (i, &interface) in output_interfaces.fields().iter().enumerate() {
            if i == output_interfaces.fields().len() - 1 {
                // just else, no else if
                visit_selected(i, interface);
            } else {
                let if_scope = fayalite::module::if_(selected_output_interface.cmp_eq(i));
                visit_selected(i, interface);
                else_scope = Some(if_scope.else_());
            }
        }
        drop(else_scope);
    }
    /// an op in flight: which output interface it was routed to
    #[hdl(no_static)]
    struct Op<OutputInterfaceCount: Size> {
        output_interface_index: UIntInRangeType<ConstUsize<0>, OutputInterfaceCount>,
    }
    /// an accepted-but-not-yet-started op together with its routing decision
    #[hdl(no_static)]
    struct StartOp<C: PhantomConstGet<MemoryInterfaceConfig>, OutputInterfaceCount: Size> {
        start: MemoryOperationStart<C>,
        op: Op<OutputInterfaceCount>,
    }
    let start_op_ty = StartOp[input_config][output_interfaces.fields().len()];
    let op_ty = Op[output_interfaces.fields().len()];
    // 1-deep buffer decoupling input acceptance from output start
    #[hdl]
    let not_started_op_queue = instance(queue(
        start_op_ty,
        const { NonZeroUsize::new(1).unwrap() },
        false,
        true,
    ));
    connect(not_started_op_queue.cd, cd);
    assert_eq!(
        output_interfaces.first_full_interface(),
        Some(output_interfaces.fields().len() - 1),
        "guaranteed by above code adding memory_interface_always_error if necessary",
    );
    connect(
        not_started_op_queue.inp.data,
        HdlOption::map(input_interface.start.data, |start| {
            #[hdl]
            let output_interface_index = wire(op_ty.output_interface_index);
            // iterate in reverse order so last-connect semantics give us the
            // first output interface that matches.
            // output_interface_index is guaranteed to be written because we know the last
            // output interface has AddressRange::Full
            // fix: removed a leftover unconditional connect after this match
            // that overrode the routing and always selected index 0
            for (index, output_interface) in output_interfaces.fields().iter().enumerate().rev() {
                match output_interface.value.ty().config.get().address_range {
                    AddressRange::Full => connect(
                        output_interface_index,
                        index.cast_to(output_interface_index.ty()),
                    ),
                    AddressRange::Limited {
                        start: address_range_start,
                        size,
                    } => {
                        let from_start: Expr<UInt<64>> =
                            (start.addr - address_range_start.0).cast_to_static();
                        #[hdl]
                        if from_start.cmp_lt(size) {
                            connect(
                                output_interface_index,
                                index.cast_to(output_interface_index.ty()),
                            );
                        }
                    }
                }
            }
            #[hdl]
            StartOp::<_, _> {
                start,
                op: #[hdl]
                Op::<_> {
                    output_interface_index,
                },
            }
        }),
    );
    connect(input_interface.start.ready, not_started_op_queue.inp.ready);
    // in-order tracking of started ops so finishes can be routed back
    #[hdl]
    let op_queue = instance(queue(
        op_ty,
        input_config.get().queue_capacity,
        false,
        false,
    ));
    connect(op_queue.cd, cd);
    connect(op_queue.inp.data, op_queue.ty().inp.data.HdlNone());
    connect(not_started_op_queue.out.ready, false);
    // defaults: no output starts anything, no output finish is accepted
    for output_interface in output_interfaces.fields() {
        connect(
            output_interface.value.start.data,
            output_interface.value.ty().start.data.HdlNone(),
        );
        connect(output_interface.value.finish.ready, false);
    }
    #[hdl]
    if let HdlSome(start_op) = not_started_op_queue.out.data {
        #[hdl]
        let StartOp::<_, _> { start, op } = start_op;
        #[hdl]
        let Op::<_> {
            output_interface_index,
        } = op;
        #[hdl]
        let MemoryOperationStart::<_> {
            kind,
            addr,
            write_data,
            rw_mask,
            op_id,
            config: _,
        } = start;
        // start the op on the selected output; pop it (and record it in
        // op_queue) only when that output and op_queue can both accept it
        visit_selected_output_interface(
            &output_interfaces,
            output_interface_index,
            |_, output_interface| {
                connect(
                    output_interface.value.start.data,
                    HdlSome(
                        #[hdl]
                        MemoryOperationStart::<_> {
                            kind,
                            addr,
                            write_data,
                            rw_mask,
                            op_id,
                            config: output_interface.value.ty().config,
                        },
                    ),
                );
                #[hdl]
                if output_interface.value.start.ready & op_queue.inp.ready {
                    connect(not_started_op_queue.out.ready, true);
                    connect(op_queue.inp.data, HdlSome(op));
                }
            },
        );
    }
    connect(op_queue.out.ready, false);
    connect(
        input_interface.finish.data,
        input_interface.ty().finish.data.HdlNone(),
    );
    #[hdl]
    if let HdlSome(op) = op_queue.out.data {
        #[hdl]
        let Op::<_> {
            output_interface_index,
        } = op;
        // route the selected output's finish back to the input, popping the
        // op exactly when the input accepts the finish
        visit_selected_output_interface(
            &output_interfaces,
            output_interface_index,
            |_, output_interface| {
                connect(
                    output_interface.value.finish.ready,
                    input_interface.finish.ready,
                );
                #[hdl]
                if let HdlSome(finish) = output_interface.value.finish.data {
                    #[hdl]
                    let MemoryOperationFinish::<_> {
                        kind,
                        read_data,
                        config: _,
                    } = finish;
                    connect(
                        input_interface.finish.data,
                        HdlSome(
                            #[hdl]
                            MemoryOperationFinish::<_> {
                                kind,
                                read_data,
                                config: input_config,
                            },
                        ),
                    );
                    connect(op_queue.out.ready, input_interface.finish.ready);
                }
            },
        );
    }
}

View file

@ -358,9 +358,9 @@ pub fn receiver(queue_capacity: NonZeroUsize) {
pub const SIMPLE_UART_RECEIVE_OFFSET: u64 = 0;
pub const SIMPLE_UART_TRANSMIT_OFFSET: u64 = 0;
pub const SIMPLE_UART_STATUS_OFFSET: u64 = 1;
pub const SIMPLE_UART_SIZE: NonZeroU64 =
NonZeroU64::new(1 << SIMPLE_UART_LOG2_SIZE).expect("known to be non-zero");
pub const SIMPLE_UART_LOG2_SIZE: u8 = 1;
pub const SIMPLE_UART_LOG2_BUS_WIDTH: u8 = 1;
pub const SIMPLE_UART_USED_SIZE: NonZeroU64 = NonZeroU64::new(2).expect("known non-zero");
pub const SIMPLE_UART_ADDRESS_SIZE: NonZeroU64 = NonZeroU64::new(1 << 6).expect("known non-zero");
#[hdl(no_static)]
struct Operation<C: PhantomConstGet<MemoryInterfaceConfig>> {
@ -373,16 +373,18 @@ pub const fn simple_uart_memory_interface_config(
start_address: Wrapping<u64>,
) -> MemoryInterfaceConfig {
assert!(
start_address.0 % SIMPLE_UART_SIZE.get() == 0,
start_address
.0
.is_multiple_of(SIMPLE_UART_ADDRESS_SIZE.get()),
"start_address must be properly aligned"
);
MemoryInterfaceConfig {
log2_bus_width_in_bytes: SIMPLE_UART_LOG2_SIZE,
log2_bus_width_in_bytes: SIMPLE_UART_LOG2_BUS_WIDTH,
queue_capacity: const { NonZeroUsize::new(1).unwrap() },
op_id_width,
address_range: AddressRange::Limited {
start: start_address,
size: SIMPLE_UART_SIZE,
size: SIMPLE_UART_ADDRESS_SIZE,
},
}
}

View file

@ -0,0 +1,619 @@
// SPDX-License-Identifier: LGPL-3.0-or-later
// See Notices.txt for copyright information
use cpu::{
main_memory_and_io::{
AddressRange, MemoryInterface, MemoryInterfaceConfig, MemoryOperationErrorKind,
MemoryOperationFinish, MemoryOperationFinishKind, MemoryOperationKind,
MemoryOperationStart, memory_interface_adapter_no_split,
},
next_pc::FETCH_BLOCK_ID_WIDTH,
};
use fayalite::{
bundle::{BundleField, BundleType},
intern::{Intern, Interned},
module::instance_with_loc,
prelude::*,
sim::vcd::VcdWriterDecls,
util::RcWriter,
};
use std::{cell::Cell, collections::VecDeque, fmt};
/// Returns the next delay (in cycles) from a deterministic pseudo-random
/// sequence driven by `delay_sequence_index`, advancing the index by one.
/// The distribution is weighted toward short delays with an occasional long one.
fn get_next_delay(delay_sequence_index: &Cell<u64>) -> u8 {
    let index = delay_sequence_index.get();
    delay_sequence_index.set(index.wrapping_add(1));
    // cheap deterministic hash of `index`: two multiplies by random primes
    // with rotations in between to mix high bits back down
    let mixed = index
        .wrapping_add(1)
        .wrapping_mul(0x8c16a62518f86883) // random prime
        .rotate_left(32)
        .wrapping_mul(0xf807b7df2082353d) // random prime
        .rotate_right(60);
    // weighted delay table: mostly 0-3 cycles, rarely up to 20
    const DELAYS: &[u8; 0x20] = &[
        0, 0, 0, 0, 0, 0, 0, 0, //
        1, 1, 1, 1, 1, 1, 1, 1, //
        2, 2, 2, 2, 2, 2, 2, 2, //
        3, 3, 3, 3, 4, 5, 6, 20, //
    ];
    DELAYS[(mixed % 0x20) as usize]
}
/// Simulation-only mock memory serving a [`MemoryInterface`]: answers reads
/// from `contents` with pseudo-random per-op delays (finishes stay in start
/// order); writes are not implemented yet.
#[hdl_module(extern)]
fn mock_memory(memory: Memory) {
    let Memory { config, contents } = memory;
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let input_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.input(MemoryInterface[config]);
    m.register_clock_for_past(cd.clk);
    /// per-clock simulation loop: tracks in-flight ops and delivers each
    /// finish once its artificial delay has elapsed
    #[hdl]
    async fn run(
        cd: Expr<ClockDomain>,
        input_interface: Expr<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
        config: PhantomConst<MemoryInterfaceConfig>,
        contents: Interned<[u8]>,
        delay_sequence_index: &Cell<u64>,
        mut sim: ExternModuleSimulationState,
    ) {
        /// one in-flight op: its remaining delay, id, and precomputed finish
        #[derive(Debug)]
        struct Op {
            cycles_left: u8,
            op_id: SimValue<UInt>,
            finish: SimValue<MemoryOperationFinish<PhantomConst<MemoryInterfaceConfig>>>,
        }
        let mut ops = VecDeque::<Op>::with_capacity(config.get().queue_capacity.get());
        let finish_ty = input_interface.ty().finish.data.HdlSome;
        loop {
            // one iteration per clock cycle
            for op in &mut ops {
                op.cycles_left = op.cycles_left.saturating_sub(1);
            }
            // expose the in-flight op ids for debugging
            let next_op_ids_ty = input_interface.ty().next_op_ids.HdlSome;
            sim.write(
                input_interface.next_op_ids,
                #[hdl(sim)]
                (input_interface.ty().next_op_ids).HdlSome(
                    next_op_ids_ty
                        .from_iter_sim(
                            next_op_ids_ty.element().zero(),
                            ops.iter().map(|op| &op.op_id),
                        )
                        .expect("known to fit"),
                ),
            )
            .await;
            // present the oldest op's finish once its delay has elapsed
            if let Some(Op {
                cycles_left: 0,
                op_id: _,
                finish,
            }) = ops.front()
            {
                sim.write(
                    input_interface.finish.data,
                    #[hdl(sim)]
                    (input_interface.ty().finish.data).HdlSome(finish),
                )
                .await;
            }
            // accept new ops while there's queue space
            sim.write_bool(
                input_interface.start.ready,
                ops.len() < config.get().queue_capacity.get(),
            )
            .await;
            sim.wait_for_clock_edge(cd.clk).await;
            // finish handshake completed at this edge -> retire the oldest op
            if sim
                .read_past_bool(input_interface.finish.ready, cd.clk)
                .await
            {
                ops.pop_front_if(|op| op.cycles_left == 0);
            }
            // start handshake completed at this edge -> enqueue a new op with
            // its finish computed up front
            if sim
                .read_past_bool(input_interface.start.ready, cd.clk)
                .await
            {
                #[hdl(sim)]
                if let HdlSome(start) = sim.read_past(input_interface.start.data, cd.clk).await {
                    #[hdl(sim)]
                    let MemoryOperationStart::<_> {
                        kind,
                        addr,
                        write_data,
                        rw_mask,
                        op_id,
                        config: _,
                    } = start;
                    let mut finish_kind = #[hdl(sim)]
                    MemoryOperationFinishKind.Success(&kind);
                    let mut read_data = vec![0u8; finish_ty.read_data.len()];
                    #[hdl(sim)]
                    match kind {
                        MemoryOperationKind::Read => {
                            for (i, v) in read_data.iter_mut().enumerate() {
                                if *rw_mask[i] {
                                    let addr = addr.as_int().wrapping_add(i as u64);
                                    let offset =
                                        addr.wrapping_sub(config.get().address_range.start().0);
                                    // any out-of-range byte turns the whole op
                                    // into a generic error with zeroed data
                                    if !config.get().address_range.contains(addr) {
                                        finish_kind = #[hdl(sim)]
                                        MemoryOperationFinishKind.Error(
                                            #[hdl(sim)]
                                            MemoryOperationErrorKind.Generic(),
                                        );
                                        read_data.fill(0);
                                        break;
                                    }
                                    // NOTE(review): assumes `contents` covers the
                                    // whole address_range -- indexing panics otherwise
                                    *v = contents[offset as usize];
                                }
                            }
                        }
                        MemoryOperationKind::Write => {
                            todo!("write {write_data:?}");
                        }
                    }
                    ops.push_back(Op {
                        cycles_left: get_next_delay(delay_sequence_index),
                        op_id,
                        finish: #[hdl(sim)]
                        MemoryOperationFinish::<_> {
                            kind: finish_kind,
                            read_data,
                            config,
                        },
                    });
                }
            }
        }
    }
    m.extern_module_simulation_fn(
        (cd, input_interface, config, contents),
        async |(cd, input_interface, config, contents), mut sim| {
            // intentionally have a different sequence each time we're reset
            let delay_sequence_index = Cell::new(0);
            sim.resettable(
                cd,
                // reset state: nothing in flight, nothing accepted
                async |mut sim| {
                    sim.write(
                        input_interface.next_op_ids,
                        #[hdl(sim)]
                        (input_interface.ty().next_op_ids).HdlNone(),
                    )
                    .await;
                    sim.write_bool(input_interface.start.ready, false).await;
                    sim.write(
                        input_interface.finish.data,
                        #[hdl(sim)]
                        (input_interface.ty().finish.data).HdlNone(),
                    )
                    .await;
                },
                |sim, ()| {
                    run(
                        cd,
                        input_interface,
                        config,
                        contents,
                        &delay_sequence_index,
                        sim,
                    )
                },
            )
            .await
        },
    );
}
/// Parameters for [`mock_memory`]: the backing bytes plus the interface config.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Memory {
    // backing storage; expected to cover config's address_range -- the mock
    // indexes into it for every in-range read
    contents: Interned<[u8]>,
    config: PhantomConst<MemoryInterfaceConfig>,
}
impl Memory {
    /// Builds a mock memory description from its backing `contents`, the
    /// log2 of its bus width in bytes, and the address range it responds to.
    ///
    /// The interface config is fixed to a queue capacity of 8 and
    /// `FETCH_BLOCK_ID_WIDTH`-bit operation ids.
    fn new(
        contents: impl AsRef<[u8]>,
        log2_bus_width_in_bytes: u8,
        address_range: AddressRange,
    ) -> Self {
        let interface_config = MemoryInterfaceConfig::new(
            log2_bus_width_in_bytes,
            8,
            FETCH_BLOCK_ID_WIDTH,
            address_range,
        );
        let interned_contents = contents.as_ref().intern();
        Self {
            contents: interned_contents,
            config: PhantomConst::new_sized(interface_config),
        }
    }
}
// Extern (simulation-only) module playing the CPU side of the memory
// interface: it issues a fixed sequence of read operations on
// `output_interface`, checks every returned `finish` against a software model
// of `memories`, and raises `finished` once the whole sequence has passed.
#[hdl_module(extern)]
fn mock_cpu(memories: Interned<[Memory]>) {
    // CPU-side bus is 2^3 = 8 bytes wide
    const LOG2_BUS_WIDTH: u8 = 3;
    const BUS_WIDTH: usize = 1 << LOG2_BUS_WIDTH;
    // CPU-side interface: full address range, queue capacity 8,
    // FETCH_BLOCK_ID_WIDTH-bit operation ids
    let config = PhantomConst::new_sized(MemoryInterfaceConfig::new(
        LOG2_BUS_WIDTH,
        8,
        FETCH_BLOCK_ID_WIDTH,
        AddressRange::Full,
    ));
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let output_interface: MemoryInterface<PhantomConst<MemoryInterfaceConfig>> =
        m.output(MemoryInterface[config]);
    #[hdl]
    let finished: Bool = m.output();
    m.register_clock_for_past(cd.clk);
    // One test operation: a read at `addr` with a per-byte enable mask.
    #[derive(PartialEq)]
    struct Op {
        addr: u64,
        read_mask: [bool; BUS_WIDTH],
    }
    impl fmt::Debug for Op {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            let Self { addr, read_mask } = self;
            f.debug_struct("Op")
                // print addresses in hex for readability
                .field("addr", &fmt::from_fn(|f| write!(f, "{addr:#x}")))
                .field("read_mask", read_mask)
                .finish()
        }
    }
    // Drives the `start` side: issues every operation in `sequence`, inserting
    // a pseudo-random delay (derived from `delay_sequence_index`) before each.
    #[hdl]
    async fn generator(
        cd: Expr<ClockDomain>,
        output_interface: Expr<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
        config: PhantomConst<MemoryInterfaceConfig>,
        sequence: &[Op],
        delay_sequence_index: &Cell<u64>,
        mut sim: ExternModuleSimulationState,
    ) {
        let start_ty = MemoryOperationStart[config];
        for (op_index, op) in sequence.iter().enumerate() {
            // deassert start while idling through the delay cycles
            sim.write(
                output_interface.start.data,
                #[hdl(sim)]
                (output_interface.ty().start.data).HdlNone(),
            )
            .await;
            let delay = get_next_delay(delay_sequence_index);
            println!("generator: delay {delay}");
            for _ in 0..delay {
                sim.wait_for_clock_edge(cd.clk).await;
            }
            // NOTE(review): this presents the operation for exactly one cycle
            // without checking start.ready — confirm the adapter never
            // back-pressures start, otherwise operations could be dropped.
            sim.write(
                output_interface.start.data,
                #[hdl(sim)]
                (output_interface.ty().start.data).HdlSome(
                    #[hdl(sim)]
                    MemoryOperationStart::<_> {
                        kind: #[hdl(sim)]
                        MemoryOperationKind.Read(),
                        addr: op.addr,
                        write_data: &[0u8; BUS_WIDTH][..],
                        rw_mask: &op.read_mask[..],
                        op_id: op_index.cast_to(start_ty.op_id),
                        config,
                    },
                ),
            )
            .await;
            sim.wait_for_clock_edge(cd.clk).await;
        }
    }
    // Consumes the `finish` side: for every operation in `sequence`, waits for
    // a finished transaction and asserts it matches a software model of
    // `memories`.
    #[hdl]
    async fn checker(
        cd: Expr<ClockDomain>,
        output_interface: Expr<MemoryInterface<PhantomConst<MemoryInterfaceConfig>>>,
        config: PhantomConst<MemoryInterfaceConfig>,
        sequence: &[Op],
        memories: Interned<[Memory]>,
        delay_sequence_index: &Cell<u64>,
        sim: &mut ExternModuleSimulationState,
    ) {
        for op in sequence {
            // hold off accepting the finish for a pseudo-random delay to
            // exercise back-pressure
            sim.write_bool(output_interface.finish.ready, false).await;
            let delay = get_next_delay(delay_sequence_index);
            println!("checker: delay {delay}");
            for _ in 0..delay {
                sim.wait_for_clock_edge(cd.clk).await;
            }
            sim.write_bool(output_interface.finish.ready, true).await;
            sim.wait_for_clock_edge(cd.clk).await;
            let mut finish = loop {
                #[hdl(sim)]
                if let HdlSome(finish) = sim.read_past(output_interface.finish.data, cd.clk).await {
                    break finish;
                }
                sim.wait_for_clock_edge(cd.clk).await;
            };
            for (v, &mask) in finish.read_data.iter_mut().zip(&op.read_mask) {
                if !mask {
                    *v = 0u8.to_sim_value(); // ignore outputs for ignored bytes
                }
            }
            // software model: the first memory whose address range contains
            // op.addr services the read; no match (or any enabled byte landing
            // out of range) means we expect a generic error
            let expected_finish = memories
                .iter()
                .find(|m| m.config.get().address_range.contains(op.addr))
                .and_then(
                    |&Memory {
                         config: memory_config,
                         contents,
                     }|
                     -> Option<_> {
                        let mut read_data = [0u8; BUS_WIDTH];
                        let mut first_enabled = None;
                        let mut last_enabled = None;
                        for (i, &mask) in op.read_mask.iter().enumerate() {
                            if mask {
                                first_enabled.get_or_insert(i);
                                last_enabled = Some(i);
                                // `?` => out-of-range byte => expected error
                                read_data[i] = *contents.get(
                                    usize::try_from(
                                        op.addr.wrapping_add(i as u64).wrapping_sub(
                                            memory_config.get().address_range.start().0,
                                        ),
                                    )
                                    .ok()?,
                                )?;
                            }
                        }
                        if let (Some(first_enabled), Some(last_enabled)) =
                            (first_enabled, last_enabled)
                        {
                            let log2_bus_width_in_bytes =
                                memory_config.get().log2_bus_width_in_bytes;
                            if first_enabled >> log2_bus_width_in_bytes
                                != last_enabled >> log2_bus_width_in_bytes
                            {
                                // this operation requires more than one operation at the final memory,
                                // so it gets turned into an error since we're using
                                // memory_interface_adapter_no_split
                                return None;
                            }
                        }
                        Some(
                            #[hdl(sim)]
                            MemoryOperationFinish::<_> {
                                kind: #[hdl(sim)]
                                MemoryOperationFinishKind.Success(
                                    #[hdl(sim)]
                                    MemoryOperationKind.Read(),
                                ),
                                // fixed: was `&[0u8; BUS_WIDTH][..]`, which left
                                // the bytes just copied into `read_data` unused
                                // and made every successful read expect all-zero
                                // data
                                read_data: &read_data[..],
                                config,
                            },
                        )
                    },
                )
                .unwrap_or_else(|| {
                    #[hdl(sim)]
                    MemoryOperationFinish::<_> {
                        kind: #[hdl(sim)]
                        MemoryOperationFinishKind.Error(
                            #[hdl(sim)]
                            MemoryOperationErrorKind.Generic(),
                        ),
                        read_data: &[0u8; BUS_WIDTH][..],
                        config,
                    }
                });
            // compared via their Debug renderings — presumably because these
            // sim values don't implement PartialEq directly; TODO confirm
            assert!(
                format!("{finish:#?}") == format!("{expected_finish:#?}"),
                "op={op:#?}\nexpected_finish={expected_finish:#?}\nfinish={finish:#?}"
            );
        }
    }
    m.extern_module_simulation_fn(
        (cd, output_interface, finished, config, memories),
        async |(cd, output_interface, finished, config, memories), mut sim| {
            sim.write_bool(finished, false).await;
            // intentionally have a different sequence each time we're reset
            let generator_delay_sequence_index = Cell::new(1 << 63);
            let checker_delay_sequence_index = Cell::new(1 << 62);
            let mut sequence = Vec::new();
            // first op targets an address outside every memory => expected error
            sequence.push(Op {
                addr: !0 << 16,
                read_mask: [true; _],
            });
            for (i, memory) in memories.iter().enumerate() {
                // each memory's range must start and end on a bus-width boundary
                assert!(
                    memory
                        .config
                        .get()
                        .address_range
                        .start()
                        .0
                        .is_multiple_of(BUS_WIDTH as u64)
                );
                assert!(
                    memory
                        .config
                        .get()
                        .address_range
                        .last()
                        .wrapping_add(1)
                        .is_multiple_of(BUS_WIDTH as u64)
                );
                if i == 0 {
                    // for the first memory, also exercise every power-of-two
                    // read size at every aligned offset within one bus word
                    for log2_read_size in 0..=LOG2_BUS_WIDTH {
                        let read_size = 1 << log2_read_size;
                        for offset in (0..BUS_WIDTH).step_by(read_size) {
                            sequence.push(Op {
                                addr: memory.config.get().address_range.start().0,
                                read_mask: std::array::from_fn(|byte_index| {
                                    byte_index
                                        .checked_sub(offset)
                                        .is_some_and(|v| v < read_size)
                                }),
                            });
                        }
                    }
                }
                // full-width reads covering all of this memory's contents,
                // masking off the tail bytes of a final partial chunk
                for (addr, chunk) in (memory.config.get().address_range.start().0..)
                    .step_by(BUS_WIDTH)
                    .zip(memory.contents.chunks(BUS_WIDTH))
                {
                    let mut op = Op {
                        addr,
                        read_mask: [true; BUS_WIDTH],
                    };
                    op.read_mask[chunk.len()..].fill(false);
                    if sequence.last() != Some(&op) {
                        sequence.push(op);
                    }
                }
            }
            // run generator and checker concurrently; each restarts from the
            // top of `sequence` whenever the clock domain is reset
            sim.fork_join_scope(async |scope, mut sim| {
                scope.spawn_detached(async |_scope, mut sim| {
                    sim.resettable(
                        cd,
                        async |mut sim| {
                            sim.write(
                                output_interface.start.data,
                                #[hdl(sim)]
                                (output_interface.ty().start.data).HdlNone(),
                            )
                            .await;
                        },
                        |sim, ()| {
                            generator(
                                cd,
                                output_interface,
                                config,
                                &sequence,
                                &generator_delay_sequence_index,
                                sim,
                            )
                        },
                    )
                    .await
                });
                sim.resettable(
                    cd,
                    async |mut sim| {
                        sim.write_bool(finished, false).await;
                        sim.write_bool(output_interface.finish.ready, false).await;
                    },
                    async |mut sim, ()| {
                        checker(
                            cd,
                            output_interface,
                            config,
                            &sequence,
                            memories,
                            &checker_delay_sequence_index,
                            &mut sim,
                        )
                        .await;
                        sim.write_bool(finished, true).await;
                        // after the sequence completes, any further finished
                        // transaction is a protocol violation
                        loop {
                            sim.write_bool(output_interface.finish.ready, true).await;
                            sim.wait_for_clock_edge(cd.clk).await;
                            #[hdl(sim)]
                            if let HdlSome(finish) =
                                sim.read_past(output_interface.finish.data, cd.clk).await
                            {
                                panic!("spurious finished transaction: {finish:#?}");
                            }
                        }
                    },
                )
                .await
            })
            .await;
        },
    );
}
// Device-under-test wrapper: a mock CPU driving memory_interface_adapter_no_split,
// whose per-memory outputs each feed a mock_memory instance.
#[hdl_module]
fn memory_interface_adapter_no_split_dut(memories: Interned<[Memory]>) {
    #[hdl]
    let cd: ClockDomain = m.input();
    #[hdl]
    let finished: Bool = m.output();
    #[hdl]
    let mock_cpu = instance(mock_cpu(memories));
    connect(mock_cpu.cd, cd);
    connect(finished, mock_cpu.finished);
    // instantiate one mock memory per entry, collecting in parallel the
    // adapter's output bundle fields and each memory's input interface
    let mut bundle_fields = Vec::with_capacity(memories.len());
    let mut memory_inputs = Vec::with_capacity(memories.len());
    for (index, &memory) in memories.iter().enumerate() {
        let mock_mem = instance_with_loc(
            &format!("mock_mem_{index}"),
            mock_memory(memory),
            SourceLocation::caller(),
        );
        connect(mock_mem.cd, cd);
        bundle_fields.push(BundleField {
            name: format!("{index}").intern_deref(),
            flipped: false,
            ty: MemoryInterface[memory.config].canonical(),
        });
        memory_inputs.push(mock_mem.input_interface);
    }
    let bundle_ty = Bundle::new(bundle_fields.intern_deref());
    #[hdl]
    let adapter = instance(memory_interface_adapter_no_split(
        mock_cpu.ty().output_interface.config,
        bundle_ty,
    ));
    connect(adapter.cd, cd);
    connect(adapter.input_interface, mock_cpu.output_interface);
    // wire each adapter output interface to its mock memory, matched by field
    for (field, input) in bundle_ty.fields().into_iter().zip(memory_inputs) {
        connect(input, Expr::field(adapter.output_interfaces, &field.name));
    }
}
/// Runs the DUT simulation for 1000 clock cycles with three differently-sized,
/// differently-aligned mock memories, requires the mock CPU to report
/// `finished`, and compares the resulting VCD trace against a golden file.
#[test]
#[hdl]
fn test_memory_interface_adapter_no_split() {
    let _n = SourceLocation::normalize_files_for_tests();
    let memories = vec![
        Memory::new(b"Testing", 3, AddressRange::from_range(0x1000..0x2000)),
        Memory::new(b"Memory2.", 2, AddressRange::from_range(0x2000..0x2010)),
        Memory::new(
            b"Contents Test",
            0,
            AddressRange::from_range(0x3000..0x3100),
        ),
    ]
    .intern_deref();
    let m = memory_interface_adapter_no_split_dut(memories);
    let mut sim = Simulation::new(m);
    let writer = RcWriter::default();
    sim.add_trace_writer(VcdWriterDecls::new(writer.clone()));
    // Dumps the VCD on panic (e.g. a failing sim-side assert), so failures
    // still produce a trace for debugging; disarmed by take()-ing the writer.
    struct DumpVcdOnDrop {
        writer: Option<RcWriter>,
    }
    impl Drop for DumpVcdOnDrop {
        fn drop(&mut self) {
            if let Some(mut writer) = self.writer.take() {
                let vcd = String::from_utf8(writer.take()).unwrap();
                println!("####### VCD:\n{vcd}\n#######");
            }
        }
    }
    let mut writer = DumpVcdOnDrop {
        writer: Some(writer),
    };
    sim.write_clock(sim.io().cd.clk, false);
    sim.write_reset(sim.io().cd.rst, true);
    for _cycle in 0..1000 {
        // 1 GHz clock: 500 ns low, 500 ns high; reset deasserts after the
        // first rising edge
        sim.advance_time(SimDuration::from_nanos(500));
        sim.write_clock(sim.io().cd.clk, true);
        sim.advance_time(SimDuration::from_nanos(500));
        sim.write_clock(sim.io().cd.clk, false);
        sim.write_reset(sim.io().cd.rst, false);
    }
    sim.advance_time(SimDuration::from_nanos(500));
    assert!(sim.read_bool(sim.io().finished));
    let vcd = String::from_utf8(writer.writer.take().unwrap().take()).unwrap();
    println!("####### VCD:\n{vcd}\n#######");
    // fixed: was a bare `panic!()` with no message, leaving failures
    // undiagnosed; the full trace was already printed above for comparison
    assert!(
        vcd == include_str!("expected/memory_interface_adapter_no_split.vcd"),
        "VCD trace doesn't match expected/memory_interface_adapter_no_split.vcd"
    );
}

View file

@ -6,8 +6,8 @@ use cpu::{
MemoryInterface, MemoryInterfaceConfig, MemoryOperationErrorKind, MemoryOperationFinish,
MemoryOperationFinishKind, MemoryOperationKind, MemoryOperationStart,
simple_uart::{
ReceiverQueueStatus, SIMPLE_UART_RECEIVE_OFFSET, SIMPLE_UART_SIZE,
SIMPLE_UART_STATUS_OFFSET, SIMPLE_UART_TRANSMIT_OFFSET, receiver, receiver_no_queue,
ReceiverQueueStatus, SIMPLE_UART_RECEIVE_OFFSET, SIMPLE_UART_STATUS_OFFSET,
SIMPLE_UART_TRANSMIT_OFFSET, SIMPLE_UART_USED_SIZE, receiver, receiver_no_queue,
simple_uart, simple_uart_memory_interface_config, transmitter, uart_clock_gen,
},
},
@ -920,10 +920,10 @@ fn test_simple_uart() {
for i in 0..2 * BUS_WIDTH_IN_BYTES as u64 {
mem_op_runner
.read_bytes::<1>(SIMPLE_UART_SIZE.get() + i, 1)
.read_bytes::<1>(SIMPLE_UART_USED_SIZE.get() + i, 1)
.unwrap_err();
mem_op_runner
.write_bytes(SIMPLE_UART_SIZE.get() + i, [0], 1)
.write_bytes(SIMPLE_UART_USED_SIZE.get() + i, [0], 1)
.unwrap_err();
}