
Merge pull request #4536 from YosysHQ/functional

Functional Backend
Miodrag Milanović 2024-09-06 10:05:04 +02:00 committed by GitHub
commit b20df72e1e
41 changed files with 12469 additions and 2 deletions

403
kernel/compute_graph.h Normal file

@@ -0,0 +1,403 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Jannis Harder <jix@yosyshq.com> <me@jix.one>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#ifndef COMPUTE_GRAPH_H
#define COMPUTE_GRAPH_H
#include <tuple>
#include "kernel/yosys.h"
YOSYS_NAMESPACE_BEGIN
template<
typename Fn, // Function type (deduplicated across whole graph)
typename Attr = std::tuple<>, // Call attributes (present in every node)
typename SparseAttr = std::tuple<>, // Sparse call attributes (optional per node)
typename Key = std::tuple<> // Stable keys to refer to nodes
>
struct ComputeGraph
{
struct Ref;
private:
// Functions are deduplicated by assigning unique ids
idict<Fn> functions;
struct Node {
int fn_index;
int arg_offset;
int arg_count;
Attr attr;
Node(int fn_index, Attr &&attr, int arg_offset, int arg_count = 0)
: fn_index(fn_index), arg_offset(arg_offset), arg_count(arg_count), attr(std::move(attr)) {}
Node(int fn_index, Attr const &attr, int arg_offset, int arg_count = 0)
: fn_index(fn_index), arg_offset(arg_offset), arg_count(arg_count), attr(attr) {}
};
std::vector<Node> nodes;
std::vector<int> args;
dict<Key, int> keys_;
dict<int, SparseAttr> sparse_attrs;
public:
template<typename Graph>
struct BaseRef
{
protected:
friend struct ComputeGraph;
Graph *graph_;
int index_;
BaseRef(Graph *graph, int index) : graph_(graph), index_(index) {
log_assert(index_ >= 0);
check();
}
void check() const { log_assert(index_ < graph_->size()); }
Node const &deref() const { check(); return graph_->nodes[index_]; }
public:
ComputeGraph const &graph() const { return *graph_; }
int index() const { return index_; }
int size() const { return deref().arg_count; }
BaseRef arg(int n) const
{
Node const &node = deref();
log_assert(n >= 0 && n < node.arg_count);
return BaseRef(graph_, graph_->args[node.arg_offset + n]);
}
std::vector<int>::const_iterator arg_indices_cbegin() const
{
Node const &node = deref();
return graph_->args.cbegin() + node.arg_offset;
}
std::vector<int>::const_iterator arg_indices_cend() const
{
Node const &node = deref();
return graph_->args.cbegin() + node.arg_offset + node.arg_count;
}
Fn const &function() const { return graph_->functions[deref().fn_index]; }
Attr const &attr() const { return deref().attr; }
bool has_sparse_attr() const { return graph_->sparse_attrs.count(index_); }
SparseAttr const &sparse_attr() const
{
auto found = graph_->sparse_attrs.find(index_);
log_assert(found != graph_->sparse_attrs.end());
return found->second;
}
};
using ConstRef = BaseRef<ComputeGraph const>;
struct Ref : public BaseRef<ComputeGraph>
{
private:
friend struct ComputeGraph;
Ref(ComputeGraph *graph, int index) : BaseRef<ComputeGraph>(graph, index) {}
Node &deref() const { this->check(); return this->graph_->nodes[this->index_]; }
public:
Ref(BaseRef<ComputeGraph> ref) : Ref(ref.graph_, ref.index_) {}
void set_function(Fn const &function) const
{
deref().fn_index = this->graph_->functions(function);
}
Attr &attr() const { return deref().attr; }
void append_arg(ConstRef arg) const
{
log_assert(arg.graph_ == this->graph_);
append_arg(arg.index());
}
void append_arg(int arg) const
{
log_assert(arg >= 0 && arg < this->graph_->size());
Node &node = deref();
if (node.arg_offset + node.arg_count != GetSize(this->graph_->args))
move_args(node);
this->graph_->args.push_back(arg);
node.arg_count++;
}
operator ConstRef() const
{
return ConstRef(this->graph_, this->index_);
}
SparseAttr &sparse_attr() const
{
return this->graph_->sparse_attrs[this->index_];
}
void clear_sparse_attr() const
{
this->graph_->sparse_attrs.erase(this->index_);
}
void assign_key(Key const &key) const
{
this->graph_->keys_.emplace(key, this->index_);
}
private:
void move_args(Node &node) const
{
auto &args = this->graph_->args;
int old_offset = node.arg_offset;
node.arg_offset = GetSize(args);
for (int i = 0; i != node.arg_count; ++i)
args.push_back(args[old_offset + i]);
}
};
bool has_key(Key const &key) const
{
return keys_.count(key);
}
dict<Key, int> const &keys() const
{
return keys_;
}
ConstRef operator()(Key const &key) const
{
auto it = keys_.find(key);
log_assert(it != keys_.end());
return (*this)[it->second];
}
Ref operator()(Key const &key)
{
auto it = keys_.find(key);
log_assert(it != keys_.end());
return (*this)[it->second];
}
int size() const { return GetSize(nodes); }
ConstRef operator[](int index) const { return ConstRef(this, index); }
Ref operator[](int index) { return Ref(this, index); }
Ref add(Fn const &function, Attr &&attr)
{
int index = GetSize(nodes);
int fn_index = functions(function);
nodes.emplace_back(fn_index, std::move(attr), GetSize(args));
return Ref(this, index);
}
Ref add(Fn const &function, Attr const &attr)
{
int index = GetSize(nodes);
int fn_index = functions(function);
nodes.emplace_back(fn_index, attr, GetSize(args));
return Ref(this, index);
}
template<typename T>
Ref add(Fn const &function, Attr const &attr, T &&args)
{
Ref added = add(function, attr);
for (auto arg : args)
added.append_arg(arg);
return added;
}
template<typename T>
Ref add(Fn const &function, Attr &&attr, T &&args)
{
Ref added = add(function, std::move(attr));
for (auto arg : args)
added.append_arg(arg);
return added;
}
Ref add(Fn const &function, Attr const &attr, std::initializer_list<Ref> args)
{
Ref added = add(function, attr);
for (auto arg : args)
added.append_arg(arg);
return added;
}
Ref add(Fn const &function, Attr &&attr, std::initializer_list<Ref> args)
{
Ref added = add(function, std::move(attr));
for (auto arg : args)
added.append_arg(arg);
return added;
}
template<typename T>
Ref add(Fn const &function, Attr const &attr, T begin, T end)
{
Ref added = add(function, attr);
for (; begin != end; ++begin)
added.append_arg(*begin);
return added;
}
void compact_args()
{
std::vector<int> new_args;
for (auto &node : nodes)
{
int new_offset = GetSize(new_args);
for (int i = 0; i < node.arg_count; i++)
new_args.push_back(args[node.arg_offset + i]);
node.arg_offset = new_offset;
}
std::swap(args, new_args);
}
void permute(std::vector<int> const &perm)
{
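// Reorder (and possibly drop) nodes so that new node i is old node perm[i].
// Arguments and keys are remapped via the inverse permutation; dropped nodes
// must not be referenced by any surviving argument or key.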
log_assert(perm.size() <= nodes.size());
std::vector<int> inv_perm;
inv_perm.resize(nodes.size(), -1);
for (int i = 0; i < GetSize(perm); ++i)
{
int j = perm[i];
log_assert(j >= 0 && j < GetSize(nodes));
log_assert(inv_perm[j] == -1);
inv_perm[j] = i;
}
permute(perm, inv_perm);
}
void permute(std::vector<int> const &perm, std::vector<int> const &inv_perm)
{
log_assert(inv_perm.size() == nodes.size());
std::vector<Node> new_nodes;
new_nodes.reserve(perm.size());
dict<int, SparseAttr> new_sparse_attrs;
for (int i : perm)
{
int j = GetSize(new_nodes);
new_nodes.emplace_back(std::move(nodes[i]));
auto found = sparse_attrs.find(i);
if (found != sparse_attrs.end())
new_sparse_attrs.emplace(j, std::move(found->second));
}
std::swap(nodes, new_nodes);
std::swap(sparse_attrs, new_sparse_attrs);
compact_args();
for (int &arg : args)
{
log_assert(arg < GetSize(inv_perm));
log_assert(inv_perm[arg] >= 0);
arg = inv_perm[arg];
}
for (auto &key : keys_)
{
log_assert(key.second < GetSize(inv_perm));
log_assert(inv_perm[key.second] >= 0);
key.second = inv_perm[key.second];
}
}
struct SccAdaptor
{
private:
ComputeGraph const &graph_;
std::vector<int> indices_;
public:
SccAdaptor(ComputeGraph const &graph) : graph_(graph)
{
indices_.resize(graph.size(), -1);
}
typedef int node_type;
struct node_enumerator {
private:
friend struct SccAdaptor;
int current, end;
node_enumerator(int current, int end) : current(current), end(end) {}
public:
bool finished() const { return current == end; }
node_type next() {
log_assert(!finished());
node_type result = current;
++current;
return result;
}
};
node_enumerator enumerate_nodes() {
return node_enumerator(0, GetSize(indices_));
}
struct successor_enumerator {
private:
friend struct SccAdaptor;
std::vector<int>::const_iterator current, end;
successor_enumerator(std::vector<int>::const_iterator current, std::vector<int>::const_iterator end) :
current(current), end(end) {}
public:
bool finished() const { return current == end; }
node_type next() {
log_assert(!finished());
node_type result = *current;
++current;
return result;
}
};
successor_enumerator enumerate_successors(int index) const {
auto const &ref = graph_[index];
return successor_enumerator(ref.arg_indices_cbegin(), ref.arg_indices_cend());
}
int &dfs_index(node_type const &node) { return indices_[node]; }
std::vector<int> const &dfs_indices() { return indices_; }
};
};
YOSYS_NAMESPACE_END
#endif
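
Illustrative usage sketch (not part of this diff): ComputeGraph is a generic DAG container whose nodes carry a deduplicated function value, a per-node attribute, an optional sparse attribute and optional stable keys. The helper name and the std::string/int template arguments below are arbitrary choices made for the sketch.

// Minimal ComputeGraph usage sketch (illustration only, not from this PR).
#include "kernel/compute_graph.h"
USING_YOSYS_NAMESPACE

static void compute_graph_demo()
{
	// Fn = std::string, default (empty tuple) attributes, Key = int.
	ComputeGraph<std::string, std::tuple<>, std::tuple<>, int> graph;

	auto a = graph.add("input", {});          // node 0
	auto b = graph.add("input", {});          // node 1
	auto sum = graph.add("add", {}, {a, b});  // node 2, arguments {0, 1}

	sum.assign_key(42);                       // stable key -> node index
	log("nodes=%d, add() has %d arguments\n", graph.size(), graph(42).size());
}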

949
kernel/drivertools.cc Normal file

@@ -0,0 +1,949 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Jannis Harder <jix@yosyshq.com> <me@jix.one>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include "kernel/drivertools.h"
YOSYS_NAMESPACE_BEGIN
DriveBit::DriveBit(SigBit const &bit)
{
if (bit.is_wire())
*this = DriveBitWire(bit.wire, bit.offset);
else
*this = bit.data;
}
void DriveBit::merge(DriveBit const &other)
{
if (other.type_ == DriveType::NONE)
return;
if (type_ == DriveType::NONE) {
*this = other;
return;
}
if (type_ != DriveType::MULTIPLE) {
DriveBitMultiple multi(std::move(*this));
*this = std::move(multi);
}
multiple().merge(other);
}
void DriveBitMultiple::merge(DriveBit const &single)
{
if (single.type() == DriveType::NONE)
return;
if (single.type() == DriveType::MULTIPLE) {
merge(single.multiple());
return;
}
multiple_.emplace(single);
}
void DriveBitMultiple::merge(DriveBit &&single)
{
if (single.type() == DriveType::NONE)
return;
if (single.type() == DriveType::MULTIPLE) {
merge(std::move(single.multiple()));
return;
}
multiple_.emplace(std::move(single));
}
DriveBitMultiple DriveChunkMultiple::operator[](int i) const
{
DriveBitMultiple result;
for (auto const &single : multiple_)
result.merge(single[i]);
return result;
}
bool DriveChunkWire::can_append(DriveBitWire const &bit) const
{
return bit.wire == wire && bit.offset == offset + width;
}
bool DriveChunkWire::try_append(DriveBitWire const &bit)
{
if (!can_append(bit))
return false;
width += 1;
return true;
}
bool DriveChunkWire::try_append(DriveChunkWire const &chunk)
{
if (chunk.wire != wire || chunk.offset != offset + width)
return false;
width += chunk.width;
return true;
}
bool DriveChunkPort::can_append(DriveBitPort const &bit) const
{
return bit.cell == cell && bit.port == port && bit.offset == offset + width;
}
bool DriveChunkPort::try_append(DriveBitPort const &bit)
{
if (!can_append(bit))
return false;
width += 1;
return true;
}
bool DriveChunkPort::try_append(DriveChunkPort const &chunk)
{
if (chunk.cell != cell || chunk.port != port || chunk.offset != offset + width)
return false;
width += chunk.width;
return true;
}
bool DriveChunkMarker::can_append(DriveBitMarker const &bit) const
{
return bit.marker == marker && bit.offset == offset + width;
}
bool DriveChunkMarker::try_append(DriveBitMarker const &bit)
{
if (!can_append(bit))
return false;
width += 1;
return true;
}
bool DriveChunkMarker::try_append(DriveChunkMarker const &chunk)
{
if (chunk.marker != marker || chunk.offset != offset + width)
return false;
width += chunk.width;
return true;
}
bool DriveChunkMultiple::can_append(DriveBitMultiple const &bit) const
{
if (bit.multiple().size() != multiple_.size())
return false;
int const_drivers = 0;
for (DriveChunk const &single : multiple_)
if (single.is_constant())
const_drivers += 1;
if (const_drivers > 1)
return false;
for (DriveBit const &single : bit.multiple())
if (single.is_constant())
const_drivers -= 1;
if (const_drivers != 0)
return false;
for (DriveChunk const &single : multiple_)
{
switch (single.type())
{
case DriveType::CONSTANT: {
} break;
case DriveType::WIRE: {
auto const &wire = single.wire();
DriveBit next = DriveBitWire(wire.wire, wire.offset + wire.width);
if (!bit.multiple().count(next))
return false;
} break;
case DriveType::PORT: {
auto const &port = single.port();
DriveBit next = DriveBitPort(port.cell, port.port, port.offset + port.width);
if (!bit.multiple().count(next))
return false;
} break;
case DriveType::MARKER: {
auto const &marker = single.marker();
DriveBit next = DriveBitMarker(marker.marker, marker.offset + marker.width);
if (!bit.multiple().count(next))
return false;
} break;
default:
return false;
}
}
return true;
}
bool DriveChunkMultiple::can_append(DriveChunkMultiple const &chunk) const
{
if (chunk.multiple().size() != multiple_.size())
return false;
int const_drivers = 0;
for (DriveChunk const &single : multiple_)
if (single.is_constant())
const_drivers += 1;
if (const_drivers > 1)
return false;
for (DriveChunk const &single : chunk.multiple())
if (single.is_constant())
const_drivers -= 1;
if (const_drivers != 0)
return false;
for (DriveChunk const &single : multiple_)
{
switch (single.type())
{
case DriveType::CONSTANT: {
} break;
case DriveType::WIRE: {
auto const &wire = single.wire();
DriveChunk next = DriveChunkWire(wire.wire, wire.offset + wire.width, chunk.size());
if (!chunk.multiple().count(next))
return false;
} break;
case DriveType::PORT: {
auto const &port = single.port();
DriveChunk next = DriveChunkPort(port.cell, port.port, port.offset + port.width, chunk.size());
if (!chunk.multiple().count(next))
return false;
} break;
case DriveType::MARKER: {
auto const &marker = single.marker();
DriveChunk next = DriveChunkMarker(marker.marker, marker.offset + marker.width, chunk.size());
if (!chunk.multiple().count(next))
return false;
} break;
default:
return false;
}
}
return true;
}
bool DriveChunkMultiple::try_append(DriveBitMultiple const &bit)
{
if (!can_append(bit))
return false;
width_ += 1;
State constant;
for (DriveBit const &single : bit.multiple())
if (single.is_constant())
constant = single.constant();
for (DriveChunk &single : multiple_)
{
switch (single.type())
{
case DriveType::CONSTANT: {
single.constant().bits.push_back(constant);
} break;
case DriveType::WIRE: {
single.wire().width += 1;
} break;
case DriveType::PORT: {
single.port().width += 1;
} break;
case DriveType::MARKER: {
single.marker().width += 1;
} break;
default:
log_abort();
}
}
return true;
}
bool DriveChunkMultiple::try_append(DriveChunkMultiple const &chunk)
{
if (!can_append(chunk))
return false;
int width = chunk.size();
width_ += width;
Const constant;
for (DriveChunk const &single : chunk.multiple())
if (single.is_constant())
constant = single.constant();
for (DriveChunk &single : multiple_)
{
switch (single.type())
{
case DriveType::CONSTANT: {
auto &bits = single.constant().bits;
bits.insert(bits.end(), constant.bits.begin(), constant.bits.end());
} break;
case DriveType::WIRE: {
single.wire().width += width;
} break;
case DriveType::PORT: {
single.port().width += width;
} break;
case DriveType::MARKER: {
single.marker().width += width;
} break;
default:
log_abort();
}
}
return true;
}
bool DriveChunk::can_append(DriveBit const &bit) const
{
if (size() == 0)
return true;
if (bit.type() != type_)
return false;
switch (type_)
{
case DriveType::NONE:
return true;
case DriveType::CONSTANT:
return true;
case DriveType::WIRE:
return wire_.can_append(bit.wire());
case DriveType::PORT:
return port_.can_append(bit.port());
case DriveType::MULTIPLE:
return multiple_.can_append(bit.multiple());
default:
log_abort();
}
}
bool DriveChunk::try_append(DriveBit const &bit)
{
if (size() == 0)
*this = bit;
if (bit.type() != type_)
return false;
switch (type_)
{
case DriveType::NONE:
none_ += 1;
return true;
case DriveType::CONSTANT:
constant_.bits.push_back(bit.constant());
return true;
case DriveType::WIRE:
return wire_.try_append(bit.wire());
case DriveType::PORT:
return port_.try_append(bit.port());
case DriveType::MULTIPLE:
return multiple_.try_append(bit.multiple());
default:
log_abort();
}
}
bool DriveChunk::try_append(DriveChunk const &chunk)
{
if (size() == 0)
*this = chunk;
if (chunk.type_ != type_)
return false;
switch (type_)
{
case DriveType::NONE:
none_ += chunk.none_;
return true;
case DriveType::CONSTANT:
constant_.bits.insert(constant_.bits.end(), chunk.constant_.bits.begin(), chunk.constant_.bits.end());
return true;
case DriveType::WIRE:
return wire_.try_append(chunk.wire());
case DriveType::PORT:
return port_.try_append(chunk.port());
case DriveType::MARKER:
return marker_.try_append(chunk.marker());
case DriveType::MULTIPLE:
return multiple_.try_append(chunk.multiple());
}
log_abort();
}
void DriveSpec::append(DriveBit const &bit)
{
hash_ = 0;
if (!packed()) {
bits_.push_back(bit);
width_ += 1;
return;
}
if (chunks_.empty() || !chunks_.back().try_append(bit))
chunks_.emplace_back(bit);
width_ += 1;
}
void DriveSpec::append(DriveChunk const &chunk)
{
hash_ = 0;
pack();
if (chunks_.empty() || !chunks_.back().try_append(chunk))
chunks_.emplace_back(chunk);
width_ += chunk.size();
}
void DriveSpec::pack() const {
if (bits_.empty())
return;
std::vector<DriveBit> bits(std::move(bits_));
for (auto &bit : bits)
if (chunks_.empty() || !chunks_.back().try_append(bit))
chunks_.emplace_back(std::move(bit));
}
void DriveSpec::unpack() const {
if (chunks_.empty())
return;
for (auto &chunk : chunks_)
{
for (int i = 0, width = chunk.size(); i != width; ++i)
{
bits_.emplace_back(chunk[i]);
}
}
chunks_.clear();
}
void DriveSpec::compute_width()
{
width_ = 0;
for (auto const &chunk : chunks_)
width_ += chunk.size();
}
void DriverMap::DriveBitGraph::add_edge(DriveBitId src, DriveBitId dst)
{
if (first_edges.emplace(src, dst).first->second == dst)
return;
if (second_edges.emplace(src, dst).first->second == dst)
return;
more_edges[src].emplace(dst);
}
DriverMap::DriveBitId DriverMap::DriveBitGraph::pop_edge(DriveBitId src)
{
// TODO unused I think?
auto found_more = more_edges.find(src);
if (found_more != more_edges.end()) {
auto result = found_more->second.pop();
if (found_more->second.empty())
more_edges.erase(found_more);
return result;
}
auto found_second = second_edges.find(src);
if (found_second != second_edges.end()) {
auto result = found_second->second;
second_edges.erase(found_second);
return result;
}
auto found_first = first_edges.find(src);
if (found_first != first_edges.end()) {
auto result = found_first->second;
first_edges.erase(found_first);
return result;
}
return DriveBitId();
}
void DriverMap::DriveBitGraph::clear(DriveBitId src)
{
first_edges.erase(src);
second_edges.erase(src);
more_edges.erase(src);
}
bool DriverMap::DriveBitGraph::contains(DriveBitId src)
{
return first_edges.count(src);
}
int DriverMap::DriveBitGraph::count(DriveBitId src)
{
if (!first_edges.count(src))
return 0;
if (!second_edges.count(src))
return 1;
auto found = more_edges.find(src);
if (found == more_edges.end())
return 2;
return GetSize(found->second) + 2;
}
DriverMap::DriveBitId DriverMap::DriveBitGraph::at(DriveBitId src, int index)
{
if (index == 0)
return first_edges.at(src);
else if (index == 1)
return second_edges.at(src);
else
return *more_edges.at(src).element(index - 2);
}
DriverMap::BitMode DriverMap::bit_mode(DriveBit const &bit)
{
switch (bit.type())
{
case DriveType::NONE:
return BitMode::NONE;
case DriveType::CONSTANT:
// TODO how to handle Sx here?
return bit.constant() == State::Sz ? BitMode::NONE : BitMode::DRIVER;
case DriveType::WIRE: {
auto const &wire = bit.wire();
bool driver = wire.wire->port_input;
bool driven = wire.wire->port_output;
if (driver && !driven)
return BitMode::DRIVER;
else if (driven && !driver)
return BitMode::DRIVEN;
else if (driver && driven)
return BitMode::TRISTATE;
else
return keep_wire(bit.wire().wire) ? BitMode::KEEP : BitMode::NONE;
}
case DriveType::PORT: {
auto const &port = bit.port();
bool driver = celltypes.cell_output(port.cell->type, port.port);
bool driven = celltypes.cell_input(port.cell->type, port.port);
if (driver && !driven)
return BitMode::DRIVER;
else if (driven && !driver)
return BitMode::DRIVEN_UNIQUE;
else
return BitMode::TRISTATE;
}
case DriveType::MARKER: {
// TODO user supplied classification
log_abort();
}
default:
log_abort();
}
}
DriverMap::DriveBitId DriverMap::id_from_drive_bit(DriveBit const &bit)
{
switch (bit.type())
{
case DriveType::NONE:
return -1;
case DriveType::CONSTANT:
return (int)bit.constant();
case DriveType::WIRE: {
auto const &wire_bit = bit.wire();
int offset = next_offset;
auto insertion = wire_offsets.emplace(wire_bit.wire, offset);
if (insertion.second) {
if (wire_bit.wire->width == 1) {
log_assert(wire_bit.offset == 0);
isolated_drive_bits.emplace(offset, bit);
} else
drive_bits.emplace(offset, DriveBitWire(wire_bit.wire, 0));
next_offset += wire_bit.wire->width;
}
return insertion.first->second.id + wire_bit.offset;
}
case DriveType::PORT: {
auto const &port_bit = bit.port();
auto key = std::make_pair(port_bit.cell, port_bit.port);
int offset = next_offset;
auto insertion = port_offsets.emplace(key, offset);
if (insertion.second) {
int width = port_bit.cell->connections().at(port_bit.port).size();
if (width == 1 && offset == 0) {
log_assert(port_bit.offset == 0);
isolated_drive_bits.emplace(offset, bit);
} else
drive_bits.emplace(offset, DriveBitPort(port_bit.cell, port_bit.port, 0));
next_offset += width;
}
return insertion.first->second.id + port_bit.offset;
}
default:
log_assert(false && "unsupported DriveType in DriverMap");
}
log_abort();
}
DriveBit DriverMap::drive_bit_from_id(DriveBitId id)
{
auto found_isolated = isolated_drive_bits.find(id);
if (found_isolated != isolated_drive_bits.end())
return found_isolated->second;
auto found = drive_bits.upper_bound(id);
if (found == drive_bits.begin()) {
return id < 0 ? DriveBit() : DriveBit((State) id.id);
}
--found;
DriveBit result = found->second;
if (result.is_wire()) {
result.wire().offset += id.id - found->first.id;
} else {
log_assert(result.is_port());
result.port().offset += id.id - found->first.id;
}
return result;
}
void DriverMap::connect_directed_merge(DriveBitId driven_id, DriveBitId driver_id)
{
if (driven_id == driver_id)
return;
same_driver.merge(driven_id, driver_id);
for (int i = 0, end = connected_drivers.count(driven_id); i != end; ++i)
connected_drivers.add_edge(driver_id, connected_drivers.at(driven_id, i));
connected_drivers.clear(driven_id);
for (int i = 0, end = connected_undirected.count(driven_id); i != end; ++i)
connected_undirected.add_edge(driver_id, connected_undirected.at(driven_id, i));
connected_undirected.clear(driven_id);
}
void DriverMap::connect_directed_buffer(DriveBitId driven_id, DriveBitId driver_id)
{
connected_drivers.add_edge(driven_id, driver_id);
}
void DriverMap::connect_undirected(DriveBitId a_id, DriveBitId b_id)
{
connected_undirected.add_edge(a_id, b_id);
connected_undirected.add_edge(b_id, a_id);
}
void DriverMap::add(Module *module)
{
for (auto const &conn : module->connections())
add(conn.first, conn.second);
for (auto cell : module->cells())
for (auto const &conn : cell->connections())
add_port(cell, conn.first, conn.second);
}
// Add a single bit connection to the driver map.
void DriverMap::add(DriveBit const &a, DriveBit const &b)
{
DriveBitId a_id = id_from_drive_bit(a);
DriveBitId b_id = id_from_drive_bit(b);
DriveBitId orig_a_id = a_id;
DriveBitId orig_b_id = b_id;
a_id = same_driver.find(a_id);
b_id = same_driver.find(b_id);
if (a_id == b_id)
return;
BitMode a_mode = bit_mode(orig_a_id == a_id ? a : drive_bit_from_id(a_id));
BitMode b_mode = bit_mode(orig_b_id == b_id ? b : drive_bit_from_id(b_id));
// If either bit is just a wire that we don't need to keep, merge and
// use the other end as representative bit.
if (a_mode == BitMode::NONE && !(b_mode == BitMode::DRIVEN_UNIQUE || b_mode == BitMode::DRIVEN))
connect_directed_merge(a_id, b_id);
else if (b_mode == BitMode::NONE && !(a_mode == BitMode::DRIVEN_UNIQUE || a_mode == BitMode::DRIVEN))
connect_directed_merge(b_id, a_id);
// If either bit requires a driven value and has a unique driver, merge
// and use the other end as representative bit.
else if (a_mode == BitMode::DRIVEN_UNIQUE && !(b_mode == BitMode::DRIVEN_UNIQUE || b_mode == BitMode::DRIVEN))
connect_directed_buffer(a_id, b_id);
else if (b_mode == BitMode::DRIVEN_UNIQUE && !(a_mode == BitMode::DRIVEN_UNIQUE || a_mode == BitMode::DRIVEN))
connect_directed_buffer(b_id, a_id);
// If either bit only drives a value, store a directed connection from
// it to the other bit.
else if (a_mode == BitMode::DRIVER)
connect_directed_buffer(b_id, a_id);
else if (b_mode == BitMode::DRIVER)
connect_directed_buffer(a_id, b_id);
// Otherwise we store an undirected connection which we will resolve
// during querying.
else
connect_undirected(a_id, b_id);
return;
}
// Specialized version that avoids unpacking
void DriverMap::add(SigSpec const &a, SigSpec const &b)
{
log_assert(a.size() == b.size());
auto const &a_chunks = a.chunks();
auto const &b_chunks = b.chunks();
auto a_chunk = a_chunks.begin();
auto a_end = a_chunks.end();
int a_offset = 0;
auto b_chunk = b_chunks.begin();
int b_offset = 0;
SigChunk tmp_a, tmp_b;
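// Walk both chunk lists in lockstep, splitting at chunk boundaries so that each
// add() call below connects sub-chunks of equal width.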
while (a_chunk != a_end) {
int a_width = a_chunk->width - a_offset;
if (a_width == 0) {
a_offset = 0;
++a_chunk;
continue;
}
int b_width = b_chunk->width - b_offset;
if (b_width == 0) {
b_offset = 0;
++b_chunk;
continue;
}
int width = std::min(a_width, b_width);
log_assert(width > 0);
SigChunk const &a_subchunk =
a_offset == 0 && a_width == width ? *a_chunk : a_chunk->extract(a_offset, width);
SigChunk const &b_subchunk =
b_offset == 0 && b_width == width ? *b_chunk : b_chunk->extract(b_offset, width);
add(a_subchunk, b_subchunk);
a_offset += width;
b_offset += width;
}
}
void DriverMap::add_port(Cell *cell, IdString const &port, SigSpec const &b)
{
int offset = 0;
for (auto const &chunk : b.chunks()) {
add(chunk, DriveChunkPort(cell, port, offset, chunk.width));
offset += chunk.size();
}
}
void DriverMap::orient_undirected(DriveBitId id)
{
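// Orient the undirected connections of the component containing `id`: a first BFS
// collects the component and its candidate drivers, then one BFS per driver records
// distances and orients each undirected edge from the farther bit towards the bit
// closer to that driver.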
pool<DriveBitId> &seen = orient_undirected_seen;
pool<DriveBitId> &drivers = orient_undirected_drivers;
dict<DriveBitId, int> &distance = orient_undirected_distance;
seen.clear();
drivers.clear();
seen.emplace(id);
for (int pos = 0; pos < GetSize(seen); ++pos) {
DriveBitId current = *seen.element(seen.size() - 1 - pos);
DriveBit bit = drive_bit_from_id(current);
BitMode mode = bit_mode(bit);
if (mode == BitMode::DRIVER || mode == BitMode::TRISTATE)
drivers.emplace(current);
if (connected_drivers.contains(current))
drivers.emplace(current);
int undirected_driver_count = connected_undirected.count(current);
for (int i = 0; i != undirected_driver_count; ++i)
seen.emplace(same_driver.find(connected_undirected.at(current, i)));
}
if (drivers.empty())
for (auto seen_id : seen)
drivers.emplace(seen_id);
for (auto driver : drivers)
{
distance.clear();
distance.emplace(driver, 0);
for (int pos = 0; pos < GetSize(distance); ++pos) {
auto current_it = distance.element(distance.size() - 1 - pos);
DriveBitId current = current_it->first;
int undirected_driver_count = connected_undirected.count(current);
for (int i = 0; i != undirected_driver_count; ++i)
{
DriveBitId next = same_driver.find(connected_undirected.at(current, i));
auto emplaced = distance.emplace(next, current_it->second + 1);
if (emplaced.first->second == current_it->second + 1)
connected_oriented.add_edge(next, current);
}
}
}
for (auto seen_id : seen)
oriented_present.emplace(seen_id);
}
DriveBit DriverMap::operator()(DriveBit const &bit)
{
if (bit.type() == DriveType::MARKER || bit.type() == DriveType::NONE)
return bit;
if (bit.type() == DriveType::MULTIPLE)
{
DriveBit result;
for (auto const &inner : bit.multiple().multiple())
result.merge((*this)(inner));
return result;
}
DriveBitId bit_id = id_from_drive_bit(bit);
DriveBitId bit_repr_id = same_driver.find(bit_id);
DriveBit bit_repr = drive_bit_from_id(bit_repr_id);
BitMode mode = bit_mode(bit_repr);
if (mode == BitMode::KEEP && bit_repr_id != bit_id)
return bit_repr;
int implicit_driver_count = connected_drivers.count(bit_repr_id);
if (connected_undirected.contains(bit_repr_id) && !oriented_present.count(bit_repr_id))
orient_undirected(bit_repr_id);
DriveBit driver;
if (mode == BitMode::DRIVER || mode == BitMode::TRISTATE)
driver = bit_repr;
for (int i = 0; i != implicit_driver_count; ++i)
driver.merge(drive_bit_from_id(connected_drivers.at(bit_repr_id, i)));
int oriented_driver_count = connected_oriented.count(bit_repr_id);
for (int i = 0; i != oriented_driver_count; ++i)
driver.merge(drive_bit_from_id(connected_oriented.at(bit_repr_id, i)));
return driver;
}
DriveSpec DriverMap::operator()(DriveSpec spec)
{
DriveSpec result;
for (int i = 0, width = spec.size(); i != width; ++i)
result.append((*this)(spec[i]));
return result;
}
const char *log_signal(DriveChunkWire const &chunk)
{
const char *id = log_id(chunk.wire->name);
if (chunk.is_whole())
return id;
if (chunk.width == 1)
return log_str(stringf("%s [%d]", id, chunk.offset));
return log_str(stringf("%s [%d:%d]", id, chunk.offset + chunk.width - 1, chunk.offset));
}
const char *log_signal(DriveChunkPort const &chunk)
{
const char *cell_id = log_id(chunk.cell->name);
const char *port_id = log_id(chunk.port);
if (chunk.is_whole())
return log_str(stringf("%s <%s>", cell_id, port_id));
if (chunk.width == 1)
return log_str(stringf("%s <%s> [%d]", cell_id, port_id, chunk.offset));
return log_str(stringf("%s <%s> [%d:%d]", cell_id, port_id, chunk.offset + chunk.width - 1, chunk.offset));
}
const char *log_signal(DriveChunkMarker const &chunk)
{
if (chunk.width == 1)
return log_str(stringf("<marker %d> [%d]", chunk.marker, chunk.offset));
return log_str(stringf("<marker %d> [%d:%d]", chunk.marker, chunk.offset + chunk.width - 1, chunk.offset));
}
const char *log_signal(DriveChunk const &chunk)
{
switch (chunk.type())
{
case DriveType::NONE:
return log_str(stringf("<none x%d>", chunk.size()));
case DriveType::CONSTANT:
return log_const(chunk.constant());
case DriveType::WIRE:
return log_signal(chunk.wire());
case DriveType::PORT:
return log_signal(chunk.port());
case DriveType::MARKER:
return log_signal(chunk.marker());
case DriveType::MULTIPLE: {
std::string str = "<multiple";
const char *sep = " ";
for (auto const &single : chunk.multiple().multiple()) {
str += sep;
sep = ", ";
str += log_signal(single);
}
str += ">";
return log_str(str);
}
default:
log_abort();
}
}
const char *log_signal(DriveSpec const &spec)
{
auto &chunks = spec.chunks();
if (chunks.empty())
return "{}";
if (chunks.size() == 1)
return log_signal(chunks[0]);
std::string str;
const char *sep = "{ ";
for (auto i = chunks.rbegin(), end = chunks.rend(); i != end; ++i)
{
str += sep;
sep = " ";
str += log_signal(*i);
}
str += " }";
return log_str(str);
}
YOSYS_NAMESPACE_END
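
Usage note (not part of this diff): DriverMap indexes every connection and cell port of a module and, applied to a DriveBit or DriveSpec, resolves it to whatever drives it. A minimal sketch follows, assuming a default-constructed DriverMap as used by functional.cc; the helper name is a placeholder.

// Minimal DriverMap usage sketch (illustration only, not from this PR).
#include "kernel/drivertools.h"
USING_YOSYS_NAMESPACE

static void print_driver_of(RTLIL::Module *module, RTLIL::Wire *wire)
{
	DriverMap dm;
	dm.add(module); // index all module connections and cell port connections

	// Query the driver of the whole wire and print it.
	DriveSpec spec(DriveChunkWire(wire, 0, wire->width));
	DriveSpec driver = dm(spec);
	log("driver of %s: %s\n", log_id(wire), log_signal(driver));
}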

1332
kernel/drivertools.h Normal file

File diff suppressed because it is too large

853
kernel/functional.cc Normal file

@@ -0,0 +1,853 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Emily Schmidt <emily@yosyshq.com>
* Copyright (C) 2024 National Technology and Engineering Solutions of Sandia, LLC
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include "kernel/functional.h"
#include "kernel/topo_scc.h"
#include "ff.h"
#include "ffinit.h"
#include <deque>
YOSYS_NAMESPACE_BEGIN
namespace Functional {
const char *fn_to_string(Fn fn) {
switch(fn) {
case Fn::invalid: return "invalid";
case Fn::buf: return "buf";
case Fn::slice: return "slice";
case Fn::zero_extend: return "zero_extend";
case Fn::sign_extend: return "sign_extend";
case Fn::concat: return "concat";
case Fn::add: return "add";
case Fn::sub: return "sub";
case Fn::mul: return "mul";
case Fn::unsigned_div: return "unsigned_div";
case Fn::unsigned_mod: return "unsigned_mod";
case Fn::bitwise_and: return "bitwise_and";
case Fn::bitwise_or: return "bitwise_or";
case Fn::bitwise_xor: return "bitwise_xor";
case Fn::bitwise_not: return "bitwise_not";
case Fn::reduce_and: return "reduce_and";
case Fn::reduce_or: return "reduce_or";
case Fn::reduce_xor: return "reduce_xor";
case Fn::unary_minus: return "unary_minus";
case Fn::equal: return "equal";
case Fn::not_equal: return "not_equal";
case Fn::signed_greater_than: return "signed_greater_than";
case Fn::signed_greater_equal: return "signed_greater_equal";
case Fn::unsigned_greater_than: return "unsigned_greater_than";
case Fn::unsigned_greater_equal: return "unsigned_greater_equal";
case Fn::logical_shift_left: return "logical_shift_left";
case Fn::logical_shift_right: return "logical_shift_right";
case Fn::arithmetic_shift_right: return "arithmetic_shift_right";
case Fn::mux: return "mux";
case Fn::constant: return "constant";
case Fn::input: return "input";
case Fn::state: return "state";
case Fn::memory_read: return "memory_read";
case Fn::memory_write: return "memory_write";
}
log_error("fn_to_string: unknown Functional::Fn value %d", (int)fn);
}
vector<IRInput const*> IR::inputs(IdString kind) const {
vector<IRInput const*> ret;
for (const auto &[name, input] : _inputs)
if(input.kind == kind)
ret.push_back(&input);
return ret;
}
vector<IROutput const*> IR::outputs(IdString kind) const {
vector<IROutput const*> ret;
for (const auto &[name, output] : _outputs)
if(output.kind == kind)
ret.push_back(&output);
return ret;
}
vector<IRState const*> IR::states(IdString kind) const {
vector<IRState const*> ret;
for (const auto &[name, state] : _states)
if(state.kind == kind)
ret.push_back(&state);
return ret;
}
vector<IRInput const*> IR::all_inputs() const {
vector<IRInput const*> ret;
for (const auto &[name, input] : _inputs)
ret.push_back(&input);
return ret;
}
vector<IROutput const*> IR::all_outputs() const {
vector<IROutput const*> ret;
for (const auto &[name, output] : _outputs)
ret.push_back(&output);
return ret;
}
vector<IRState const*> IR::all_states() const {
vector<IRState const*> ret;
for (const auto &[name, state] : _states)
ret.push_back(&state);
return ret;
}
struct PrintVisitor : DefaultVisitor<std::string> {
std::function<std::string(Node)> np;
PrintVisitor(std::function<std::string(Node)> np) : np(np) { }
// as a general rule the default handler is good enough iff the only arguments are of type Node
std::string slice(Node, Node a, int offset, int out_width) override { return "slice(" + np(a) + ", " + std::to_string(offset) + ", " + std::to_string(out_width) + ")"; }
std::string zero_extend(Node, Node a, int out_width) override { return "zero_extend(" + np(a) + ", " + std::to_string(out_width) + ")"; }
std::string sign_extend(Node, Node a, int out_width) override { return "sign_extend(" + np(a) + ", " + std::to_string(out_width) + ")"; }
std::string constant(Node, RTLIL::Const const& value) override { return "constant(" + value.as_string() + ")"; }
std::string input(Node, IdString name, IdString kind) override { return "input(" + name.str() + ", " + kind.str() + ")"; }
std::string state(Node, IdString name, IdString kind) override { return "state(" + name.str() + ", " + kind.str() + ")"; }
std::string default_handler(Node self) override {
std::string ret = fn_to_string(self.fn());
ret += "(";
for(size_t i = 0; i < self.arg_count(); i++) {
if(i > 0) ret += ", ";
ret += np(self.arg(i));
}
ret += ")";
return ret;
}
};
std::string Node::to_string()
{
return to_string([](Node n) { return RTLIL::unescape_id(n.name()); });
}
std::string Node::to_string(std::function<std::string(Node)> np)
{
return visit(PrintVisitor(np));
}
class CellSimplifier {
Factory &factory;
Node sign(Node a) {
return factory.slice(a, a.width() - 1, 1);
}
Node neg_if(Node a, Node s) {
return factory.mux(a, factory.unary_minus(a), s);
}
Node abs(Node a) {
return neg_if(a, sign(a));
}
Node handle_shift(Node a, Node b, bool is_right, bool is_signed) {
// to prevent new_width == 0, we handle this case separately
if(a.width() == 1) {
if(!is_signed)
return factory.bitwise_and(a, factory.bitwise_not(factory.reduce_or(b)));
else
return a;
}
int new_width = ceil_log2(a.width());
Node b_truncated = factory.extend(b, new_width, false);
Node y =
!is_right ? factory.logical_shift_left(a, b_truncated) :
!is_signed ? factory.logical_shift_right(a, b_truncated) :
factory.arithmetic_shift_right(a, b_truncated);
if(b.width() <= new_width)
return y;
Node overflow = factory.unsigned_greater_equal(b, factory.constant(RTLIL::Const(a.width(), b.width())));
Node y_if_overflow = is_signed ? factory.extend(sign(a), a.width(), true) : factory.constant(RTLIL::Const(State::S0, a.width()));
return factory.mux(y, y_if_overflow, overflow);
}
public:
Node logical_shift_left(Node a, Node b) { return handle_shift(a, b, false, false); }
Node logical_shift_right(Node a, Node b) { return handle_shift(a, b, true, false); }
Node arithmetic_shift_right(Node a, Node b) { return handle_shift(a, b, true, true); }
Node bitwise_mux(Node a, Node b, Node s) {
Node aa = factory.bitwise_and(a, factory.bitwise_not(s));
Node bb = factory.bitwise_and(b, s);
return factory.bitwise_or(aa, bb);
}
CellSimplifier(Factory &f) : factory(f) {}
private:
Node handle_pow(Node a0, Node b, int y_width, bool is_signed) {
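// Exponentiation by squaring: walk the bits of b, multiplying the accumulator r
// by the current power of a whenever the bit is set, then squaring a.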
Node a = factory.extend(a0, y_width, is_signed);
Node r = factory.constant(Const(1, y_width));
for(int i = 0; i < b.width(); i++) {
Node b_bit = factory.slice(b, i, 1);
r = factory.mux(r, factory.mul(r, a), b_bit);
a = factory.mul(a, a);
}
if (is_signed) {
Node a_ge_1 = factory.unsigned_greater_than(abs(a0), factory.constant(Const(1, a0.width())));
Node zero_result = factory.bitwise_and(a_ge_1, sign(b));
r = factory.mux(r, factory.constant(Const(0, y_width)), zero_result);
}
return r;
}
Node handle_bmux(Node a, Node s, int a_offset, int width, int sn) {
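// Recursively build a mux tree over the select bits: s[sn-1] chooses between the
// lower and the upper half of the remaining slice of a.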
if(sn < 1)
return factory.slice(a, a_offset, width);
else {
Node y0 = handle_bmux(a, s, a_offset, width, sn - 1);
Node y1 = handle_bmux(a, s, a_offset + (width << (sn - 1)), width, sn - 1);
return factory.mux(y0, y1, factory.slice(s, sn - 1, 1));
}
}
Node handle_pmux(Node a, Node b, Node s) {
// TODO : what to do about multiple b bits set ?
log_assert(b.width() == a.width() * s.width());
Node y = a;
for(int i = 0; i < s.width(); i++)
y = factory.mux(y, factory.slice(b, a.width() * i, a.width()), factory.slice(s, i, 1));
return y;
}
dict<IdString, Node> handle_fa(Node a, Node b, Node c) {
Node t1 = factory.bitwise_xor(a, b);
Node t2 = factory.bitwise_and(a, b);
Node t3 = factory.bitwise_and(c, t1);
Node y = factory.bitwise_xor(c, t1);
Node x = factory.bitwise_or(t2, t3);
return {{ID(X), x}, {ID(Y), y}};
}
dict<IdString, Node> handle_alu(Node a_in, Node b_in, int y_width, bool is_signed, Node ci, Node bi) {
Node a = factory.extend(a_in, y_width, is_signed);
Node b_uninverted = factory.extend(b_in, y_width, is_signed);
Node b = factory.mux(b_uninverted, factory.bitwise_not(b_uninverted), bi);
Node x = factory.bitwise_xor(a, b);
// we can compute the carry into each bit using (a+b+c)^a^b. since we want the carry out,
// i.e. the carry into the next bit, we have to add an extra bit to a and b, and
// then slice off the bottom bit of the result.
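// (For example, with y_width == 2 the extended operands are 3 bits wide, carries =
// y_extra ^ a_extra ^ b_extra holds the carry into bits 0..2, and co = carries[2:1]
// is the carry out of bits 0 and 1.)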
Node a_extra = factory.extend(a, y_width + 1, false);
Node b_extra = factory.extend(b, y_width + 1, false);
Node y_extra = factory.add(factory.add(a_extra, b_extra), factory.extend(ci, a.width() + 1, false));
Node y = factory.slice(y_extra, 0, y_width);
Node carries = factory.bitwise_xor(y_extra, factory.bitwise_xor(a_extra, b_extra));
Node co = factory.slice(carries, 1, y_width);
return {{ID(X), x}, {ID(Y), y}, {ID(CO), co}};
}
Node handle_lcu(Node p, Node g, Node ci) {
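// The lookahead recurrence c[i+1] = g[i] | (p[i] & c[i]) coincides with the carry
// chain of the addition g + (p | g) + ci, so reuse handle_alu and take its CO output.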
return handle_alu(g, factory.bitwise_or(p, g), g.width(), false, ci, factory.constant(Const(State::S0, 1))).at(ID(CO));
}
public:
std::variant<dict<IdString, Node>, Node> handle(IdString cellName, IdString cellType, dict<IdString, Const> parameters, dict<IdString, Node> inputs)
{
int a_width = parameters.at(ID(A_WIDTH), Const(-1)).as_int();
int b_width = parameters.at(ID(B_WIDTH), Const(-1)).as_int();
int y_width = parameters.at(ID(Y_WIDTH), Const(-1)).as_int();
bool a_signed = parameters.at(ID(A_SIGNED), Const(0)).as_bool();
bool b_signed = parameters.at(ID(B_SIGNED), Const(0)).as_bool();
if(cellType.in({ID($add), ID($sub), ID($and), ID($or), ID($xor), ID($xnor), ID($mul)})){
bool is_signed = a_signed && b_signed;
Node a = factory.extend(inputs.at(ID(A)), y_width, is_signed);
Node b = factory.extend(inputs.at(ID(B)), y_width, is_signed);
if(cellType == ID($add))
return factory.add(a, b);
else if(cellType == ID($sub))
return factory.sub(a, b);
else if(cellType == ID($mul))
return factory.mul(a, b);
else if(cellType == ID($and))
return factory.bitwise_and(a, b);
else if(cellType == ID($or))
return factory.bitwise_or(a, b);
else if(cellType == ID($xor))
return factory.bitwise_xor(a, b);
else if(cellType == ID($xnor))
return factory.bitwise_not(factory.bitwise_xor(a, b));
else
log_abort();
}else if(cellType.in({ID($eq), ID($ne), ID($eqx), ID($nex), ID($le), ID($lt), ID($ge), ID($gt)})){
bool is_signed = a_signed && b_signed;
int width = max(a_width, b_width);
Node a = factory.extend(inputs.at(ID(A)), width, is_signed);
Node b = factory.extend(inputs.at(ID(B)), width, is_signed);
if(cellType.in({ID($eq), ID($eqx)}))
return factory.extend(factory.equal(a, b), y_width, false);
else if(cellType.in({ID($ne), ID($nex)}))
return factory.extend(factory.not_equal(a, b), y_width, false);
else if(cellType == ID($lt))
return factory.extend(is_signed ? factory.signed_greater_than(b, a) : factory.unsigned_greater_than(b, a), y_width, false);
else if(cellType == ID($le))
return factory.extend(is_signed ? factory.signed_greater_equal(b, a) : factory.unsigned_greater_equal(b, a), y_width, false);
else if(cellType == ID($gt))
return factory.extend(is_signed ? factory.signed_greater_than(a, b) : factory.unsigned_greater_than(a, b), y_width, false);
else if(cellType == ID($ge))
return factory.extend(is_signed ? factory.signed_greater_equal(a, b) : factory.unsigned_greater_equal(a, b), y_width, false);
else
log_abort();
}else if(cellType.in({ID($logic_or), ID($logic_and)})){
Node a = factory.reduce_or(inputs.at(ID(A)));
Node b = factory.reduce_or(inputs.at(ID(B)));
Node y = cellType == ID($logic_and) ? factory.bitwise_and(a, b) : factory.bitwise_or(a, b);
return factory.extend(y, y_width, false);
}else if(cellType == ID($not)){
Node a = factory.extend(inputs.at(ID(A)), y_width, a_signed);
return factory.bitwise_not(a);
}else if(cellType == ID($pos)){
return factory.extend(inputs.at(ID(A)), y_width, a_signed);
}else if(cellType == ID($neg)){
Node a = factory.extend(inputs.at(ID(A)), y_width, a_signed);
return factory.unary_minus(a);
}else if(cellType == ID($logic_not)){
Node a = factory.reduce_or(inputs.at(ID(A)));
Node y = factory.bitwise_not(a);
return factory.extend(y, y_width, false);
}else if(cellType.in({ID($reduce_or), ID($reduce_bool)})){
Node a = factory.reduce_or(inputs.at(ID(A)));
return factory.extend(a, y_width, false);
}else if(cellType == ID($reduce_and)){
Node a = factory.reduce_and(inputs.at(ID(A)));
return factory.extend(a, y_width, false);
}else if(cellType.in({ID($reduce_xor), ID($reduce_xnor)})){
Node a = factory.reduce_xor(inputs.at(ID(A)));
Node y = cellType == ID($reduce_xnor) ? factory.bitwise_not(a) : a;
return factory.extend(y, y_width, false);
}else if(cellType == ID($shl) || cellType == ID($sshl)){
Node a = factory.extend(inputs.at(ID(A)), y_width, a_signed);
Node b = inputs.at(ID(B));
return logical_shift_left(a, b);
}else if(cellType == ID($shr) || cellType == ID($sshr)){
int width = max(a_width, y_width);
Node a = factory.extend(inputs.at(ID(A)), width, a_signed);
Node b = inputs.at(ID(B));
Node y = a_signed && cellType == ID($sshr) ?
arithmetic_shift_right(a, b) :
logical_shift_right(a, b);
return factory.extend(y, y_width, a_signed);
}else if(cellType == ID($shiftx) || cellType == ID($shift)){
int width = max(a_width, y_width);
Node a = factory.extend(inputs.at(ID(A)), width, cellType == ID($shift) && a_signed);
Node b = inputs.at(ID(B));
Node shr = logical_shift_right(a, b);
if(b_signed) {
Node shl = logical_shift_left(a, factory.unary_minus(b));
Node y = factory.mux(shr, shl, sign(b));
return factory.extend(y, y_width, false);
} else {
return factory.extend(shr, y_width, false);
}
}else if(cellType == ID($mux)){
return factory.mux(inputs.at(ID(A)), inputs.at(ID(B)), inputs.at(ID(S)));
}else if(cellType == ID($pmux)){
return handle_pmux(inputs.at(ID(A)), inputs.at(ID(B)), inputs.at(ID(S)));
}else if(cellType == ID($concat)){
Node a = inputs.at(ID(A));
Node b = inputs.at(ID(B));
return factory.concat(a, b);
}else if(cellType == ID($slice)){
int offset = parameters.at(ID(OFFSET)).as_int();
Node a = inputs.at(ID(A));
return factory.slice(a, offset, y_width);
}else if(cellType.in({ID($div), ID($mod), ID($divfloor), ID($modfloor)})) {
int width = max(a_width, b_width);
bool is_signed = a_signed && b_signed;
Node a = factory.extend(inputs.at(ID(A)), width, is_signed);
Node b = factory.extend(inputs.at(ID(B)), width, is_signed);
if(is_signed) {
if(cellType == ID($div)) {
// divide absolute values, then flip the sign if input signs differ
// but extend the width first, to handle the case (most negative value) / (-1)
Node abs_y = factory.unsigned_div(abs(a), abs(b));
Node out_sign = factory.not_equal(sign(a), sign(b));
return neg_if(factory.extend(abs_y, y_width, false), out_sign);
} else if(cellType == ID($mod)) {
// similar to division but output sign == divisor sign
Node abs_y = factory.unsigned_mod(abs(a), abs(b));
return neg_if(factory.extend(abs_y, y_width, false), sign(a));
} else if(cellType == ID($divfloor)) {
// if b is negative, flip both signs so that b is positive
Node b_sign = sign(b);
Node a1 = neg_if(a, b_sign);
Node b1 = neg_if(b, b_sign);
// if a is now negative, calculate ~((~a) / b) = -((-a - 1) / b + 1)
// which equals the negative of (-a) / b with rounding up rather than down
// note that to handle the case where a = most negative value properly,
// we have to calculate a1_sign from the original values rather than using sign(a1)
Node a1_sign = factory.bitwise_and(factory.not_equal(sign(a), sign(b)), factory.reduce_or(a));
Node a2 = factory.mux(a1, factory.bitwise_not(a1), a1_sign);
Node y1 = factory.unsigned_div(a2, b1);
Node y2 = factory.extend(y1, y_width, false);
return factory.mux(y2, factory.bitwise_not(y2), a1_sign);
} else if(cellType == ID($modfloor)) {
// calculate |a| % |b| and then subtract from |b| if input signs differ and the remainder is non-zero
Node abs_b = abs(b);
Node abs_y = factory.unsigned_mod(abs(a), abs_b);
Node flip_y = factory.bitwise_and(factory.bitwise_xor(sign(a), sign(b)), factory.reduce_or(abs_y));
Node y_flipped = factory.mux(abs_y, factory.sub(abs_b, abs_y), flip_y);
// since y_flipped is strictly less than |b|, the top bit is always 0 and we can just sign extend the flipped result
Node y = neg_if(y_flipped, sign(b));
return factory.extend(y, y_width, true);
} else
log_error("unhandled cell in CellSimplifier %s\n", cellType.c_str());
} else {
if(cellType.in({ID($mod), ID($modfloor)}))
return factory.extend(factory.unsigned_mod(a, b), y_width, false);
else
return factory.extend(factory.unsigned_div(a, b), y_width, false);
}
} else if(cellType == ID($pow)) {
return handle_pow(inputs.at(ID(A)), inputs.at(ID(B)), y_width, a_signed && b_signed);
} else if (cellType == ID($lut)) {
int width = parameters.at(ID(WIDTH)).as_int();
Const lut_table = parameters.at(ID(LUT));
lut_table.extu(1 << width);
return handle_bmux(factory.constant(lut_table), inputs.at(ID(A)), 0, 1, width);
} else if (cellType == ID($bwmux)) {
Node a = inputs.at(ID(A));
Node b = inputs.at(ID(B));
Node s = inputs.at(ID(S));
return factory.bitwise_or(
factory.bitwise_and(a, factory.bitwise_not(s)),
factory.bitwise_and(b, s));
} else if (cellType == ID($bweqx)) {
Node a = inputs.at(ID(A));
Node b = inputs.at(ID(B));
return factory.bitwise_not(factory.bitwise_xor(a, b));
} else if(cellType == ID($bmux)) {
int width = parameters.at(ID(WIDTH)).as_int();
int s_width = parameters.at(ID(S_WIDTH)).as_int();
return handle_bmux(inputs.at(ID(A)), inputs.at(ID(S)), 0, width, s_width);
} else if(cellType == ID($demux)) {
int width = parameters.at(ID(WIDTH)).as_int();
int s_width = parameters.at(ID(S_WIDTH)).as_int();
int y_width = width << s_width;
int b_width = ceil_log2(y_width);
Node a = factory.extend(inputs.at(ID(A)), y_width, false);
Node s = factory.extend(inputs.at(ID(S)), b_width, false);
Node b = factory.mul(s, factory.constant(Const(width, b_width)));
return factory.logical_shift_left(a, b);
} else if(cellType == ID($fa)) {
return handle_fa(inputs.at(ID(A)), inputs.at(ID(B)), inputs.at(ID(C)));
} else if(cellType == ID($lcu)) {
return handle_lcu(inputs.at(ID(P)), inputs.at(ID(G)), inputs.at(ID(CI)));
} else if(cellType == ID($alu)) {
return handle_alu(inputs.at(ID(A)), inputs.at(ID(B)), y_width, a_signed && b_signed, inputs.at(ID(CI)), inputs.at(ID(BI)));
} else if(cellType.in({ID($assert), ID($assume), ID($live), ID($fair), ID($cover)})) {
Node a = factory.mux(factory.constant(Const(State::S1, 1)), inputs.at(ID(A)), inputs.at(ID(EN)));
auto &output = factory.add_output(cellName, cellType, Sort(1));
output.set_value(a);
return {};
} else if(cellType.in({ID($anyconst), ID($allconst), ID($anyseq), ID($allseq)})) {
int width = parameters.at(ID(WIDTH)).as_int();
auto &input = factory.add_input(cellName, cellType, Sort(width));
return factory.value(input);
} else if(cellType == ID($initstate)) {
if(factory.ir().has_state(ID($initstate), ID($state)))
return factory.value(factory.ir().state(ID($initstate)));
else {
auto &state = factory.add_state(ID($initstate), ID($state), Sort(1));
state.set_initial_value(RTLIL::Const(State::S1, 1));
state.set_next_value(factory.constant(RTLIL::Const(State::S0, 1)));
return factory.value(state);
}
} else if(cellType == ID($check)) {
log_error("The design contains a $check cell `%s'. This is not supported by the functional backend. Call `chformal -lower' to avoid this error.\n", cellName.c_str());
} else {
log_error("`%s' cells are not supported by the functional backend\n", cellType.c_str());
}
}
};
class FunctionalIRConstruction {
std::deque<std::variant<DriveSpec, Cell *>> queue;
dict<DriveSpec, Node> graph_nodes;
dict<std::pair<Cell *, IdString>, Node> cell_outputs;
DriverMap driver_map;
Factory& factory;
CellSimplifier simplifier;
vector<Mem> memories_vector;
dict<Cell*, Mem*> memories;
SigMap sig_map; // TODO: this is only for FfInitVals, remove this once FfInitVals supports DriverMap
FfInitVals ff_initvals;
Node enqueue(DriveSpec const &spec)
{
auto it = graph_nodes.find(spec);
if(it == graph_nodes.end()){
auto node = factory.create_pending(spec.size());
graph_nodes.insert({spec, node});
queue.emplace_back(spec);
return node;
}else
return it->second;
}
Node enqueue_cell(Cell *cell, IdString port_name)
{
auto it = cell_outputs.find({cell, port_name});
if(it == cell_outputs.end()) {
queue.emplace_back(cell);
std::optional<Node> rv;
for(auto const &[name, sigspec] : cell->connections())
if(driver_map.celltypes.cell_output(cell->type, name)) {
auto node = factory.create_pending(sigspec.size());
factory.suggest_name(node, cell->name.str() + "$" + name.str());
cell_outputs.emplace({cell, name}, node);
if(name == port_name)
rv = node;
}
return *rv;
} else
return it->second;
}
public:
FunctionalIRConstruction(Module *module, Factory &f)
: factory(f)
, simplifier(f)
, sig_map(module)
, ff_initvals(&sig_map, module)
{
driver_map.add(module);
for (auto cell : module->cells()) {
if (cell->type.in(ID($assert), ID($assume), ID($live), ID($fair), ID($cover), ID($check)))
queue.emplace_back(cell);
}
for (auto wire : module->wires()) {
if (wire->port_input)
factory.add_input(wire->name, ID($input), Sort(wire->width));
if (wire->port_output) {
auto &output = factory.add_output(wire->name, ID($output), Sort(wire->width));
output.set_value(enqueue(DriveChunk(DriveChunkWire(wire, 0, wire->width))));
}
}
memories_vector = Mem::get_all_memories(module);
for (auto &mem : memories_vector) {
if (mem.cell != nullptr)
memories[mem.cell] = &mem;
}
}
private:
Node concatenate_read_results(Mem *mem, vector<Node> results)
{
// sanity check: all read ports concatenated should equal the RD_DATA port
const SigSpec &rd_data = mem->cell->connections().at(ID(RD_DATA));
int current = 0;
for(size_t i = 0; i < mem->rd_ports.size(); i++) {
int width = mem->width << mem->rd_ports[i].wide_log2;
log_assert (results[i].width() == width);
log_assert (mem->rd_ports[i].data == rd_data.extract(current, width));
current += width;
}
log_assert (current == rd_data.size());
log_assert (!results.empty());
Node node = results[0];
for(size_t i = 1; i < results.size(); i++)
node = factory.concat(node, results[i]);
return node;
}
Node handle_memory(Mem *mem)
{
// To simplify memory handling, the functional backend makes the following assumptions:
// - Since async2sync or clk2fflogic must be run to use the functional backend,
// we can assume that all ports are asynchronous.
// - Async rd/wr are always transparent and so we must do reads after writes,
// but we can ignore transparency_mask.
// - We ignore collision_x_mask because x is a don't care value for us anyway.
// - Since wr port j can only have priority over wr port i if j > i, if we do writes in
// ascending index order the result will obey the priority relation.
vector<Node> read_results;
auto &state = factory.add_state(mem->cell->name, ID($state), Sort(ceil_log2(mem->size), mem->width));
state.set_initial_value(MemContents(mem));
Node node = factory.value(state);
for (size_t i = 0; i < mem->wr_ports.size(); i++) {
const auto &wr = mem->wr_ports[i];
if (wr.clk_enable)
log_error("Write port %zd of memory %s.%s is clocked. This is not supported by the functional backend. "
"Call async2sync or clk2fflogic to avoid this error.\n", i, log_id(mem->module), log_id(mem->memid));
Node en = enqueue(driver_map(DriveSpec(wr.en)));
Node addr = enqueue(driver_map(DriveSpec(wr.addr)));
Node new_data = enqueue(driver_map(DriveSpec(wr.data)));
Node old_data = factory.memory_read(node, addr);
Node wr_data = simplifier.bitwise_mux(old_data, new_data, en);
node = factory.memory_write(node, addr, wr_data);
}
if (mem->rd_ports.empty())
log_error("Memory %s.%s has no read ports. This is not supported by the functional backend. "
"Call opt_clean to remove it.", log_id(mem->module), log_id(mem->memid));
for (size_t i = 0; i < mem->rd_ports.size(); i++) {
const auto &rd = mem->rd_ports[i];
if (rd.clk_enable)
log_error("Read port %zd of memory %s.%s is clocked. This is not supported by the functional backend. "
"Call memory_nordff to avoid this error.\n", i, log_id(mem->module), log_id(mem->memid));
Node addr = enqueue(driver_map(DriveSpec(rd.addr)));
read_results.push_back(factory.memory_read(node, addr));
}
state.set_next_value(node);
return concatenate_read_results(mem, read_results);
}
void process_cell(Cell *cell)
{
if (cell->is_mem_cell()) {
Mem *mem = memories.at(cell, nullptr);
if (mem == nullptr) {
log_assert(cell->has_memid());
log_error("The design contains an unpacked memory at %s. This is not supported by the functional backend. "
"Call memory_collect to avoid this error.\n", log_const(cell->parameters.at(ID(MEMID))));
}
Node node = handle_memory(mem);
factory.update_pending(cell_outputs.at({cell, ID(RD_DATA)}), node);
} else if (RTLIL::builtin_ff_cell_types().count(cell->type)) {
FfData ff(&ff_initvals, cell);
if (!ff.has_gclk)
log_error("The design contains a %s flip-flop at %s. This is not supported by the functional backend. "
"Call async2sync or clk2fflogic to avoid this error.\n", log_id(cell->type), log_id(cell));
auto &state = factory.add_state(ff.name, ID($state), Sort(ff.width));
Node q_value = factory.value(state);
factory.suggest_name(q_value, ff.name);
factory.update_pending(cell_outputs.at({cell, ID(Q)}), q_value);
state.set_next_value(enqueue(ff.sig_d));
state.set_initial_value(ff.val_init);
} else {
dict<IdString, Node> connections;
IdString output_name; // for the single output case
int n_outputs = 0;
for(auto const &[name, sigspec] : cell->connections()) {
if(driver_map.celltypes.cell_input(cell->type, name) && sigspec.size() > 0)
connections.insert({ name, enqueue(DriveChunkPort(cell, {name, sigspec})) });
if(driver_map.celltypes.cell_output(cell->type, name)) {
output_name = name;
n_outputs++;
}
}
std::variant<dict<IdString, Node>, Node> outputs = simplifier.handle(cell->name, cell->type, cell->parameters, connections);
if(auto *nodep = std::get_if<Node>(&outputs); nodep != nullptr) {
log_assert(n_outputs == 1);
factory.update_pending(cell_outputs.at({cell, output_name}), *nodep);
} else {
for(auto [name, node] : std::get<dict<IdString, Node>>(outputs))
factory.update_pending(cell_outputs.at({cell, name}), node);
}
}
}
void undriven(const char *name) {
log_error("The design contains an undriven signal %s. This is not supported by the functional backend. "
"Call setundef with appropriate options to avoid this error.\n", name);
}
// we perform this check separately to give better error messages that include the wire or port name
void check_undriven(DriveSpec const& spec, std::string const& name) {
for(auto const &chunk : spec.chunks())
if(chunk.is_none())
undriven(name.c_str());
}
public:
void process_queue()
{
for (; !queue.empty(); queue.pop_front()) {
if(auto p = std::get_if<Cell *>(&queue.front()); p != nullptr) {
process_cell(*p);
continue;
}
DriveSpec spec = std::get<DriveSpec>(queue.front());
Node pending = graph_nodes.at(spec);
if (spec.chunks().size() > 1) {
auto chunks = spec.chunks();
Node node = enqueue(chunks[0]);
for(size_t i = 1; i < chunks.size(); i++)
node = factory.concat(node, enqueue(chunks[i]));
factory.update_pending(pending, node);
} else if (spec.chunks().size() == 1) {
DriveChunk chunk = spec.chunks()[0];
if (chunk.is_wire()) {
DriveChunkWire wire_chunk = chunk.wire();
if (wire_chunk.is_whole()) {
if (wire_chunk.wire->port_input) {
Node node = factory.value(factory.ir().input(wire_chunk.wire->name));
factory.suggest_name(node, wire_chunk.wire->name);
factory.update_pending(pending, node);
} else {
DriveSpec driver = driver_map(DriveSpec(wire_chunk));
check_undriven(driver, RTLIL::unescape_id(wire_chunk.wire->name));
Node node = enqueue(driver);
factory.suggest_name(node, wire_chunk.wire->name);
factory.update_pending(pending, node);
}
} else {
DriveChunkWire whole_wire(wire_chunk.wire, 0, wire_chunk.wire->width);
Node node = factory.slice(enqueue(whole_wire), wire_chunk.offset, wire_chunk.width);
factory.update_pending(pending, node);
}
} else if (chunk.is_port()) {
DriveChunkPort port_chunk = chunk.port();
if (port_chunk.is_whole()) {
if (driver_map.celltypes.cell_output(port_chunk.cell->type, port_chunk.port)) {
Node node = enqueue_cell(port_chunk.cell, port_chunk.port);
factory.update_pending(pending, node);
} else {
DriveSpec driver = driver_map(DriveSpec(port_chunk));
check_undriven(driver, RTLIL::unescape_id(port_chunk.cell->name) + " port " + RTLIL::unescape_id(port_chunk.port));
factory.update_pending(pending, enqueue(driver));
}
} else {
DriveChunkPort whole_port(port_chunk.cell, port_chunk.port, 0, GetSize(port_chunk.cell->connections().at(port_chunk.port)));
Node node = factory.slice(enqueue(whole_port), port_chunk.offset, port_chunk.width);
factory.update_pending(pending, node);
}
} else if (chunk.is_constant()) {
Node node = factory.constant(chunk.constant());
factory.suggest_name(node, "$const" + std::to_string(chunk.size()) + "b" + chunk.constant().as_string());
factory.update_pending(pending, node);
} else if (chunk.is_multiple()) {
log_error("Signal %s has multiple drivers. This is not supported by the functional backend. "
"If tristate drivers are used, call tristate -formal to avoid this error.\n", log_signal(chunk));
} else if (chunk.is_none()) {
undriven(log_signal(chunk));
} else {
log_error("unhandled drivespec: %s\n", log_signal(chunk));
log_abort();
}
} else {
log_abort();
}
}
}
};
IR IR::from_module(Module *module) {
IR ir;
auto factory = ir.factory();
FunctionalIRConstruction ctor(module, factory);
ctor.process_queue();
ir.topological_sort();
ir.forward_buf();
return ir;
}
void IR::topological_sort() {
Graph::SccAdaptor compute_graph_scc(_graph);
bool scc = false;
std::vector<int> perm;
TopoSortedSccs toposort(compute_graph_scc, [&](int *begin, int *end) {
perm.insert(perm.end(), begin, end);
if (end > begin + 1)
{
log_warning("Combinational loop:\n");
for (int *i = begin; i != end; ++i) {
Node node(_graph[*i]);
log("- %s = %s\n", RTLIL::unescape_id(node.name()).c_str(), node.to_string().c_str());
}
log("\n");
scc = true;
}
});
for(const auto &[name, state]: _states)
if(state.has_next_value())
toposort.process(state.next_value().id());
for(const auto &[name, output]: _outputs)
if(output.has_value())
toposort.process(output.value().id());
// any nodes untouched by this point are dead code and will be removed by permute
_graph.permute(perm);
if(scc) log_error("The design contains combinational loops. This is not supported by the functional backend. "
"Try `scc -select; simplemap; select -clear` to avoid this error.\n");
}
static IdString merge_name(IdString a, IdString b) {
if(a[0] == '$' && b[0] == '\\')
return b;
else
return a;
}
void IR::forward_buf() {
std::vector<int> perm, alias;
perm.clear();
for (int i = 0; i < _graph.size(); ++i)
{
auto node = _graph[i];
if (node.function().fn() == Fn::buf && node.arg(0).index() < i)
{
int target_index = alias[node.arg(0).index()];
auto target_node = _graph[perm[target_index]];
if(node.has_sparse_attr()) {
if(target_node.has_sparse_attr()) {
IdString id = merge_name(node.sparse_attr(), target_node.sparse_attr());
target_node.sparse_attr() = id;
} else {
IdString id = node.sparse_attr();
target_node.sparse_attr() = id;
}
}
alias.push_back(target_index);
}
else
{
alias.push_back(GetSize(perm));
perm.push_back(i);
}
}
_graph.permute(perm, alias);
}
// Quoting routine to make error messages nicer
static std::string quote_fmt(const char *fmt)
{
std::string r;
for(const char *p = fmt; *p != 0; p++) {
switch(*p) {
case '\n': r += "\\n"; break;
case '\t': r += "\\t"; break;
case '"': r += "\\\""; break;
case '\\': r += "\\\\"; break;
default: r += *p; break;
}
}
return r;
}
void Writer::print_impl(const char *fmt, vector<std::function<void()>> &fns)
{
size_t next_index = 0;
for(const char *p = fmt; *p != 0; p++)
switch(*p) {
case '{':
if(*++p == '{') {
*os << '{';
} else {
char *pe;
size_t index = strtoul(p, &pe, 10);
if(*pe != '}')
log_error("invalid format string: expected {<number>}, {} or {{, got \"%s\": \"%s\"\n",
quote_fmt(std::string(p - 1, pe - p + 2).c_str()).c_str(),
quote_fmt(fmt).c_str());
if(p == pe)
index = next_index;
else
p = pe;
if(index >= fns.size())
log_error("invalid format string: index %zu out of bounds (%zu): \"%s\"\n", index, fns.size(), quote_fmt(fmt).c_str());
fns[index]();
next_index = index + 1;
}
break;
case '}':
p++;
if(*p != '}')
log_error("invalid format string: unescaped }: \"%s\"\n", quote_fmt(fmt).c_str());
*os << '}';
break;
default:
*os << *p;
}
}
}
YOSYS_NAMESPACE_END

642
kernel/functional.h Normal file

@ -0,0 +1,642 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Emily Schmidt <emily@yosyshq.com>
* Copyright (C) 2024 National Technology and Engineering Solutions of Sandia, LLC
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#ifndef FUNCTIONAL_H
#define FUNCTIONAL_H
#include "kernel/yosys.h"
#include "kernel/compute_graph.h"
#include "kernel/drivertools.h"
#include "kernel/mem.h"
#include "kernel/utils.h"
USING_YOSYS_NAMESPACE
YOSYS_NAMESPACE_BEGIN
namespace Functional {
// each function is documented with a short pseudocode declaration or definition
// standard C/Verilog operators are used to describe the result
//
// the sorts used in this are:
// - bit[N]: a bitvector of N bits
// bit[N] can be indicated as signed or unsigned. this is not tracked by the functional backend
// but is meant to indicate how the value is interpreted
// if a bit[N] is marked as neither signed nor unsigned, this means the result should be valid with *either* interpretation
// - memory[N, M]: a memory with N address and M data bits
// - int: C++ int
// - Const[N]: yosys RTLIL::Const (with size() == N)
// - IdString: yosys IdString
// - any: used in documentation to indicate that the sort is unconstrained
//
// nodes in the functional backend are either of sort bit[N] or memory[N,M] (for some N, M: int)
// additionally, they can carry a constant of sort int, Const[N] or IdString
// each node has a 'sort' field that stores the sort of the node
// slice, zero_extend, sign_extend use the sort field to store out_width
enum class Fn {
// invalid() = known-invalid/shouldn't happen value
// TODO: maybe remove this and use e.g. std::optional instead?
invalid,
// buf(a: any): any = a
// no-op operation
// when constructing the compute graph we generate invalid buf() nodes as a placeholder
// and later insert the argument
buf,
// slice(a: bit[in_width], offset: int, out_width: int): bit[out_width] = a[offset +: out_width]
// required: offset + out_width <= in_width
slice,
// zero_extend(a: unsigned bit[in_width], out_width: int): unsigned bit[out_width] = a (zero extended)
// required: out_width > in_width
zero_extend,
// sign_extend(a: signed bit[in_width], out_width: int): signed bit[out_width] = a (sign extended)
// required: out_width > in_width
sign_extend,
// concat(a: bit[N], b: bit[M]): bit[N+M] = {b, a} (verilog syntax)
// concatenates two bitvectors, with a in the least significant position and b in the more significant position
concat,
// add(a: bit[N], b: bit[N]): bit[N] = a + b
add,
// sub(a: bit[N], b: bit[N]): bit[N] = a - b
sub,
// mul(a: bit[N], b: bit[N]): bit[N] = a * b
mul,
// unsigned_div(a: unsigned bit[N], b: unsigned bit[N]): bit[N] = a / b
unsigned_div,
// unsigned_mod(a: unsigned bit[N], b: unsigned bit[N]): bit[N] = a % b
unsigned_mod,
// bitwise_and(a: bit[N], b: bit[N]): bit[N] = a & b
bitwise_and,
// bitwise_or(a: bit[N], b: bit[N]): bit[N] = a | b
bitwise_or,
// bitwise_xor(a: bit[N], b: bit[N]): bit[N] = a ^ b
bitwise_xor,
// bitwise_not(a: bit[N]): bit[N] = ~a
bitwise_not,
// reduce_and(a: bit[N]): bit[1] = &a
reduce_and,
// reduce_or(a: bit[N]): bit[1] = |a
reduce_or,
// reduce_xor(a: bit[N]): bit[1] = ^a
reduce_xor,
// unary_minus(a: bit[N]): bit[N] = -a
unary_minus,
// equal(a: bit[N], b: bit[N]): bit[1] = (a == b)
equal,
// not_equal(a: bit[N], b: bit[N]): bit[1] = (a != b)
not_equal,
// signed_greater_than(a: signed bit[N], b: signed bit[N]): bit[1] = (a > b)
signed_greater_than,
// signed_greater_equal(a: signed bit[N], b: signed bit[N]): bit[1] = (a >= b)
signed_greater_equal,
// unsigned_greater_than(a: unsigned bit[N], b: unsigned bit[N]): bit[1] = (a > b)
unsigned_greater_than,
// unsigned_greater_equal(a: unsigned bit[N], b: unsigned bit[N]): bit[1] = (a >= b)
unsigned_greater_equal,
// logical_shift_left(a: bit[N], b: unsigned bit[M]): bit[N] = a << b
// required: M == clog2(N)
logical_shift_left,
// logical_shift_right(a: unsigned bit[N], b: unsigned bit[M]): unsigned bit[N] = a >> b
// required: M == clog2(N)
logical_shift_right,
// arithmetic_shift_right(a: signed bit[N], b: unsigned bit[M]): signed bit[N] = a >> b
// required: M == clog2(N)
arithmetic_shift_right,
// mux(a: bit[N], b: bit[N], s: bit[1]): bit[N] = s ? b : a
mux,
// constant(a: Const[N]): bit[N] = a
constant,
// input(a: IdString): any
// returns the current value of the input with the specified name
input,
// state(a: IdString): any
// returns the current value of the state variable with the specified name
state,
// memory_read(memory: memory[addr_width, data_width], addr: bit[addr_width]): bit[data_width] = memory[addr]
memory_read,
// memory_write(memory: memory[addr_width, data_width], addr: bit[addr_width], data: bit[data_width]): memory[addr_width, data_width]
// returns a copy of `memory` but with the value at `addr` changed to `data`
memory_write
};
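// For illustration (not part of the enum): with 8-bit a, b and a 1-bit s, the Verilog
// expression `s ? a & b : a` corresponds to the node
//   mux(input(\a), bitwise_and(input(\a), input(\b)), input(\s))
// of sort bit[8], since mux(a, b, s) yields b when s is 1.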
// returns the name of a Fn value, as a string literal
const char *fn_to_string(Fn);
// Sort represents the sort or type of a node
// currently the only two sorts are signal/bit and memory
class Sort {
std::variant<int, std::pair<int, int>> _v;
public:
explicit Sort(int width) : _v(width) { }
Sort(int addr_width, int data_width) : _v(std::make_pair(addr_width, data_width)) { }
bool is_signal() const { return _v.index() == 0; }
bool is_memory() const { return _v.index() == 1; }
// returns the width of a bitvector sort, errors out for other sorts
int width() const { return std::get<0>(_v); }
// returns the address width of a memory sort, errors out for other sorts
int addr_width() const { return std::get<1>(_v).first; }
// returns the data width of a memory sort, errors out for other sorts
int data_width() const { return std::get<1>(_v).second; }
bool operator==(Sort const& other) const { return _v == other._v; }
unsigned int hash() const { return mkhash(_v); }
};
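// For example (illustrative values), Sort(8) describes an 8-bit bitvector and
// Sort(10, 32) describes a memory with 10 address bits (2^10 words) of 32 data bits each.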
class IR;
class Factory;
class Node;
class IRInput {
friend class Factory;
public:
IdString name;
IdString kind;
Sort sort;
private:
IRInput(IR &, IdString name, IdString kind, Sort sort)
: name(name), kind(kind), sort(std::move(sort)) {}
};
class IROutput {
friend class Factory;
IR &_ir;
public:
IdString name;
IdString kind;
Sort sort;
private:
IROutput(IR &ir, IdString name, IdString kind, Sort sort)
: _ir(ir), name(name), kind(kind), sort(std::move(sort)) {}
public:
Node value() const;
bool has_value() const;
void set_value(Node value);
};
class IRState {
friend class Factory;
IR &_ir;
public:
IdString name;
IdString kind;
Sort sort;
private:
std::variant<RTLIL::Const, MemContents> _initial;
IRState(IR &ir, IdString name, IdString kind, Sort sort)
: _ir(ir), name(name), kind(kind), sort(std::move(sort)) {}
public:
Node next_value() const;
bool has_next_value() const;
RTLIL::Const const& initial_value_signal() const { return std::get<RTLIL::Const>(_initial); }
MemContents const& initial_value_memory() const { return std::get<MemContents>(_initial); }
void set_next_value(Node value);
void set_initial_value(RTLIL::Const value) { value.extu(sort.width()); _initial = std::move(value); }
void set_initial_value(MemContents value) { log_assert(Sort(value.addr_width(), value.data_width()) == sort); _initial = std::move(value); }
};
class IR {
friend class Factory;
friend class Node;
friend class IRInput;
friend class IROutput;
friend class IRState;
// one NodeData is stored per Node, containing the function and non-node arguments
// note that NodeData is deduplicated by ComputeGraph
class NodeData {
Fn _fn;
std::variant<
std::monostate,
RTLIL::Const,
std::pair<IdString, IdString>,
int
> _extra;
public:
NodeData() : _fn(Fn::invalid) {}
NodeData(Fn fn) : _fn(fn) {}
template<class T> NodeData(Fn fn, T &&extra) : _fn(fn), _extra(std::forward<T>(extra)) {}
Fn fn() const { return _fn; }
const RTLIL::Const &as_const() const { return std::get<RTLIL::Const>(_extra); }
std::pair<IdString, IdString> as_idstring_pair() const { return std::get<std::pair<IdString, IdString>>(_extra); }
int as_int() const { return std::get<int>(_extra); }
int hash() const {
return mkhash((unsigned int) _fn, mkhash(_extra));
}
bool operator==(NodeData const &other) const {
return _fn == other._fn && _extra == other._extra;
}
};
// Attr contains all the information about a node that should not be deduplicated
struct Attr {
Sort sort;
};
// our specialised version of ComputeGraph
// the sparse_attr IdString stores a naming suggestion, retrieved with name()
// the key is currently used to identify the nodes that represent output and next state values
// the bool is true for next state values
using Graph = ComputeGraph<NodeData, Attr, IdString, std::tuple<IdString, IdString, bool>>;
Graph _graph;
dict<std::pair<IdString, IdString>, IRInput> _inputs;
dict<std::pair<IdString, IdString>, IROutput> _outputs;
dict<std::pair<IdString, IdString>, IRState> _states;
IR::Graph::Ref mutate(Node n);
public:
static IR from_module(Module *module);
Factory factory();
int size() const { return _graph.size(); }
Node operator[](int i);
void topological_sort();
void forward_buf();
IRInput const& input(IdString name, IdString kind) const { return _inputs.at({name, kind}); }
IRInput const& input(IdString name) const { return input(name, ID($input)); }
IROutput const& output(IdString name, IdString kind) const { return _outputs.at({name, kind}); }
IROutput const& output(IdString name) const { return output(name, ID($output)); }
IRState const& state(IdString name, IdString kind) const { return _states.at({name, kind}); }
IRState const& state(IdString name) const { return state(name, ID($state)); }
bool has_input(IdString name, IdString kind) const { return _inputs.count({name, kind}); }
bool has_output(IdString name, IdString kind) const { return _outputs.count({name, kind}); }
bool has_state(IdString name, IdString kind) const { return _states.count({name, kind}); }
vector<IRInput const*> inputs(IdString kind) const;
vector<IRInput const*> inputs() const { return inputs(ID($input)); }
vector<IROutput const*> outputs(IdString kind) const;
vector<IROutput const*> outputs() const { return outputs(ID($output)); }
vector<IRState const*> states(IdString kind) const;
vector<IRState const*> states() const { return states(ID($state)); }
vector<IRInput const*> all_inputs() const;
vector<IROutput const*> all_outputs() const;
vector<IRState const*> all_states() const;
class iterator {
friend class IR;
IR *_ir;
int _index;
iterator(IR *ir, int index) : _ir(ir), _index(index) {}
public:
using iterator_category = std::input_iterator_tag;
using value_type = Node;
using pointer = arrow_proxy<Node>;
using reference = Node;
using difference_type = ptrdiff_t;
Node operator*();
iterator &operator++() { _index++; return *this; }
bool operator!=(iterator const &other) const { return _ir != other._ir || _index != other._index; }
bool operator==(iterator const &other) const { return !(*this != other); }
pointer operator->();
};
iterator begin() { return iterator(this, 0); }
iterator end() { return iterator(this, _graph.size()); }
};
// Node is an immutable reference to a FunctionalIR node
class Node {
friend class Factory;
friend class IR;
friend class IRInput;
friend class IROutput;
friend class IRState;
IR::Graph::ConstRef _ref;
explicit Node(IR::Graph::ConstRef ref) : _ref(ref) { }
explicit operator IR::Graph::ConstRef() { return _ref; }
public:
// the node's index. may change if nodes are added or removed
int id() const { return _ref.index(); }
// a name suggestion for the node, which need not be unique
IdString name() const {
if(_ref.has_sparse_attr())
return _ref.sparse_attr();
else
return std::string("\\n") + std::to_string(id());
}
Fn fn() const { return _ref.function().fn(); }
Sort sort() const { return _ref.attr().sort; }
// returns the width of a bitvector node, errors out for other nodes
int width() const { return sort().width(); }
size_t arg_count() const { return _ref.size(); }
Node arg(int n) const { return Node(_ref.arg(n)); }
// visit calls the appropriate visitor method depending on the type of the node
template<class Visitor> auto visit(Visitor v) const
{
// currently templated but could be switched to AbstractVisitor &
switch(_ref.function().fn()) {
case Fn::invalid: log_error("invalid node in visit"); break;
case Fn::buf: return v.buf(*this, arg(0)); break;
case Fn::slice: return v.slice(*this, arg(0), _ref.function().as_int(), sort().width()); break;
case Fn::zero_extend: return v.zero_extend(*this, arg(0), width()); break;
case Fn::sign_extend: return v.sign_extend(*this, arg(0), width()); break;
case Fn::concat: return v.concat(*this, arg(0), arg(1)); break;
case Fn::add: return v.add(*this, arg(0), arg(1)); break;
case Fn::sub: return v.sub(*this, arg(0), arg(1)); break;
case Fn::mul: return v.mul(*this, arg(0), arg(1)); break;
case Fn::unsigned_div: return v.unsigned_div(*this, arg(0), arg(1)); break;
case Fn::unsigned_mod: return v.unsigned_mod(*this, arg(0), arg(1)); break;
case Fn::bitwise_and: return v.bitwise_and(*this, arg(0), arg(1)); break;
case Fn::bitwise_or: return v.bitwise_or(*this, arg(0), arg(1)); break;
case Fn::bitwise_xor: return v.bitwise_xor(*this, arg(0), arg(1)); break;
case Fn::bitwise_not: return v.bitwise_not(*this, arg(0)); break;
case Fn::unary_minus: return v.unary_minus(*this, arg(0)); break;
case Fn::reduce_and: return v.reduce_and(*this, arg(0)); break;
case Fn::reduce_or: return v.reduce_or(*this, arg(0)); break;
case Fn::reduce_xor: return v.reduce_xor(*this, arg(0)); break;
case Fn::equal: return v.equal(*this, arg(0), arg(1)); break;
case Fn::not_equal: return v.not_equal(*this, arg(0), arg(1)); break;
case Fn::signed_greater_than: return v.signed_greater_than(*this, arg(0), arg(1)); break;
case Fn::signed_greater_equal: return v.signed_greater_equal(*this, arg(0), arg(1)); break;
case Fn::unsigned_greater_than: return v.unsigned_greater_than(*this, arg(0), arg(1)); break;
case Fn::unsigned_greater_equal: return v.unsigned_greater_equal(*this, arg(0), arg(1)); break;
case Fn::logical_shift_left: return v.logical_shift_left(*this, arg(0), arg(1)); break;
case Fn::logical_shift_right: return v.logical_shift_right(*this, arg(0), arg(1)); break;
case Fn::arithmetic_shift_right: return v.arithmetic_shift_right(*this, arg(0), arg(1)); break;
case Fn::mux: return v.mux(*this, arg(0), arg(1), arg(2)); break;
case Fn::constant: return v.constant(*this, _ref.function().as_const()); break;
case Fn::input: return v.input(*this, _ref.function().as_idstring_pair().first, _ref.function().as_idstring_pair().second); break;
case Fn::state: return v.state(*this, _ref.function().as_idstring_pair().first, _ref.function().as_idstring_pair().second); break;
case Fn::memory_read: return v.memory_read(*this, arg(0), arg(1)); break;
case Fn::memory_write: return v.memory_write(*this, arg(0), arg(1), arg(2)); break;
}
log_abort();
}
std::string to_string();
std::string to_string(std::function<std::string(Node)>);
};
inline IR::Graph::Ref IR::mutate(Node n) { return _graph[n._ref.index()]; }
inline Node IR::operator[](int i) { return Node(_graph[i]); }
inline Node IROutput::value() const { return Node(_ir._graph({name, kind, false})); }
inline bool IROutput::has_value() const { return _ir._graph.has_key({name, kind, false}); }
inline void IROutput::set_value(Node value) { log_assert(sort == value.sort()); _ir.mutate(value).assign_key({name, kind, false}); }
inline Node IRState::next_value() const { return Node(_ir._graph({name, kind, true})); }
inline bool IRState::has_next_value() const { return _ir._graph.has_key({name, kind, true}); }
inline void IRState::set_next_value(Node value) { log_assert(sort == value.sort()); _ir.mutate(value).assign_key({name, kind, true}); }
inline Node IR::iterator::operator*() { return Node(_ir->_graph[_index]); }
inline arrow_proxy<Node> IR::iterator::operator->() { return arrow_proxy<Node>(**this); }
// AbstractVisitor provides an abstract base class for visitors
template<class T> struct AbstractVisitor {
virtual T buf(Node self, Node n) = 0;
virtual T slice(Node self, Node a, int offset, int out_width) = 0;
virtual T zero_extend(Node self, Node a, int out_width) = 0;
virtual T sign_extend(Node self, Node a, int out_width) = 0;
virtual T concat(Node self, Node a, Node b) = 0;
virtual T add(Node self, Node a, Node b) = 0;
virtual T sub(Node self, Node a, Node b) = 0;
virtual T mul(Node self, Node a, Node b) = 0;
virtual T unsigned_div(Node self, Node a, Node b) = 0;
virtual T unsigned_mod(Node self, Node a, Node b) = 0;
virtual T bitwise_and(Node self, Node a, Node b) = 0;
virtual T bitwise_or(Node self, Node a, Node b) = 0;
virtual T bitwise_xor(Node self, Node a, Node b) = 0;
virtual T bitwise_not(Node self, Node a) = 0;
virtual T unary_minus(Node self, Node a) = 0;
virtual T reduce_and(Node self, Node a) = 0;
virtual T reduce_or(Node self, Node a) = 0;
virtual T reduce_xor(Node self, Node a) = 0;
virtual T equal(Node self, Node a, Node b) = 0;
virtual T not_equal(Node self, Node a, Node b) = 0;
virtual T signed_greater_than(Node self, Node a, Node b) = 0;
virtual T signed_greater_equal(Node self, Node a, Node b) = 0;
virtual T unsigned_greater_than(Node self, Node a, Node b) = 0;
virtual T unsigned_greater_equal(Node self, Node a, Node b) = 0;
virtual T logical_shift_left(Node self, Node a, Node b) = 0;
virtual T logical_shift_right(Node self, Node a, Node b) = 0;
virtual T arithmetic_shift_right(Node self, Node a, Node b) = 0;
virtual T mux(Node self, Node a, Node b, Node s) = 0;
virtual T constant(Node self, RTLIL::Const const & value) = 0;
virtual T input(Node self, IdString name, IdString kind) = 0;
virtual T state(Node self, IdString name, IdString kind) = 0;
virtual T memory_read(Node self, Node mem, Node addr) = 0;
virtual T memory_write(Node self, Node mem, Node addr, Node data) = 0;
};
// DefaultVisitor provides defaults for all visitor methods which just calls default_handler
template<class T> struct DefaultVisitor : public AbstractVisitor<T> {
virtual T default_handler(Node self) = 0;
T buf(Node self, Node) override { return default_handler(self); }
T slice(Node self, Node, int, int) override { return default_handler(self); }
T zero_extend(Node self, Node, int) override { return default_handler(self); }
T sign_extend(Node self, Node, int) override { return default_handler(self); }
T concat(Node self, Node, Node) override { return default_handler(self); }
T add(Node self, Node, Node) override { return default_handler(self); }
T sub(Node self, Node, Node) override { return default_handler(self); }
T mul(Node self, Node, Node) override { return default_handler(self); }
T unsigned_div(Node self, Node, Node) override { return default_handler(self); }
T unsigned_mod(Node self, Node, Node) override { return default_handler(self); }
T bitwise_and(Node self, Node, Node) override { return default_handler(self); }
T bitwise_or(Node self, Node, Node) override { return default_handler(self); }
T bitwise_xor(Node self, Node, Node) override { return default_handler(self); }
T bitwise_not(Node self, Node) override { return default_handler(self); }
T unary_minus(Node self, Node) override { return default_handler(self); }
T reduce_and(Node self, Node) override { return default_handler(self); }
T reduce_or(Node self, Node) override { return default_handler(self); }
T reduce_xor(Node self, Node) override { return default_handler(self); }
T equal(Node self, Node, Node) override { return default_handler(self); }
T not_equal(Node self, Node, Node) override { return default_handler(self); }
T signed_greater_than(Node self, Node, Node) override { return default_handler(self); }
T signed_greater_equal(Node self, Node, Node) override { return default_handler(self); }
T unsigned_greater_than(Node self, Node, Node) override { return default_handler(self); }
T unsigned_greater_equal(Node self, Node, Node) override { return default_handler(self); }
T logical_shift_left(Node self, Node, Node) override { return default_handler(self); }
T logical_shift_right(Node self, Node, Node) override { return default_handler(self); }
T arithmetic_shift_right(Node self, Node, Node) override { return default_handler(self); }
T mux(Node self, Node, Node, Node) override { return default_handler(self); }
T constant(Node self, RTLIL::Const const &) override { return default_handler(self); }
T input(Node self, IdString, IdString) override { return default_handler(self); }
T state(Node self, IdString, IdString) override { return default_handler(self); }
T memory_read(Node self, Node, Node) override { return default_handler(self); }
T memory_write(Node self, Node, Node, Node) override { return default_handler(self); }
};
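// Illustrative sketch (not part of this header): a visitor built on DefaultVisitor that
// counts how many direct arguments of a node are constants. The class name is hypothetical;
// only the Node and DefaultVisitor interfaces declared above are used.
struct ConstantArgCounter : DefaultVisitor<int> {
	int default_handler(Node self) override {
		int count = 0;
		for (int i = 0; i < (int) self.arg_count(); i++)
			if (self.arg(i).fn() == Fn::constant)
				count++;
		return count;
	}
};
// Usage (illustrative): int n = node.visit(ConstantArgCounter{});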
// a factory is used to modify a FunctionalIR. it creates new nodes and allows for some modification of existing nodes.
class Factory {
friend class IR;
IR &_ir;
explicit Factory(IR &ir) : _ir(ir) {}
Node add(IR::NodeData &&fn, Sort const &sort, std::initializer_list<Node> args) {
log_assert(!sort.is_signal() || sort.width() > 0);
log_assert(!sort.is_memory() || (sort.addr_width() > 0 && sort.data_width() > 0));
IR::Graph::Ref ref = _ir._graph.add(std::move(fn), {std::move(sort)});
for (auto arg : args)
ref.append_arg(IR::Graph::ConstRef(arg));
return Node(ref);
}
void check_basic_binary(Node const &a, Node const &b) { log_assert(a.sort().is_signal() && a.sort() == b.sort()); }
void check_shift(Node const &a, Node const &b) { log_assert(a.sort().is_signal() && b.sort().is_signal() && b.width() == ceil_log2(a.width())); }
void check_unary(Node const &a) { log_assert(a.sort().is_signal()); }
public:
IR &ir() { return _ir; }
Node slice(Node a, int offset, int out_width) {
log_assert(a.sort().is_signal() && offset + out_width <= a.sort().width());
if(offset == 0 && out_width == a.width())
return a;
return add(IR::NodeData(Fn::slice, offset), Sort(out_width), {a});
}
// extend will either extend or truncate the provided value to reach the desired width
Node extend(Node a, int out_width, bool is_signed) {
int in_width = a.sort().width();
log_assert(a.sort().is_signal());
if(in_width == out_width)
return a;
if(in_width > out_width)
return slice(a, 0, out_width);
if(is_signed)
return add(Fn::sign_extend, Sort(out_width), {a});
else
return add(Fn::zero_extend, Sort(out_width), {a});
}
Node concat(Node a, Node b) {
log_assert(a.sort().is_signal() && b.sort().is_signal());
return add(Fn::concat, Sort(a.sort().width() + b.sort().width()), {a, b});
}
Node add(Node a, Node b) { check_basic_binary(a, b); return add(Fn::add, a.sort(), {a, b}); }
Node sub(Node a, Node b) { check_basic_binary(a, b); return add(Fn::sub, a.sort(), {a, b}); }
Node mul(Node a, Node b) { check_basic_binary(a, b); return add(Fn::mul, a.sort(), {a, b}); }
Node unsigned_div(Node a, Node b) { check_basic_binary(a, b); return add(Fn::unsigned_div, a.sort(), {a, b}); }
Node unsigned_mod(Node a, Node b) { check_basic_binary(a, b); return add(Fn::unsigned_mod, a.sort(), {a, b}); }
Node bitwise_and(Node a, Node b) { check_basic_binary(a, b); return add(Fn::bitwise_and, a.sort(), {a, b}); }
Node bitwise_or(Node a, Node b) { check_basic_binary(a, b); return add(Fn::bitwise_or, a.sort(), {a, b}); }
Node bitwise_xor(Node a, Node b) { check_basic_binary(a, b); return add(Fn::bitwise_xor, a.sort(), {a, b}); }
Node bitwise_not(Node a) { check_unary(a); return add(Fn::bitwise_not, a.sort(), {a}); }
Node unary_minus(Node a) { check_unary(a); return add(Fn::unary_minus, a.sort(), {a}); }
Node reduce_and(Node a) {
check_unary(a);
if(a.width() == 1)
return a;
return add(Fn::reduce_and, Sort(1), {a});
}
Node reduce_or(Node a) {
check_unary(a);
if(a.width() == 1)
return a;
return add(Fn::reduce_or, Sort(1), {a});
}
Node reduce_xor(Node a) {
check_unary(a);
if(a.width() == 1)
return a;
return add(Fn::reduce_xor, Sort(1), {a});
}
Node equal(Node a, Node b) { check_basic_binary(a, b); return add(Fn::equal, Sort(1), {a, b}); }
Node not_equal(Node a, Node b) { check_basic_binary(a, b); return add(Fn::not_equal, Sort(1), {a, b}); }
Node signed_greater_than(Node a, Node b) { check_basic_binary(a, b); return add(Fn::signed_greater_than, Sort(1), {a, b}); }
Node signed_greater_equal(Node a, Node b) { check_basic_binary(a, b); return add(Fn::signed_greater_equal, Sort(1), {a, b}); }
Node unsigned_greater_than(Node a, Node b) { check_basic_binary(a, b); return add(Fn::unsigned_greater_than, Sort(1), {a, b}); }
Node unsigned_greater_equal(Node a, Node b) { check_basic_binary(a, b); return add(Fn::unsigned_greater_equal, Sort(1), {a, b}); }
Node logical_shift_left(Node a, Node b) { check_shift(a, b); return add(Fn::logical_shift_left, a.sort(), {a, b}); }
Node logical_shift_right(Node a, Node b) { check_shift(a, b); return add(Fn::logical_shift_right, a.sort(), {a, b}); }
Node arithmetic_shift_right(Node a, Node b) { check_shift(a, b); return add(Fn::arithmetic_shift_right, a.sort(), {a, b}); }
Node mux(Node a, Node b, Node s) {
log_assert(a.sort().is_signal() && a.sort() == b.sort() && s.sort() == Sort(1));
return add(Fn::mux, a.sort(), {a, b, s});
}
Node memory_read(Node mem, Node addr) {
log_assert(mem.sort().is_memory() && addr.sort().is_signal() && mem.sort().addr_width() == addr.sort().width());
return add(Fn::memory_read, Sort(mem.sort().data_width()), {mem, addr});
}
Node memory_write(Node mem, Node addr, Node data) {
log_assert(mem.sort().is_memory() && addr.sort().is_signal() && data.sort().is_signal() &&
mem.sort().addr_width() == addr.sort().width() && mem.sort().data_width() == data.sort().width());
return add(Fn::memory_write, mem.sort(), {mem, addr, data});
}
Node constant(RTLIL::Const value) {
return add(IR::NodeData(Fn::constant, std::move(value)), Sort(value.size()), {});
}
Node create_pending(int width) {
return add(Fn::buf, Sort(width), {});
}
void update_pending(Node node, Node value) {
log_assert(node._ref.function() == Fn::buf && node._ref.size() == 0);
log_assert(node.sort() == value.sort());
_ir.mutate(node).append_arg(value._ref);
}
IRInput &add_input(IdString name, IdString kind, Sort sort) {
auto [it, inserted] = _ir._inputs.emplace({name, kind}, IRInput(_ir, name, kind, std::move(sort)));
if (!inserted) log_error("input `%s` was re-defined", name.c_str());
return it->second;
}
IROutput &add_output(IdString name, IdString kind, Sort sort) {
auto [it, inserted] = _ir._outputs.emplace({name, kind}, IROutput(_ir, name, kind, std::move(sort)));
if (!inserted) log_error("output `%s` was re-defined", name.c_str());
return it->second;
}
IRState &add_state(IdString name, IdString kind, Sort sort) {
auto [it, inserted] = _ir._states.emplace({name, kind}, IRState(_ir, name, kind, std::move(sort)));
if (!inserted) log_error("state `%s` was re-defined", name.c_str());
return it->second;
}
Node value(IRInput const& input) {
return add(IR::NodeData(Fn::input, std::pair(input.name, input.kind)), input.sort, {});
}
Node value(IRState const& state) {
return add(IR::NodeData(Fn::state, std::pair(state.name, state.kind)), state.sort, {});
}
void suggest_name(Node node, IdString name) {
_ir.mutate(node).sparse_attr() = name;
}
};
inline Factory IR::factory() { return Factory(*this); }
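// Illustrative sketch (not part of the API, the function name is hypothetical): building a
// small IR by hand with a Factory. It describes a combinational module that computes
// y = a + 1 for an 8-bit input a.
inline IR example_increment_ir()
{
	IR ir;
	auto factory = ir.factory();
	IRInput &a = factory.add_input(ID(a), ID($input), Sort(8));
	IROutput &y = factory.add_output(ID(y), ID($output), Sort(8));
	Node sum = factory.add(factory.value(a), factory.constant(RTLIL::Const(1, 8)));
	factory.suggest_name(sum, ID(sum));
	y.set_value(sum);
	ir.topological_sort();
	return ir;
}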
template<class Id> class Scope {
protected:
char substitution_character = '_';
virtual bool is_character_legal(char, int) = 0;
private:
pool<std::string> _used_names;
dict<Id, std::string> _by_id;
public:
void reserve(std::string name) {
_used_names.insert(std::move(name));
}
std::string unique_name(IdString suggestion) {
std::string str = RTLIL::unescape_id(suggestion);
for(size_t i = 0; i < str.size(); i++)
if(!is_character_legal(str[i], i))
str[i] = substitution_character;
if(_used_names.count(str) == 0) {
_used_names.insert(str);
return str;
}
for (int idx = 0 ; ; idx++){
std::string suffixed = str + "_" + std::to_string(idx);
if(_used_names.count(suffixed) == 0) {
_used_names.insert(suffixed);
return suffixed;
}
}
}
std::string operator()(Id id, IdString suggestion) {
auto it = _by_id.find(id);
if(it != _by_id.end())
return it->second;
std::string str = unique_name(suggestion);
_by_id.insert({id, str});
return str;
}
};
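// Illustrative sketch (not part of this header, the class name is hypothetical): a Scope
// that restricts names to C-style identifier characters, replacing everything else with
// the substitution character.
template<class Id> struct CStyleScope : Scope<Id> {
	bool is_character_legal(char c, int index) override {
		return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' ||
			(index > 0 && c >= '0' && c <= '9');
	}
};
// Usage (illustrative): with CStyleScope<int> scope, the call scope(0, RTLIL::IdString("\\foo$bar"))
// returns "foo_bar", and a later call with a different id but the same suggestion returns "foo_bar_0".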
class Writer {
std::ostream *os;
void print_impl(const char *fmt, vector<std::function<void()>>& fns);
public:
Writer(std::ostream &os) : os(&os) {}
template<class T> Writer& operator <<(T&& arg) { *os << std::forward<T>(arg); return *this; }
template<typename... Args>
void print(const char *fmt, Args&&... args)
{
vector<std::function<void()>> fns { [&]() { *this << args; }... };
print_impl(fmt, fns);
}
template<typename Fn, typename... Args>
void print_with(Fn fn, const char *fmt, Args&&... args)
{
vector<std::function<void()>> fns { [&]() {
if constexpr (std::is_invocable_v<Fn, Args>)
*this << fn(args);
else
*this << args; }...
};
print_impl(fmt, fns);
}
};
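// Illustrative sketch (the function name is hypothetical): Writer's format strings use
// "{}" for the next argument, "{<number>}" for an explicit zero-based index, and
// "{{" / "}}" for literal braces, as implemented by print_impl.
inline void writer_example(std::ostream &out)
{
	Writer w(out);
	// prints: assign y = a + b; // {sum}
	w.print("assign {} = {1} + {2}; // {{sum}}\n", "y", "a", "b");
}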
}
YOSYS_NAMESPACE_END
#endif


@ -186,6 +186,37 @@ inline unsigned int mkhash(const T &v) {
return hash_ops<T>().hash(v);
}
template<> struct hash_ops<std::monostate> {
static inline bool cmp(std::monostate a, std::monostate b) {
return a == b;
}
static inline unsigned int hash(std::monostate) {
return mkhash_init;
}
};
template<typename... T> struct hash_ops<std::variant<T...>> {
static inline bool cmp(std::variant<T...> a, std::variant<T...> b) {
return a == b;
}
static inline unsigned int hash(std::variant<T...> a) {
unsigned int h = std::visit([](const auto &v) { return mkhash(v); }, a);
return mkhash(a.index(), h);
}
};
template<typename T> struct hash_ops<std::optional<T>> {
static inline bool cmp(std::optional<T> a, std::optional<T> b) {
return a == b;
}
static inline unsigned int hash(std::optional<T> a) {
if(a.has_value())
return mkhash(*a);
else
return 0;
}
};
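// For example (illustrative), mkhash on a std::variant mixes the index of the active
// alternative into the hash, so equal-looking values held by different alternatives hash
// differently, while an empty std::optional simply hashes to 0.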
inline int hashtable_size(int min_size)
{
// Primes as generated by https://oeis.org/A175953


@ -662,6 +662,16 @@ const char *log_id(const RTLIL::IdString &str)
return p+1;
}
const char *log_str(const char *str)
{
log_id_cache.push_back(strdup(str));
return log_id_cache.back();
}
const char *log_str(std::string const &str) {
return log_str(str.c_str());
}
void log_module(RTLIL::Module *module, std::string indent)
{
std::stringstream buf;


@ -206,6 +206,8 @@ void log_check_expected();
const char *log_signal(const RTLIL::SigSpec &sig, bool autoint = true);
const char *log_const(const RTLIL::Const &value, bool autoint = true);
const char *log_id(const RTLIL::IdString &id);
const char *log_str(const char *str);
const char *log_str(std::string const &str);
template<typename T> static inline const char *log_id(T *obj, const char *nullstr = nullptr) {
if (nullstr && obj == nullptr)


@ -1679,3 +1679,219 @@ SigSpec MemWr::decompress_en(const std::vector<int> &swizzle, SigSpec sig) {
res.append(sig[i]);
return res;
}
using addr_t = MemContents::addr_t;
MemContents::MemContents(Mem *mem) :
MemContents(ceil_log2(mem->size), mem->width)
{
for(const auto &init : mem->inits) {
if(init.en.is_fully_zero()) continue;
log_assert(init.en.size() == _data_width);
if(init.en.is_fully_ones())
insert_concatenated(init.addr.as_int(), init.data);
else {
// TODO: this case could be handled more efficiently by adding
// a flag to reserve_range that tells it to preserve
// previous contents
addr_t addr = init.addr.as_int();
addr_t words = init.data.size() / _data_width;
RTLIL::Const data = init.data;
log_assert(data.size() % _data_width == 0);
for(addr_t i = 0; i < words; i++) {
RTLIL::Const previous = (*this)[addr + i];
for(int j = 0; j < _data_width; j++)
if(init.en[j] != State::S1)
data[_data_width * i + j] = previous[j];
}
insert_concatenated(init.addr.as_int(), data);
}
}
}
MemContents::iterator & MemContents::iterator::operator++() {
auto it = _memory->_values.upper_bound(_addr);
if(it == _memory->_values.end()) {
_memory = nullptr;
_addr = ~(addr_t) 0;
} else
_addr = it->first;
return *this;
}
void MemContents::check() {
log_assert(_addr_width > 0 && _addr_width < (int)sizeof(addr_t) * 8);
log_assert(_data_width > 0);
log_assert(_default_value.size() == _data_width);
if(_values.empty()) return;
auto it = _values.begin();
for(;;) {
log_assert(!it->second.empty());
log_assert(it->second.size() % _data_width == 0);
auto end1 = _range_end(it);
log_assert(_range_begin(it) < (addr_t)(1<<_addr_width));
log_assert(end1 <= (addr_t)(1<<_addr_width));
if(++it == _values.end())
break;
// check that ranges neither overlap nor touch
log_assert(_range_begin(it) > end1);
}
}
bool MemContents::_range_contains(std::map<addr_t, RTLIL::Const>::iterator it, addr_t addr) const {
// if addr < begin, the subtraction will overflow, and the comparison will always fail
// (since we have an invariant that begin + size <= 2^(addr_t bits))
return it != _values.end() && addr - _range_begin(it) < _range_size(it);
}
bool MemContents::_range_contains(std::map<addr_t, RTLIL::Const>::iterator it, addr_t begin_addr, addr_t end_addr) const {
// note that we assume begin_addr <= end_addr
return it != _values.end() && _range_begin(it) <= begin_addr && end_addr - _range_begin(it) <= _range_size(it);
}
bool MemContents::_range_overlaps(std::map<addr_t, RTLIL::Const>::iterator it, addr_t begin_addr, addr_t end_addr) const {
if(it == _values.end() || begin_addr >= end_addr)
return false;
auto top1 = _range_end(it) - 1;
auto top2 = end_addr - 1;
return !(top1 < begin_addr || top2 < _range_begin(it));
}
std::map<addr_t, RTLIL::Const>::iterator MemContents::_range_at(addr_t addr) const {
// allow addr == 1<<_addr_width (which will just return end())
log_assert(addr <= (addr_t)(1<<_addr_width));
// get the first range with base > addr
// (we use const_cast since map::iterators are only passed around internally and not exposed to the user
// and using map::iterator in both the const and non-const case simplifies the code a little,
// at the cost of having to be a little careful when implementing const methods)
auto it = const_cast<std::map<addr_t, RTLIL::Const> &>(_values).upper_bound(addr);
// if we get the very first range, all ranges are past the addr, so return the first one
if(it == _values.begin())
return it;
// otherwise, go back to the previous interval
// this must be the last interval with base <= addr
auto it_prev = std::next(it, -1);
if(_range_contains(it_prev, addr))
return it_prev;
else
return it;
}
RTLIL::Const MemContents::operator[](addr_t addr) const {
auto it = _range_at(addr);
if(_range_contains(it, addr))
return it->second.extract(_range_offset(it, addr), _data_width);
else
return _default_value;
}
addr_t MemContents::count_range(addr_t begin_addr, addr_t end_addr) const {
addr_t count = 0;
for(auto it = _range_at(begin_addr); _range_overlaps(it, begin_addr, end_addr); it++) {
auto first = std::max(_range_begin(it), begin_addr);
auto last = std::min(_range_end(it), end_addr);
count += last - first;
}
return count;
}
void MemContents::clear_range(addr_t begin_addr, addr_t end_addr) {
if(begin_addr >= end_addr) return;
// identify which ranges are affected by this operation
// the first iterator affected is the first one containing any addr >= begin_addr
auto begin_it = _range_at(begin_addr);
// the first iterator *not* affected is the first one with base addr > end_addr - 1
auto end_it = _values.upper_bound(end_addr - 1);
if(begin_it == end_it)
return; // nothing to do
// the last iterator affected is one before the first one not affected
auto last_it = std::next(end_it, -1);
// the first and last range may need to be truncated, the rest can just be deleted
// to handle the begin_it == last_it case correctly, do the end case first by inserting a new range past the end
if(_range_contains(last_it, end_addr - 1)) {
auto new_begin = end_addr;
auto end = _range_end(last_it);
// if there is data past the end address, preserve it by creating a new range
if(new_begin != end)
end_it = _values.emplace_hint(last_it, new_begin, last_it->second.extract(_range_offset(last_it, new_begin), (_range_end(last_it) - new_begin) * _data_width));
// the original range will either be truncated in the next if() block or deleted in the erase, so we can leave it untruncated
}
if(_range_contains(begin_it, begin_addr)) {
auto new_end = begin_addr;
// if there is data before the start address, truncate but don't delete
if(new_end != begin_it->first) {
begin_it->second.extu(_range_offset(begin_it, new_end));
++begin_it;
}
// else: begin_it will be deleted
}
_values.erase(begin_it, end_it);
}
std::map<addr_t, RTLIL::Const>::iterator MemContents::_reserve_range(addr_t begin_addr, addr_t end_addr) {
if(begin_addr >= end_addr)
return _values.end(); // need a dummy value to return, end() is cheap
// find the first range containing any addr >= begin_addr - 1
auto lower_it = begin_addr == 0 ? _values.begin() : _range_at(begin_addr - 1);
// check if our range is already covered by a single range
// note that since ranges are not allowed to touch, if any range contains begin_addr, lower_it equals that range
if (_range_contains(lower_it, begin_addr, end_addr))
return lower_it;
// find the first range containing any addr >= end_addr
auto upper_it = _range_at(end_addr);
// check if either of the two ranges we just found touch our range
bool lower_touch = begin_addr > 0 && _range_contains(lower_it, begin_addr - 1);
bool upper_touch = _range_contains(upper_it, end_addr);
if (lower_touch && upper_touch) {
log_assert (lower_it != upper_it); // lower_it == upper_it should be excluded by the check above
// we have two different ranges touching at either end, we need to merge them
auto upper_end = _range_end(upper_it);
// make range bigger (maybe reserve here instead of resize?)
lower_it->second.bits.resize(_range_offset(lower_it, upper_end), State::Sx);
// copy only the data beyond our range
std::copy(_range_data(upper_it, end_addr), _range_data(upper_it, upper_end), _range_data(lower_it, end_addr));
// keep lower_it, but delete upper_it
_values.erase(std::next(lower_it), std::next(upper_it));
return lower_it;
} else if (lower_touch) {
// we have a range to the left, just make it bigger and delete any other that may exist.
lower_it->second.bits.resize(_range_offset(lower_it, end_addr), State::Sx);
// keep lower_it and upper_it
_values.erase(std::next(lower_it), upper_it);
return lower_it;
} else if (upper_touch) {
// we have a range to the right, we need to expand it
// since we need to erase and reinsert to a new address, steal the data
RTLIL::Const data = std::move(upper_it->second);
// note that begin_addr is not in upper_it, otherwise the whole range covered check would have tripped
data.bits.insert(data.bits.begin(), (_range_begin(upper_it) - begin_addr) * _data_width, State::Sx);
// delete lower_it and upper_it, then reinsert
_values.erase(lower_it, std::next(upper_it));
return _values.emplace(begin_addr, std::move(data)).first;
} else {
// no ranges are touching, so just delete all ranges in our range and allocate a new one
// could try to resize an existing range but not sure if that actually helps
_values.erase(lower_it, upper_it);
return _values.emplace(begin_addr, RTLIL::Const(State::Sx, (end_addr - begin_addr) * _data_width)).first;
}
}
void MemContents::insert_concatenated(addr_t addr, RTLIL::Const const &values) {
addr_t words = (values.size() + _data_width - 1) / _data_width;
log_assert(addr < (addr_t)(1<<_addr_width));
log_assert(words <= (addr_t)(1<<_addr_width) - addr);
auto it = _reserve_range(addr, addr + words);
auto to_begin = _range_data(it, addr);
std::copy(values.bits.begin(), values.bits.end(), to_begin);
// if values is not word-aligned, fill any missing bits with 0
std::fill(to_begin + values.size(), to_begin + words * _data_width, State::S0);
}
std::vector<State>::iterator MemContents::_range_write(std::vector<State>::iterator it, RTLIL::Const const &word) {
auto from_end = word.size() <= _data_width ? word.bits.end() : word.bits.begin() + _data_width;
auto to_end = std::copy(word.bits.begin(), from_end, it);
auto it_next = std::next(it, _data_width);
std::fill(to_end, it_next, State::S0);
return it_next;
}


@ -22,6 +22,7 @@
#include "kernel/yosys.h"
#include "kernel/ffinit.h"
#include "kernel/utils.h"
YOSYS_NAMESPACE_BEGIN
@ -224,6 +225,114 @@ struct Mem : RTLIL::AttrObject {
Mem(Module *module, IdString memid, int width, int start_offset, int size) : module(module), memid(memid), packed(false), mem(nullptr), cell(nullptr), width(width), start_offset(start_offset), size(size) {}
};
// MemContents efficiently represents the contents of a potentially sparse memory by storing only those segments that are actually defined
class MemContents {
public:
class range; class iterator;
using addr_t = uint32_t;
private:
// we ban _addr_width == sizeof(addr_t) * 8 because it adds too many corner cases
int _addr_width;
int _data_width;
RTLIL::Const _default_value;
// for each range, store the concatenation of the words at the start address
// invariants:
// - no overlapping or adjacent ranges
// - no empty ranges
// - all Consts are a multiple of the word size
std::map<addr_t, RTLIL::Const> _values;
// returns an iterator to the range containing addr, if it exists, or the first range past addr
std::map<addr_t, RTLIL::Const>::iterator _range_at(addr_t addr) const;
addr_t _range_size(std::map<addr_t, RTLIL::Const>::iterator it) const { return it->second.size() / _data_width; }
addr_t _range_begin(std::map<addr_t, RTLIL::Const>::iterator it) const { return it->first; }
addr_t _range_end(std::map<addr_t, RTLIL::Const>::iterator it) const { return _range_begin(it) + _range_size(it); }
// check if the iterator points to a range containing addr
bool _range_contains(std::map<addr_t, RTLIL::Const>::iterator it, addr_t addr) const;
// check if the iterator points to a range containing [begin_addr, end_addr). assumes end_addr >= begin_addr.
bool _range_contains(std::map<addr_t, RTLIL::Const>::iterator it, addr_t begin_addr, addr_t end_addr) const;
// check if the iterator points to a range overlapping with [begin_addr, end_addr)
bool _range_overlaps(std::map<addr_t, RTLIL::Const>::iterator it, addr_t begin_addr, addr_t end_addr) const;
// return the offset the addr would have in the range at `it`
size_t _range_offset(std::map<addr_t, RTLIL::Const>::iterator it, addr_t addr) const { return (addr - it->first) * _data_width; }
// assuming _range_contains(it, addr), return an iterator pointing to the data at addr
std::vector<State>::iterator _range_data(std::map<addr_t, RTLIL::Const>::iterator it, addr_t addr) { return it->second.bits.begin() + _range_offset(it, addr); }
// internal version of reserve_range that returns an iterator to the range
std::map<addr_t, RTLIL::Const>::iterator _reserve_range(addr_t begin_addr, addr_t end_addr);
// write a single word at addr, return iterator to next word
std::vector<State>::iterator _range_write(std::vector<State>::iterator it, RTLIL::Const const &data);
public:
class range {
int _data_width;
addr_t _base;
RTLIL::Const const &_values;
friend class iterator;
range(int data_width, addr_t base, RTLIL::Const const &values)
: _data_width(data_width), _base(base), _values(values) {}
public:
addr_t base() const { return _base; }
addr_t size() const { return ((addr_t) _values.size()) / _data_width; }
addr_t limit() const { return _base + size(); }
RTLIL::Const const &concatenated() const { return _values; }
RTLIL::Const operator[](addr_t addr) const {
log_assert(addr - _base < size());
return _values.extract((addr - _base) * _data_width, _data_width);
}
RTLIL::Const at_offset(addr_t offset) const { return (*this)[_base + offset]; }
};
class iterator {
MemContents const *_memory;
// storing addr instead of an iterator gives better-defined behaviour under insertions/deletions
// use ~0 for end so that all end iterators compare the same
addr_t _addr;
friend class MemContents;
iterator(MemContents const *memory, addr_t addr) : _memory(memory), _addr(addr) {}
public:
using iterator_category = std::input_iterator_tag;
using value_type = range;
using pointer = arrow_proxy<range>;
using reference = range;
using difference_type = addr_t;
reference operator *() const { return range(_memory->_data_width, _addr, _memory->_values.at(_addr)); }
pointer operator->() const { return arrow_proxy<range>(**this); }
bool operator !=(iterator const &other) const { return _memory != other._memory || _addr != other._addr; }
bool operator ==(iterator const &other) const { return !(*this != other); }
iterator &operator++();
};
MemContents(int addr_width, int data_width, RTLIL::Const default_value)
: _addr_width(addr_width), _data_width(data_width)
, _default_value((default_value.extu(data_width), std::move(default_value)))
{ log_assert(_addr_width > 0 && _addr_width < (int)sizeof(addr_t) * 8); log_assert(_data_width > 0); }
MemContents(int addr_width, int data_width) : MemContents(addr_width, data_width, RTLIL::Const(State::Sx, data_width)) {}
explicit MemContents(Mem *mem);
int addr_width() const { return _addr_width; }
int data_width() const { return _data_width; }
RTLIL::Const const &default_value() const { return _default_value; }
// return the value at the address if it exists, the default_value of the memory otherwise. address must not exceed 2**addr_width.
RTLIL::Const operator [](addr_t addr) const;
// return the number of defined words in the range [begin_addr, end_addr)
addr_t count_range(addr_t begin_addr, addr_t end_addr) const;
// allocate memory for the range [begin_addr, end_addr), but leave the contents undefined.
void reserve_range(addr_t begin_addr, addr_t end_addr) { _reserve_range(begin_addr, end_addr); }
// insert multiple words (provided as a single concatenated RTLIL::Const) at the given address, overriding any previous assignment.
void insert_concatenated(addr_t addr, RTLIL::Const const &values);
// insert multiple words at the given address, overriding any previous assignment.
template<typename Iterator> void insert_range(addr_t addr, Iterator begin, Iterator end) {
auto words = end - begin;
log_assert(addr < (addr_t)(1<<_addr_width)); log_assert(words <= (addr_t)(1<<_addr_width) - addr);
auto range = _reserve_range(addr, addr + words);
auto it = _range_data(range, addr);
for(; begin != end; ++begin)
it = _range_write(it, *begin);
}
// undefine all words in the range [begin_addr, end_addr)
void clear_range(addr_t begin_addr, addr_t end_addr);
// check invariants, abort if invariants failed
void check();
iterator end() const { return iterator(nullptr, ~(addr_t) 0); }
iterator begin() const { return _values.empty() ? end() : iterator(this, _values.begin()->first); }
bool empty() const { return _values.empty(); }
};
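// Illustrative sketch (not part of this header, the function name is hypothetical): basic
// use of MemContents. A memory with 4 address bits and 8 data bits starts out fully
// undefined; only explicitly written ranges are stored, everything else reads back as the
// default value.
inline void mem_contents_example()
{
	MemContents mem(4, 8);
	std::vector<RTLIL::Const> words = { RTLIL::Const(0x12, 8), RTLIL::Const(0x34, 8) };
	mem.insert_range(2, words.begin(), words.end()); // defines addresses 2 and 3
	RTLIL::Const defined = mem[2];   // 8'h12
	RTLIL::Const undefined = mem[7]; // the default value, i.e. all-x
	for (auto r : mem)               // iterates over the stored (defined) ranges only
		log("range: %u words starting at address %u\n", (unsigned) r.size(), (unsigned) r.base());
	(void) defined; (void) undefined;
}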
YOSYS_NAMESPACE_END
#endif


@ -3754,6 +3754,20 @@ RTLIL::SigChunk RTLIL::SigChunk::extract(int offset, int length) const
return ret;
}
RTLIL::SigBit RTLIL::SigChunk::operator[](int offset) const
{
log_assert(offset >= 0);
log_assert(offset <= width);
RTLIL::SigBit ret;
if (wire) {
ret.wire = wire;
ret.offset = this->offset + offset;
} else {
ret.data = data[offset];
}
return ret;
}
bool RTLIL::SigChunk::operator <(const RTLIL::SigChunk &other) const
{
if (wire && other.wire)


@ -769,6 +769,7 @@ struct RTLIL::SigChunk
SigChunk(const RTLIL::SigBit &bit);
RTLIL::SigChunk extract(int offset, int length) const;
RTLIL::SigBit operator[](int offset) const;
inline int size() const { return width; }
inline bool is_wire() const { return wire != NULL; }

163
kernel/sexpr.cc Normal file

@ -0,0 +1,163 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Emily Schmidt <emily@yosyshq.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include "sexpr.h"
YOSYS_NAMESPACE_BEGIN
std::ostream &operator<<(std::ostream &os, SExpr const &sexpr) {
if(sexpr.is_atom())
os << sexpr.atom();
else if(sexpr.is_list()){
os << "(";
auto l = sexpr.list();
for(size_t i = 0; i < l.size(); i++) {
if(i > 0) os << " ";
os << l[i];
}
os << ")";
}else
os << "<invalid>";
return os;
}
std::string SExpr::to_string() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
void SExprWriter::nl_if_pending() {
if(_pending_nl) {
os << '\n';
_pos = 0;
_pending_nl = false;
}
}
void SExprWriter::puts(std::string_view s) {
if(s.empty()) return;
nl_if_pending();
for(auto c : s) {
if(c == '\n') {
os << c;
_pos = 0;
} else {
if(_pos == 0) {
for(int i = 0; i < _indent; i++)
os << " ";
_pos = 2 * _indent;
}
os << c;
_pos++;
}
}
}
// Calculate how much space would be left if the expression were written
// out in full horizontally. Returns a negative value (not necessarily the
// exact deficit) if it doesn't fit.
//
// (Ideally we would avoid recalculating the widths of subexpressions,
// but I can't figure out how to store the widths. As an alternative,
// we bail out of the calculation as soon as we can tell the expression
// doesn't fit in the available space.)
int SExprWriter::check_fit(SExpr const &sexpr, int space) {
if(sexpr.is_atom())
return space - sexpr.atom().size();
else if(sexpr.is_list()) {
space -= 2;
if(sexpr.list().size() > 1)
space -= sexpr.list().size() - 1;
for(auto arg : sexpr.list()) {
if(space < 0) break;
space = check_fit(arg, space);
}
return space;
} else
return -1;
}
void SExprWriter::print(SExpr const &sexpr, bool close, bool indent_rest) {
if(sexpr.is_atom())
puts(sexpr.atom());
else if(sexpr.is_list()) {
auto args = sexpr.list();
puts("(");
// Expressions are printed horizontally if they fit on the line.
// We do the check *after* puts("(") to make sure that _pos is accurate.
// (Otherwise there could be a pending newline + indentation)
bool vertical = args.size() > 1 && check_fit(sexpr, _max_line_width - _pos + 1) < 0;
if(vertical) _indent++;
for(size_t i = 0; i < args.size(); i++) {
if(i > 0) puts(vertical ? "\n" : " ");
print(args[i]);
}
// Any remaining arguments are currently always printed vertically,
// but are not indented if indent_rest = false.
_indent += (!close && indent_rest) - vertical;
if(close)
puts(")");
else {
_unclosed.push_back(indent_rest);
_pending_nl = true;
}
}else
log_error("shouldn't happen: SExpr '%s' is neither an atom nor a list", sexpr.to_string().c_str());
}
void SExprWriter::close(size_t n) {
log_assert(_unclosed.size() - (_unclosed_stack.empty() ? 0 : _unclosed_stack.back()) >= n);
while(n-- > 0) {
bool indented = _unclosed[_unclosed.size() - 1];
_unclosed.pop_back();
// Only print ) on the same line if it fits.
_pending_nl = _pos >= _max_line_width;
if(indented)
_indent--;
puts(")");
_pending_nl = true;
}
}
void SExprWriter::comment(std::string const &str, bool hanging) {
if(hanging) {
if(_pending_nl) {
_pending_nl = false;
puts(" ");
}
}
size_t i = 0, e;
do{
e = str.find('\n', i);
puts("; ");
puts(std::string_view(str).substr(i, e - i));
puts("\n");
i = e + 1;
}while(e != std::string::npos);
}
SExprWriter::~SExprWriter() {
while(!_unclosed_stack.empty())
pop();
close(_unclosed.size());
nl_if_pending();
}
YOSYS_NAMESPACE_END
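To make the writer's behaviour concrete, here is an illustrative sketch (not part of the patch); the output noted in the comments assumes the default 80-column line width.

#include <iostream>
#include "kernel/sexpr.h"

USING_YOSYS_NAMESPACE
using SExprUtil::list;

void sexpr_writer_demo()
{
	SExprWriter w(std::cout);

	// fits on one line, so it is printed horizontally: (define x 42)
	w << list("define", "x", 42);

	// open() prints "(module top" and leaves the list unclosed
	w.open(list("module", "top"));
	w.comment("ports would go here", true); // hanging: stays on the same line
	w << list("input", "clk");
	w.close(); // emits the matching ")"
}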

122
kernel/sexpr.h Normal file
View file

@@ -0,0 +1,122 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Emily Schmidt <emily@yosyshq.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#ifndef SEXPR_H
#define SEXPR_H
#include "kernel/yosys.h"
YOSYS_NAMESPACE_BEGIN
class SExpr {
public:
std::variant<std::vector<SExpr>, std::string> _v;
public:
SExpr(std::string a) : _v(std::move(a)) {}
SExpr(const char *a) : _v(a) {}
// FIXME: should maybe be defined for all integral types
SExpr(int n) : _v(std::to_string(n)) {}
SExpr(std::vector<SExpr> const &l) : _v(l) {}
SExpr(std::vector<SExpr> &&l) : _v(std::move(l)) {}
// It would be nicer to have an std::initializer_list constructor,
// but that causes confusing issues with overload resolution sometimes.
template<typename... Args> static SExpr list(Args&&... args) {
return SExpr(std::vector<SExpr>{std::forward<Args>(args)...});
}
bool is_atom() const { return std::holds_alternative<std::string>(_v); }
std::string const &atom() const { return std::get<std::string>(_v); }
bool is_list() const { return std::holds_alternative<std::vector<SExpr>>(_v); }
std::vector<SExpr> const &list() const { return std::get<std::vector<SExpr>>(_v); }
std::string to_string() const;
};
std::ostream &operator<<(std::ostream &os, SExpr const &sexpr);
namespace SExprUtil {
// A little hack so that `using SExprUtil::list` lets you import a shortcut to `SExpr::list`
template<typename... Args> SExpr list(Args&&... args) {
return SExpr(std::vector<SExpr>{std::forward<Args>(args)...});
}
}
// SExprWriter is a pretty printer for s-exprs. It does not try very hard to get a good layout.
class SExprWriter {
std::ostream &os;
int _max_line_width;
int _indent = 0;
int _pos = 0;
// If _pending_nl is set, print a newline before the next character.
// This lets us "undo" the last newline so we can put
// closing parentheses or a hanging comment on the same line.
bool _pending_nl = false;
// Unclosed parentheses (boolean stored is indent_rest)
vector<bool> _unclosed;
// Used only for push() and pop() (stores _unclosed.size())
vector<size_t> _unclosed_stack;
void nl_if_pending();
void puts(std::string_view s);
int check_fit(SExpr const &sexpr, int space);
void print(SExpr const &sexpr, bool close = true, bool indent_rest = true);
public:
SExprWriter(std::ostream &os, int max_line_width = 80)
: os(os)
, _max_line_width(max_line_width)
{}
// Print an s-expr.
SExprWriter &operator <<(SExpr const &sexpr) {
print(sexpr);
_pending_nl = true;
return *this;
}
// Print an s-expr (which must be a list), but leave room for extra elements
// which may be printed using either << or further calls to open.
// If indent_rest = false, the remaining elements are not indented
// (for avoiding unreasonable indentation on deeply nested structures).
void open(SExpr const &sexpr, bool indent_rest = true) {
log_assert(sexpr.is_list());
print(sexpr, false, indent_rest);
}
// Close the s-expr opened with the last call to open
// (if an argument is given, close that many s-exprs).
void close(size_t n = 1);
// push() remembers how many s-exprs are currently open
void push() {
_unclosed_stack.push_back(_unclosed.size());
}
// pop() closes all s-expr opened since the corresponding call to push()
void pop() {
auto t = _unclosed_stack.back();
log_assert(_unclosed.size() >= t);
close(_unclosed.size() - t);
_unclosed_stack.pop_back();
}
// Print a comment.
// If hanging = true, append it to the end of the last printed s-expr.
void comment(std::string const &str, bool hanging = false);
// Flushes any unprinted characters to the std::ostream, but does not close unclosed parentheses.
void flush() {
nl_if_pending();
}
// Destructor closes any unclosed parentheses and flushes.
~SExprWriter();
};
YOSYS_NAMESPACE_END
#endif
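A second sketch (again illustrative, not taken from the patch) shows how push() and pop() pair up when a helper opens an unknown number of lists:

#include <iostream>
#include "kernel/sexpr.h"

USING_YOSYS_NAMESPACE
using SExprUtil::list;

static void emit_header(SExprWriter &w)
{
	// a helper may open any number of nested lists ...
	w.open(list("let"));
	w.open(list("bindings"), /*indent_rest=*/false);
}

void sexpr_scope_demo()
{
	SExprWriter w(std::cerr);
	w.push();            // remember how many lists are open right now
	emit_header(w);
	w << SExpr("body");
	w.pop();             // ... and close everything opened since push()
	w.flush();
}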

357
kernel/topo_scc.h Normal file
View file

@@ -0,0 +1,357 @@
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2024 Jannis Harder <jix@yosyshq.com> <me@jix.one>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#ifndef TOPO_SCC_H
#define TOPO_SCC_H
#include "kernel/yosys.h"
YOSYS_NAMESPACE_BEGIN
class SigCellGraph {
public:
typedef int node_type;
struct successor_enumerator {
std::vector<std::pair<int, int>>::const_iterator current, end;
bool finished() const { return current == end; }
node_type next() {
log_assert(!finished());
node_type result = current->second;
++current;
return result;
}
};
struct node_enumerator {
int current, end;
bool finished() const { return current == end; }
node_type next() {
log_assert(!finished());
node_type result = current;
++current;
return result;
}
};
private:
idict<RTLIL::Cell *> cell_ids;
idict<RTLIL::SigBit> sig_ids;
std::vector<std::pair<int, int>> edges;
std::vector<std::pair<int, int>> edge_ranges;
std::vector<int> indices_;
int offset;
bool computed = false;
void compute() {
offset = GetSize(sig_ids);
edge_ranges.clear();
indices_.clear();
indices_.resize(GetSize(sig_ids) + GetSize(cell_ids), -1);
std::sort(edges.begin(), edges.end());
auto last = std::unique(edges.begin(), edges.end());
edges.erase(last, edges.end());
auto edge = edges.begin();
auto edge_end = edges.end();
int range_begin = 0;
for (int node = -offset, node_end = GetSize(cell_ids); node != node_end; ++node) {
while (edge != edge_end && edge->first <= node)
++edge;
int range_end = edge - edges.begin();
edge_ranges.emplace_back(std::make_pair(range_begin, range_end));
range_begin = range_end;
}
}
public:
node_type node(RTLIL::Cell *cell) { return cell_ids(cell); }
node_type node(SigBit const &bit) { return ~sig_ids(bit); }
bool is_cell(node_type node) { return node >= 0; }
bool is_sig(node_type node) { return node < 0; }
Cell *cell(node_type node) { return node >= 0 ? cell_ids[node] : nullptr; }
SigBit sig(node_type node) { return node < 0 ? sig_ids[~node] : SigBit(); }
template<typename Src, typename Dst>
void add_edge(Src &&src, Dst &&dst) {
computed = false;
node_type src_node = node(std::forward<Src>(src));
node_type dst_node = node(std::forward<Dst>(dst));
edges.emplace_back(std::make_pair(src_node, dst_node));
}
node_enumerator enumerate_nodes() {
if (!computed) compute();
return {-GetSize(sig_ids), GetSize(cell_ids)};
}
successor_enumerator enumerate_successors(node_type const &node) const {
auto range = edge_ranges[node + offset];
return {edges.begin() + range.first, edges.begin() + range.second};
}
int &dfs_index(node_type const &node) {
return indices_[node + offset];
}
};
class IntGraph {
public:
typedef int node_type;
struct successor_enumerator {
std::vector<std::pair<int, int>>::const_iterator current, end;
bool finished() const { return current == end; }
node_type next() {
log_assert(!finished());
node_type result = current->second;
++current;
return result;
}
};
struct node_enumerator {
int current, end;
bool finished() const { return current == end; }
node_type next() {
log_assert(!finished());
node_type result = current;
++current;
return result;
}
};
private:
std::vector<std::pair<int, int>> edges;
std::vector<std::pair<int, int>> edge_ranges;
std::vector<int> indices_;
bool computed = false;
void compute() {
edge_ranges.clear();
int node_end = 0;
for (auto const &edge : edges)
node_end = std::max(node_end, std::max(edge.first, edge.second) + 1);
indices_.clear();
indices_.resize(node_end, -1);
std::sort(edges.begin(), edges.end());
auto last = std::unique(edges.begin(), edges.end());
edges.erase(last, edges.end());
auto edge = edges.begin();
auto edge_end = edges.end();
int range_begin = 0;
for (int node = 0; node != node_end; ++node) {
while (edge != edge_end && edge->first <= node)
++edge;
int range_end = edge - edges.begin();
edge_ranges.emplace_back(std::make_pair(range_begin, range_end));
range_begin = range_end;
}
}
public:
void add_edge(int src, int dst) {
log_assert(src >= 0);
log_assert(dst >= 0);
computed = false;
edges.emplace_back(std::make_pair(src, dst));
}
node_enumerator enumerate_nodes() {
if (!computed) compute();
return {0, GetSize(indices_)};
}
successor_enumerator enumerate_successors(int node) const {
auto range = edge_ranges[node];
return {edges.begin() + range.first, edges.begin() + range.second};
}
int &dfs_index(node_type const &node) {
return indices_[node];
}
};
template<typename G, typename ComponentCallback>
class TopoSortedSccs
{
typedef typename G::node_enumerator node_enumerator;
typedef typename G::successor_enumerator successor_enumerator;
typedef typename G::node_type node_type;
struct dfs_entry {
node_type node;
successor_enumerator successors;
int lowlink;
dfs_entry(node_type node, successor_enumerator successors, int lowlink) :
node(node), successors(successors), lowlink(lowlink)
{}
};
G &graph;
ComponentCallback component;
std::vector<dfs_entry> dfs_stack;
std::vector<node_type> component_stack;
int next_index = 0;
public:
TopoSortedSccs(G &graph, ComponentCallback component)
: graph(graph), component(component) {}
// process all sources (nodes without a successor)
TopoSortedSccs &process_sources() {
node_enumerator nodes = graph.enumerate_nodes();
while (!nodes.finished()) {
node_type node = nodes.next();
successor_enumerator successors = graph.enumerate_successors(node);
if (successors.finished())
{
graph.dfs_index(node) = next_index;
next_index++;
component_stack.push_back(node);
component(component_stack.data(), component_stack.data() + 1);
component_stack.clear();
graph.dfs_index(node) = INT_MAX;
}
}
return *this;
}
// process all remaining nodes in the graph
TopoSortedSccs &process_all() {
node_enumerator nodes = graph.enumerate_nodes();
// iterate over all nodes to ensure we process the whole graph
while (!nodes.finished())
process(nodes.next());
return *this;
}
// process all nodes that are reachable from a given start node
TopoSortedSccs &process(node_type node) {
// only start a new search if the node wasn't visited yet
if (graph.dfs_index(node) >= 0)
return *this;
while (true) {
// at this point we're visiting the node for the first time during
// the DFS search
// we record the timestamp of when we first visited the node as the
// dfs_index
int lowlink = next_index;
next_index++;
graph.dfs_index(node) = lowlink;
// and we add the node to the component stack where it will remain
// until all nodes of the component containing this node are popped
component_stack.push_back(node);
// then we start iterating over the successors of this node
successor_enumerator successors = graph.enumerate_successors(node);
while (true) {
if (successors.finished()) {
// once we have processed all successors, i.e. once we have visited
// the complete DFS subtree rooted at the current node, we
// first check whether the current node is an SCC root
//
// (why this check identifies SCC roots is out of scope for
// this comment, see other material on Tarjan's SCC
// algorithm)
if (lowlink == graph.dfs_index(node)) {
// the SCC containing the current node is at the top of
// the component stack, with the current node at the bottom
int current = GetSize(component_stack);
do {
--current;
} while (component_stack[current] != node);
// we invoke the callback with a pointer range of the
// nodes in the SCC
node_type *stack_ptr = component_stack.data();
node_type *component_begin = stack_ptr + current;
node_type *component_end = stack_ptr + component_stack.size();
// note that we allow the callback to permute the nodes
// in this range as well as to modify dfs_index of the
// nodes in the SCC.
component(component_begin, component_end);
// by setting the dfs_index of all already emitted
// nodes to INT_MAX, we don't need a separate check for
// whether successor nodes are still on the component
// stack before updating the lowlink value
for (; component_begin != component_end; ++component_begin)
graph.dfs_index(*component_begin) = INT_MAX;
component_stack.resize(current);
}
// after checking for a completed SCC the DFS either
// continues the search at the parent node or returns to
// the outer loop if we already are at the root node.
if (dfs_stack.empty())
return *this;
auto &dfs_top = dfs_stack.back();
node = dfs_top.node;
successors = std::move(dfs_top.successors);
// the parent's lowlink is updated when returning
lowlink = min(lowlink, dfs_top.lowlink);
dfs_stack.pop_back();
// continue checking the remaining successors of the parent node.
} else {
node_type succ = successors.next();
if (graph.dfs_index(succ) < 0) {
// if the successor wasn't visited yet, the DFS recurses
// into the successor
// we save the state for this node and make the
// successor the current node.
dfs_stack.emplace_back(node, std::move(successors), lowlink);
node = succ;
// this break gets us to the section corresponding to
// the function entry in the recursive version
break;
} else {
// the textbook version guards this update with a check
// whether the successor is still on the component
// stack. If the successor node was already visited
// but is not on the component stack, it must be part
// of an already emitted SCC. We can avoid this check
// by setting the DFS index of all nodes in a SCC to
// INT_MAX when the SCC is emitted.
lowlink = min(lowlink, graph.dfs_index(succ));
}
}
}
}
}
};
YOSYS_NAMESPACE_END
#endif
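The classes above compose as in the following sketch (illustrative only, with made-up node numbers). The callback sees each strongly connected component as a pointer range, and a component is reported only after every component it has edges into.

#include "kernel/topo_scc.h"

USING_YOSYS_NAMESPACE

void topo_scc_demo()
{
	IntGraph graph;
	graph.add_edge(0, 1);
	graph.add_edge(1, 2);
	graph.add_edge(2, 1); // nodes 1 and 2 form a cycle, i.e. one SCC
	graph.add_edge(2, 3);

	TopoSortedSccs(graph, [](int *begin, int *end) {
		log("SCC:");
		for (int *it = begin; it != end; ++it)
			log(" %d", *it);
		log("\n");
	}).process_sources().process_all();
}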

View file

@@ -253,6 +253,15 @@ template <typename T, typename C = std::less<T>, typename OPS = hash_ops<T>> cla
}
};
// this class is used for implementing operator-> on iterators that return values rather than references
// it's necessary because in C++ operator-> is called recursively until a raw pointer is obtained
template<class T>
struct arrow_proxy {
T v;
explicit arrow_proxy(T const & v) : v(v) {}
T* operator->() { return &v; }
};
YOSYS_NAMESPACE_END
#endif
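To show what arrow_proxy is for, here is a hypothetical value-returning iterator; it is purely illustrative and assumes the arrow_proxy template from the hunk above is in scope (the header it lives in is not named in this excerpt).

#include <utility>

struct bit_index_iterator {
	int idx;

	// operator* returns a value, not a reference ...
	std::pair<int, bool> operator*() const { return {idx, (idx & 1) != 0}; }

	// ... so operator-> cannot hand out a pointer into a container;
	// instead it returns a proxy owning the temporary pair, and the
	// compiler keeps calling operator-> until it reaches the proxy's T*.
	arrow_proxy<std::pair<int, bool>> operator->() const {
		return arrow_proxy<std::pair<int, bool>>(**this);
	}

	bit_index_iterator &operator++() { ++idx; return *this; }
	bool operator!=(const bit_index_iterator &other) const { return idx != other.idx; }
};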

View file

@@ -30,6 +30,8 @@
#include <unordered_map>
#include <unordered_set>
#include <initializer_list>
#include <variant>
#include <optional>
#include <stdexcept>
#include <memory>
#include <cmath>