Mirror of https://github.com/Z3Prover/z3 (synced 2025-04-08 10:25:18 +00:00)

commit 371f0b193c
parent 8acc924c21

move min_cut, fix #1321

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
src/muz/spacer/CMakeLists.txt
@@ -17,7 +17,6 @@ z3_add_component(spacer
    spacer_unsat_core_learner.cpp
    spacer_unsat_core_plugin.cpp
    spacer_matrix.cpp
-   spacer_min_cut.cpp
    spacer_antiunify.cpp
    spacer_mev_array.cpp
    spacer_qe_project.cpp
src/muz/spacer/spacer_min_cut.cpp (deleted)
@@ -1,289 +0,0 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_min_cut.cpp

Abstract:
    min cut solver

Author:
    Bernhard Gleiss

Revision History:


--*/
#include "muz/spacer/spacer_min_cut.h"

namespace spacer {

    spacer_min_cut::spacer_min_cut()
    {
        m_n = 2;

        // push back two empty vectors for source and sink
        m_edges.push_back(vector<std::pair<unsigned, unsigned>>());
        m_edges.push_back(vector<std::pair<unsigned, unsigned>>());
    }

    unsigned spacer_min_cut::new_node()
    {
        return m_n++;
    }

    void spacer_min_cut::add_edge(unsigned int i, unsigned int j, unsigned int capacity)
    {
        if (i >= m_edges.size())
        {
            m_edges.resize(i + 1);
        }
        m_edges[i].insert(std::make_pair(j, 1));
        STRACE("spacer.mincut",
               verbose_stream() << "adding edge (" << i << "," << j << ")\n";
               );

    }

    void spacer_min_cut::compute_min_cut(vector<unsigned>& cut_nodes)
    {
        if (m_n == 2)
        {
            return;
        }

        m_d.resize(m_n);
        m_pred.resize(m_n);

        // compute initial distances and number of nodes
        compute_initial_distances();

        unsigned i = 0;

        while (m_d[0] < m_n)
        {
            unsigned j = get_admissible_edge(i);

            if (j < m_n)
            {
                // advance(i)
                m_pred[j] = i;
                i = j;

                // if i is the sink, augment path
                if (i == 1)
                {
                    augment_path();
                    i = 0;
                }
            }
            else
            {
                // retreat
                compute_distance(i);
                if (i != 0)
                {
                    i = m_pred[i];
                }
            }
        }

        // split nodes into reachable and unreachable ones
        vector<bool> reachable(m_n);
        compute_reachable_nodes(reachable);

        // find all edges between reachable and unreachable nodes and for each such edge, add corresponding lemma to unsat-core
        compute_cut_and_add_lemmas(reachable, cut_nodes);
    }

    void spacer_min_cut::compute_initial_distances()
    {
        vector<unsigned> todo;
        vector<bool> visited(m_n);

        todo.push_back(0); // start at the source, since we do postorder traversel

        while (!todo.empty())
        {
            unsigned current = todo.back();

            // if we haven't already visited current
            if (!visited[current]) {
                bool existsUnvisitedParent = false;

                // add unprocessed parents to stack for DFS. If there is at least one unprocessed parent, don't compute the result
                // for current now, but wait until those unprocessed parents are processed.
                for (unsigned i = 0, sz = m_edges[current].size(); i < sz; ++i)
                {
                    unsigned parent = m_edges[current][i].first;

                    // if we haven't visited the current parent yet
                    if(!visited[parent])
                    {
                        // add it to the stack
                        todo.push_back(parent);
                        existsUnvisitedParent = true;
                    }
                }

                // if we already visited all parents, we can visit current too
                if (!existsUnvisitedParent) {
                    visited[current] = true;
                    todo.pop_back();

                    compute_distance(current); // I.H. all parent distances are already computed
                }
            }
            else {
                todo.pop_back();
            }
        }
    }

    unsigned spacer_min_cut::get_admissible_edge(unsigned i)
    {
        for (const auto& pair : m_edges[i])
        {
            if (pair.second > 0 && m_d[i] == m_d[pair.first] + 1)
            {
                return pair.first;
            }
        }
        return m_n; // no element found
    }

    void spacer_min_cut::augment_path()
    {
        // find bottleneck capacity
        unsigned max = std::numeric_limits<unsigned int>::max();
        unsigned k = 1;
        while (k != 0)
        {
            unsigned l = m_pred[k];
            for (const auto& pair : m_edges[l])
            {
                if (pair.first == k)
                {
                    if (max > pair.second)
                    {
                        max = pair.second;
                    }
                }
            }
            k = l;
        }

        k = 1;
        while (k != 0)
        {
            unsigned l = m_pred[k];

            // decrease capacity
            for (auto& pair : m_edges[l])
            {
                if (pair.first == k)
                {
                    pair.second -= max;
                }
            }
            // increase reverse flow
            bool already_exists = false;
            for (auto& pair : m_edges[k])
            {
                if (pair.first == l)
                {
                    already_exists = true;
                    pair.second += max;
                }
            }
            if (!already_exists)
            {
                m_edges[k].insert(std::make_pair(l, max));
            }
            k = l;
        }
    }

    void spacer_min_cut::compute_distance(unsigned i)
    {
        if (i == 1) // sink node
        {
            m_d[1] = 0;
        }
        else
        {
            unsigned min = std::numeric_limits<unsigned int>::max();

            // find edge (i,j) with positive residual capacity and smallest distance
            for (const auto& pair : m_edges[i])
            {
                if (pair.second > 0)
                {
                    unsigned tmp = m_d[pair.first] + 1;
                    if (tmp < min)
                    {
                        min = tmp;
                    }
                }
            }
            m_d[i] = min;
        }
    }

    void spacer_min_cut::compute_reachable_nodes(vector<bool>& reachable)
    {
        vector<unsigned> todo;

        todo.push_back(0);
        while (!todo.empty())
        {
            unsigned current = todo.back();
            todo.pop_back();

            if (!reachable[current])
            {
                reachable[current] = true;

                for (const auto& pair : m_edges[current])
                {
                    if (pair.second > 0)
                    {
                        todo.push_back(pair.first);
                    }
                }
            }
        }
    }

    void spacer_min_cut::compute_cut_and_add_lemmas(vector<bool>& reachable, vector<unsigned>& cut_nodes)
    {
        vector<unsigned> todo;
        vector<bool> visited(m_n);

        todo.push_back(0);
        while (!todo.empty())
        {
            unsigned current = todo.back();
            todo.pop_back();

            if (!visited[current])
            {
                visited[current] = true;

                for (const auto& pair : m_edges[current])
                {
                    unsigned successor = pair.first;
                    if (reachable[successor])
                    {
                        todo.push_back(successor);
                    }
                    else
                    {
                        cut_nodes.push_back(successor);
                    }
                }
            }
        }
    }
}
src/muz/spacer/spacer_min_cut.h (deleted)
@@ -1,53 +0,0 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_min_cut.h

Abstract:
    min cut solver

Author:
    Bernhard Gleiss

Revision History:


--*/

#ifndef _SPACER_MIN_CUT_H_
#define _SPACER_MIN_CUT_H_

#include "ast/ast.h"
#include "util/vector.h"

namespace spacer {

    class spacer_min_cut {
    public:
        spacer_min_cut();

        unsigned new_node();
        void add_edge(unsigned i, unsigned j, unsigned capacity);
        void compute_min_cut(vector<unsigned>& cut_nodes);

    private:

        unsigned m_n; // number of vertices in the graph

        vector<vector<std::pair<unsigned, unsigned> > > m_edges; // map from node to all outgoing edges together with their weights (also contains "reverse edges")
        vector<unsigned> m_d; // approximation of distance from node to sink in residual graph
        vector<unsigned> m_pred; // predecessor-information for reconstruction of augmenting path
        vector<expr*> m_node_to_formula; // maps each node to the corresponding formula in the original proof

        void compute_initial_distances();
        unsigned get_admissible_edge(unsigned i);
        void augment_path();
        void compute_distance(unsigned i);
        void compute_reachable_nodes(vector<bool>& reachable);
        void compute_cut_and_add_lemmas(vector<bool>& reachable, vector<unsigned>& cut_nodes);
    };
}

#endif
src/muz/spacer/spacer_unsat_core_plugin.cpp
@@ -735,7 +735,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
m_node_to_formula[node_other] = m.get_fact(i);
m_node_to_formula[node_i] = m.get_fact(i);

- m_min_cut.add_edge(node_other, node_i, 1);
+ m_min_cut.add_edge(node_other, node_i);
}
}

@@ -765,12 +765,12 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
m_node_to_formula[node_j] = m.get_fact(j);
m_node_to_formula[node_other] = m.get_fact(j);

- m_min_cut.add_edge(node_j, node_other, 1);
+ m_min_cut.add_edge(node_j, node_other);
}
}

// finally connect nodes
- m_min_cut.add_edge(node_i, node_j, 1);
+ m_min_cut.add_edge(node_i, node_j);
}

/*
@@ -779,7 +779,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
*/
void unsat_core_plugin_min_cut::finalize()
{
- vector<unsigned int> cut_nodes;
+ unsigned_vector cut_nodes;
m_min_cut.compute_min_cut(cut_nodes);

for (unsigned cut_node : cut_nodes)
src/muz/spacer/spacer_unsat_core_plugin.h
@@ -19,7 +19,7 @@ Revision History:
#define _SPACER_UNSAT_CORE_PLUGIN_H_

#include "ast/ast.h"
- #include "muz/spacer/spacer_min_cut.h"
+ #include "util/min_cut.h"

namespace spacer {

@@ -109,7 +109,7 @@ private:

vector<expr*> m_node_to_formula; // maps each node to the corresponding formula in the original proof

- spacer_min_cut m_min_cut;
+ min_cut m_min_cut;
};
}
#endif
src/smt/theory_str.cpp
@@ -190,13 +190,13 @@ namespace smt {

expr * theory_str::rewrite_implication(expr * premise, expr * conclusion) {
ast_manager & m = get_manager();
- return m.mk_or(m.mk_not(premise), conclusion);
+ return m.mk_or(mk_not(m, premise), conclusion);
}

void theory_str::assert_implication(expr * premise, expr * conclusion) {
ast_manager & m = get_manager();
TRACE("str", tout << "asserting implication " << mk_ismt2_pp(premise, m) << " -> " << mk_ismt2_pp(conclusion, m) << std::endl;);
- expr_ref axiom(m.mk_or(m.mk_not(premise), conclusion), m);
+ expr_ref axiom(m.mk_or(mk_not(m, premise), conclusion), m);
assert_axiom(axiom);
}

@@ -545,7 +545,7 @@ namespace smt {
context & ctx = get_context();
ast_manager & m = get_manager();

- expr_ref ax1(m.mk_not(ctx.mk_eq_atom(s, mk_string(""))), m);
+ expr_ref ax1(mk_not(m, ctx.mk_eq_atom(s, mk_string(""))), m);
assert_axiom(ax1);

{
@@ -557,7 +557,7 @@ namespace smt {
SASSERT(zero);
// build LHS > RHS and assert
// we have to build !(LHS <= RHS) instead
- expr_ref lhs_gt_rhs(m.mk_not(m_autil.mk_le(len_str, zero)), m);
+ expr_ref lhs_gt_rhs(mk_not(m, m_autil.mk_le(len_str, zero)), m);
SASSERT(lhs_gt_rhs);
assert_axiom(lhs_gt_rhs);
}
@@ -592,7 +592,7 @@ namespace smt {
SASSERT(zero);
// build LHS > RHS and assert
// we have to build !(LHS <= RHS) instead
- expr_ref lhs_gt_rhs(m.mk_not(m_autil.mk_le(len_str, zero)), m);
+ expr_ref lhs_gt_rhs(mk_not(m, m_autil.mk_le(len_str, zero)), m);
SASSERT(lhs_gt_rhs);
assert_axiom(lhs_gt_rhs);
}
@@ -1089,7 +1089,7 @@ namespace smt {
m_autil.mk_ge(expr->get_arg(1), mk_int(0)),
// REWRITE for arithmetic theory:
// m_autil.mk_lt(expr->get_arg(1), mk_strlen(expr->get_arg(0)))
- m.mk_not(m_autil.mk_ge(m_autil.mk_add(expr->get_arg(1), m_autil.mk_mul(mk_int(-1), mk_strlen(expr->get_arg(0)))), mk_int(0)))
+ mk_not(m, m_autil.mk_ge(m_autil.mk_add(expr->get_arg(1), m_autil.mk_mul(mk_int(-1), mk_strlen(expr->get_arg(0)))), mk_int(0)))
), m);

expr_ref_vector and_item(m);
@@ -1130,7 +1130,7 @@ namespace smt {
expr_ref_vector innerItems(m);
innerItems.push_back(ctx.mk_eq_atom(expr->get_arg(1), mk_concat(ts0, ts1)));
innerItems.push_back(ctx.mk_eq_atom(mk_strlen(ts0), mk_strlen(expr->get_arg(0))));
- innerItems.push_back(m.mk_ite(ctx.mk_eq_atom(ts0, expr->get_arg(0)), expr, m.mk_not(expr)));
+ innerItems.push_back(m.mk_ite(ctx.mk_eq_atom(ts0, expr->get_arg(0)), expr, mk_not(m, expr)));
expr_ref then1(m.mk_and(innerItems.size(), innerItems.c_ptr()), m);
SASSERT(then1);

@@ -1143,7 +1143,7 @@ namespace smt {
, m);
SASSERT(topLevelCond);

- expr_ref finalAxiom(m.mk_ite(topLevelCond, then1, m.mk_not(expr)), m);
+ expr_ref finalAxiom(m.mk_ite(topLevelCond, then1, mk_not(m, expr)), m);
SASSERT(finalAxiom);
assert_axiom(finalAxiom);
}
@@ -1167,7 +1167,7 @@ namespace smt {
expr_ref_vector innerItems(m);
innerItems.push_back(ctx.mk_eq_atom(expr->get_arg(1), mk_concat(ts0, ts1)));
innerItems.push_back(ctx.mk_eq_atom(mk_strlen(ts1), mk_strlen(expr->get_arg(0))));
- innerItems.push_back(m.mk_ite(ctx.mk_eq_atom(ts1, expr->get_arg(0)), expr, m.mk_not(expr)));
+ innerItems.push_back(m.mk_ite(ctx.mk_eq_atom(ts1, expr->get_arg(0)), expr, mk_not(m, expr)));
expr_ref then1(m.mk_and(innerItems.size(), innerItems.c_ptr()), m);
SASSERT(then1);

@@ -1180,7 +1180,7 @@ namespace smt {
, m);
SASSERT(topLevelCond);

- expr_ref finalAxiom(m.mk_ite(topLevelCond, then1, m.mk_not(expr)), m);
+ expr_ref finalAxiom(m.mk_ite(topLevelCond, then1, mk_not(m, expr)), m);
SASSERT(finalAxiom);
assert_axiom(finalAxiom);
}
@@ -1204,7 +1204,7 @@ namespace smt {
if (haystackStr.contains(needleStr)) {
assert_axiom(ex);
} else {
- assert_axiom(m.mk_not(ex));
+ assert_axiom(mk_not(m, ex));
}
return;
}
@@ -1265,7 +1265,7 @@ namespace smt {
SASSERT(tmpLen);
thenItems.push_back(ctx.mk_eq_atom(expr->get_arg(0), mk_concat(x3, x4)));
thenItems.push_back(ctx.mk_eq_atom(mk_strlen(x3), tmpLen));
- thenItems.push_back(m.mk_not(mk_contains(x3, expr->get_arg(1))));
+ thenItems.push_back(mk_not(m, mk_contains(x3, expr->get_arg(1))));
expr_ref thenBranch(m.mk_and(thenItems.size(), thenItems.c_ptr()), m);
SASSERT(thenBranch);

@@ -1341,7 +1341,7 @@ namespace smt {

expr_ref ite1(m.mk_ite(
//m_autil.mk_lt(expr->get_arg(2), zeroAst),
- m.mk_not(m_autil.mk_ge(expr->get_arg(2), zeroAst)),
+ mk_not(m, m_autil.mk_ge(expr->get_arg(2), zeroAst)),
ctx.mk_eq_atom(resAst, mk_indexof(expr->get_arg(0), expr->get_arg(1))),
ite2
), m);
@@ -1384,7 +1384,7 @@ namespace smt {
thenItems.push_back(m_autil.mk_ge(indexAst, mk_int(0)));
// args[0] = x1 . args[1] . x2
// x1 doesn't contain args[1]
- thenItems.push_back(m.mk_not(mk_contains(x2, expr->get_arg(1))));
+ thenItems.push_back(mk_not(m, mk_contains(x2, expr->get_arg(1))));
thenItems.push_back(ctx.mk_eq_atom(indexAst, mk_strlen(x1)));

bool canSkip = false;
@@ -1402,7 +1402,7 @@ namespace smt {
expr_ref tmpLen(m_autil.mk_add(indexAst, mk_int(1)), m);
thenItems.push_back(ctx.mk_eq_atom(expr->get_arg(0), mk_concat(x3, x4)));
thenItems.push_back(ctx.mk_eq_atom(mk_strlen(x3), tmpLen));
- thenItems.push_back(m.mk_not(mk_contains(x4, expr->get_arg(1))));
+ thenItems.push_back(mk_not(m, mk_contains(x4, expr->get_arg(1))));
}
//----------------------------
// else branch
@@ -1452,7 +1452,7 @@ namespace smt {
argumentsValid_terms.push_back(m_autil.mk_ge(substrPos, zero));
// pos < strlen(base)
// --> pos + -1*strlen(base) < 0
- argumentsValid_terms.push_back(m.mk_not(m_autil.mk_ge(
+ argumentsValid_terms.push_back(mk_not(m, m_autil.mk_ge(
m_autil.mk_add(substrPos, m_autil.mk_mul(minusOne, substrLen)),
zero)));

@@ -1473,7 +1473,7 @@ namespace smt {

// Case 1: pos < 0 or pos >= strlen(base) or len < 0
// ==> (Substr ...) = ""
- expr_ref case1_premise(m.mk_not(argumentsValid), m);
+ expr_ref case1_premise(mk_not(m, argumentsValid), m);
SASSERT(case1_premise);
ctx.internalize(case1_premise, false);
expr_ref case1_conclusion(ctx.mk_eq_atom(expr, mk_string("")), m);
@@ -1505,7 +1505,7 @@ namespace smt {
case3_conclusion_terms.push_back(ctx.mk_eq_atom(mk_strlen(t3), substrLen));
case3_conclusion_terms.push_back(ctx.mk_eq_atom(expr, t3));
expr_ref case3_conclusion(mk_and(case3_conclusion_terms), m);
- expr_ref case3(rewrite_implication(m.mk_and(argumentsValid, m.mk_not(lenOutOfBounds)), case3_conclusion), m);
+ expr_ref case3(rewrite_implication(m.mk_and(argumentsValid, mk_not(m, lenOutOfBounds)), case3_conclusion), m);
SASSERT(case3);

ctx.internalize(case1, false);
@@ -1548,7 +1548,7 @@ namespace smt {
argumentsValid_terms.push_back(m_autil.mk_ge(substrPos, zero));
// pos < strlen(base)
// --> pos + -1*strlen(base) < 0
- argumentsValid_terms.push_back(m.mk_not(m_autil.mk_ge(
+ argumentsValid_terms.push_back(mk_not(m, m_autil.mk_ge(
m_autil.mk_add(substrPos, m_autil.mk_mul(minusOne, substrLen)),
zero)));
// len >= 0
@@ -1564,7 +1564,7 @@ namespace smt {

// Case 1: pos < 0 or pos >= strlen(base) or len < 0
// ==> (Substr ...) = ""
- expr_ref case1_premise(m.mk_not(argumentsValid), m);
+ expr_ref case1_premise(mk_not(m, argumentsValid), m);
expr_ref case1_conclusion(ctx.mk_eq_atom(expr, mk_string("")), m);
expr_ref case1(m.mk_implies(case1_premise, case1_conclusion), m);

@@ -1589,7 +1589,7 @@ namespace smt {
case3_conclusion_terms.push_back(ctx.mk_eq_atom(mk_strlen(t3), substrLen));
case3_conclusion_terms.push_back(ctx.mk_eq_atom(expr, t3));
expr_ref case3_conclusion(mk_and(case3_conclusion_terms), m);
- expr_ref case3(m.mk_implies(m.mk_and(argumentsValid, m.mk_not(lenOutOfBounds)), case3_conclusion), m);
+ expr_ref case3(m.mk_implies(m.mk_and(argumentsValid, mk_not(m, lenOutOfBounds)), case3_conclusion), m);

assert_axiom(case1);
assert_axiom(case2);
@@ -1630,7 +1630,7 @@ namespace smt {
expr_ref tmpLen(m_autil.mk_add(i1, mk_strlen(expr->get_arg(1)), mk_int(-1)), m);
thenItems.push_back(ctx.mk_eq_atom(expr->get_arg(0), mk_concat(x3, x4)));
thenItems.push_back(ctx.mk_eq_atom(mk_strlen(x3), tmpLen));
- thenItems.push_back(m.mk_not(mk_contains(x3, expr->get_arg(1))));
+ thenItems.push_back(mk_not(m, mk_contains(x3, expr->get_arg(1))));
thenItems.push_back(ctx.mk_eq_atom(result, mk_concat(x1, mk_concat(expr->get_arg(2), x2))));
// -----------------------
// false branch
@@ -1684,7 +1684,7 @@ namespace smt {
expr_ref tl(mk_str_var("tl"), m);
expr_ref conclusion1(ctx.mk_eq_atom(S, mk_concat(hd, tl)), m);
expr_ref conclusion2(ctx.mk_eq_atom(mk_strlen(hd), m_autil.mk_numeral(rational::one(), true)), m);
- expr_ref conclusion3(m.mk_not(ctx.mk_eq_atom(hd, mk_string("0"))), m);
+ expr_ref conclusion3(mk_not(m, ctx.mk_eq_atom(hd, mk_string("0"))), m);
expr_ref conclusion(m.mk_and(conclusion1, conclusion2, conclusion3), m);
SASSERT(premise);
SASSERT(conclusion);
@@ -1708,7 +1708,7 @@ namespace smt {
// axiom 1: N < 0 <==> (str.from-int N) = ""
expr * N = ex->get_arg(0);
{
- expr_ref axiom1_lhs(m.mk_not(m_autil.mk_ge(N, m_autil.mk_numeral(rational::zero(), true))), m);
+ expr_ref axiom1_lhs(mk_not(m, m_autil.mk_ge(N, m_autil.mk_numeral(rational::zero(), true))), m);
expr_ref axiom1_rhs(ctx.mk_eq_atom(ex, mk_string("")), m);
expr_ref axiom1(ctx.mk_eq_atom(axiom1_lhs, axiom1_rhs), m);
SASSERT(axiom1);
@@ -1947,7 +1947,7 @@ namespace smt {
// inconsistency check: value
if (!can_two_nodes_eq(eqc_nn1, eqc_nn2)) {
TRACE("str", tout << "inconsistency detected: " << mk_pp(eqc_nn1, m) << " cannot be equal to " << mk_pp(eqc_nn2, m) << std::endl;);
- expr_ref to_assert(m.mk_not(ctx.mk_eq_atom(eqc_nn1, eqc_nn2)), m);
+ expr_ref to_assert(mk_not(m, ctx.mk_eq_atom(eqc_nn1, eqc_nn2)), m);
assert_axiom(to_assert);
// this shouldn't use the integer theory at all, so we don't allow the option of quick-return
return false;
@@ -2160,7 +2160,7 @@ namespace smt {
expr_ref implyR11(ctx.mk_eq_atom(mk_strlen(arg1), mk_int(makeUpLenArg1)), m);
assert_implication(implyL11, implyR11);
} else {
- expr_ref neg(m.mk_not(implyL11), m);
+ expr_ref neg(mk_not(m, implyL11), m);
assert_axiom(neg);
}
}
@@ -2231,7 +2231,7 @@ namespace smt {
expr_ref implyR11(ctx.mk_eq_atom(mk_strlen(arg0), mk_int(makeUpLenArg0)), m);
assert_implication(implyL11, implyR11);
} else {
- expr_ref neg(m.mk_not(implyL11), m);
+ expr_ref neg(mk_not(m, implyL11), m);
assert_axiom(neg);
}
}
@@ -2762,7 +2762,7 @@ namespace smt {
}

if (!can_two_nodes_eq(new_nn1, new_nn2)) {
- expr_ref detected(m.mk_not(ctx.mk_eq_atom(new_nn1, new_nn2)), m);
+ expr_ref detected(mk_not(m, ctx.mk_eq_atom(new_nn1, new_nn2)), m);
TRACE("str", tout << "inconsistency detected: " << mk_ismt2_pp(detected, m) << std::endl;);
assert_axiom(detected);
return;
@@ -5008,7 +5008,7 @@ namespace smt {
implyR = boolVar;
} else {
//implyR = Z3_mk_eq(ctx, boolVar, Z3_mk_false(ctx));
- implyR = m.mk_not(boolVar);
+ implyR = mk_not(m, boolVar);
}
} else {
// ------------------------------------------------------------------------------------------------
@@ -5037,7 +5037,7 @@ namespace smt {
litems.push_back(ctx.mk_eq_atom(substrAst, aConcat));
}
//implyR = Z3_mk_eq(ctx, boolVar, Z3_mk_false(ctx));
- implyR = m.mk_not(boolVar);
+ implyR = mk_not(m, boolVar);
break;
}
}
@@ -5076,7 +5076,7 @@ namespace smt {
implyR = boolVar;
} else {
// implyR = Z3_mk_eq(ctx, boolVar, Z3_mk_false(ctx));
- implyR = m.mk_not(boolVar);
+ implyR = mk_not(m, boolVar);
}
}

@@ -5146,7 +5146,7 @@ namespace smt {
litems.push_back(ctx.mk_eq_atom(substrAst, aConcat));
}
expr_ref implyLHS(mk_and(litems), m);
- expr_ref implyR(m.mk_not(boolVar), m);
+ expr_ref implyR(mk_not(m, boolVar), m);
assert_implication(implyLHS, implyR);
break;
}
@@ -6515,7 +6515,7 @@ namespace smt {
if (matchRes) {
assert_implication(implyL, boolVar);
} else {
- assert_implication(implyL, m.mk_not(boolVar));
+ assert_implication(implyL, mk_not(m, boolVar));
}
}
}
@@ -6603,7 +6603,7 @@ namespace smt {
<< arg1_str << "\" + \"" << arg2_str <<
"\" != \"" << const_str << "\"" << "\n";);
expr_ref equality(ctx.mk_eq_atom(concat, str), m);
- expr_ref diseq(m.mk_not(equality), m);
+ expr_ref diseq(mk_not(m, equality), m);
assert_axiom(diseq);
return;
}
@@ -6621,7 +6621,7 @@ namespace smt {
"\" is longer than \"" << const_str << "\","
<< " so cannot be concatenated with anything to form it" << "\n";);
expr_ref equality(ctx.mk_eq_atom(newConcat, str), m);
- expr_ref diseq(m.mk_not(equality), m);
+ expr_ref diseq(mk_not(m, equality), m);
assert_axiom(diseq);
return;
} else {
@@ -6635,7 +6635,7 @@ namespace smt {
<< "actually \"" << arg2_str << "\""
<< "\n";);
expr_ref equality(ctx.mk_eq_atom(newConcat, str), m);
- expr_ref diseq(m.mk_not(equality), m);
+ expr_ref diseq(mk_not(m, equality), m);
assert_axiom(diseq);
return;
} else {
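All of the theory_str.cpp hunks above swap m.mk_not(e) for the free helper mk_not(m, e). As a rough, illustrative sketch (not code from this commit; the real helper in ast/ast_util.h may differ in detail), such a helper returns the negation of e but collapses a double negation instead of stacking another one on top:

    // Illustrative sketch only: negate e, but turn (not (not a)) back into a.
    expr_ref mk_not_sketch(ast_manager & m, expr * e) {
        if (m.is_not(e)) {
            return expr_ref(to_app(e)->get_arg(0), m); // strip the outer negation
        }
        return expr_ref(m.mk_not(e), m);               // otherwise wrap e in a negation
    }
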
src/util/CMakeLists.txt
@@ -28,6 +28,7 @@ z3_add_component(util
    lbool.cpp
    luby.cpp
    memory_manager.cpp
+   min_cut.cpp
    mpbq.cpp
    mpf.cpp
    mpff.cpp
src/util/min_cut.cpp (new file, 229 lines)
@@ -0,0 +1,229 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    min_cut.cpp

Abstract:
    min cut solver

Author:
    Bernhard Gleiss

Revision History:


--*/
#include "util/min_cut.h"

min_cut::min_cut() {
    // push back two empty vectors for source and sink
    m_edges.push_back(edge_vector());
    m_edges.push_back(edge_vector());
}

unsigned min_cut::new_node() {
    m_edges.push_back(edge_vector());
    return m_edges.size() - 1;
}

void min_cut::add_edge(unsigned int i, unsigned int j) {
    m_edges.reserve(i + 1);
    m_edges[i].push_back(edge(j, 1));
    TRACE("spacer.mincut", tout << "adding edge (" << i << "," << j << ")\n";);
}

void min_cut::compute_min_cut(unsigned_vector& cut_nodes) {
    if (m_edges.size() == 2) {
        return;
    }

    m_d.resize(m_edges.size());
    m_pred.resize(m_edges.size());

    // compute initial distances and number of nodes
    compute_initial_distances();

    unsigned i = 0;

    while (m_d[0] < m_edges.size()) {
        unsigned j = get_admissible_edge(i);

        if (j < m_edges.size()) {
            // advance(i)
            m_pred[j] = i;
            i = j;

            // if i is the sink, augment path
            if (i == 1) {
                augment_path();
                i = 0;
            }
        }
        else {
            // retreat
            compute_distance(i);
            if (i != 0) {
                i = m_pred[i];
            }
        }
    }

    // split nodes into reachable and unreachable ones
    bool_vector reachable(m_edges.size());
    compute_reachable_nodes(reachable);

    // find all edges between reachable and unreachable nodes and for each such edge, add corresponding lemma to unsat-core
    compute_cut_and_add_lemmas(reachable, cut_nodes);
}

void min_cut::compute_initial_distances() {
    unsigned_vector todo;
    bool_vector visited(m_edges.size());

    todo.push_back(0); // start at the source, since we do postorder traversal

    while (!todo.empty()) {
        unsigned current = todo.back();

        // if we haven't already visited current
        if (!visited[current]) {
            bool exists_unvisited_parent = false;

            // add unprocessed parents to stack for DFS. If there is at least one unprocessed parent, don't compute the result
            // for current now, but wait until those unprocessed parents are processed
            for (auto const& edge : m_edges[current]) {
                unsigned parent = edge.node;

                // if we haven't visited the current parent yet
                if (!visited[parent]) {
                    // add it to the stack
                    todo.push_back(parent);
                    exists_unvisited_parent = true;
                }
            }

            // if we already visited all parents, we can visit current too
            if (!exists_unvisited_parent) {
                visited[current] = true;
                todo.pop_back();

                compute_distance(current); // I.H. all parent distances are already computed
            }
        }
        else {
            todo.pop_back();
        }
    }
}

unsigned min_cut::get_admissible_edge(unsigned i) {
    for (const auto& edge : m_edges[i]) {
        if (edge.weight > 0 && m_d[i] == m_d[edge.node] + 1) {
            return edge.node;
        }
    }
    return m_edges.size(); // no element found
}

void min_cut::augment_path() {
    // find bottleneck capacity
    unsigned max = std::numeric_limits<unsigned int>::max();
    unsigned k = 1;
    while (k != 0) {
        unsigned l = m_pred[k];
        for (const auto& edge : m_edges[l]) {
            if (edge.node == k) {
                max = std::min(max, edge.weight);
            }
        }
        k = l;
    }

    k = 1;
    while (k != 0) {
        unsigned l = m_pred[k];

        // decrease capacity
        for (auto& edge : m_edges[l]) {
            if (edge.node == k) {
                edge.weight -= max;
            }
        }
        // increase reverse flow
        bool already_exists = false;
        for (auto& edge : m_edges[k]) {
            if (edge.node == l) {
                already_exists = true;
                edge.weight += max;
            }
        }
        if (!already_exists) {
            m_edges[k].push_back(edge(l, max));
        }
        k = l;
    }
}

void min_cut::compute_distance(unsigned i) {
    if (i == 1) { // sink node
        m_d[1] = 0;
    }
    else {
        unsigned min = std::numeric_limits<unsigned int>::max();

        // find edge (i,j) with positive residual capacity and smallest distance
        for (const auto& edge : m_edges[i]) {
            if (edge.weight > 0) {
                min = std::min(min, m_d[edge.node] + 1);
            }
        }
        m_d[i] = min;
    }
}

void min_cut::compute_reachable_nodes(bool_vector& reachable) {
    unsigned_vector todo;

    todo.push_back(0);
    while (!todo.empty()) {
        unsigned current = todo.back();
        todo.pop_back();

        if (!reachable[current]) {
            reachable[current] = true;

            for (const auto& edge : m_edges[current]) {
                if (edge.weight > 0) {
                    todo.push_back(edge.node);
                }
            }
        }
    }
}

void min_cut::compute_cut_and_add_lemmas(bool_vector& reachable, unsigned_vector& cut_nodes) {
    unsigned_vector todo;
    bool_vector visited(m_edges.size());

    todo.push_back(0);
    while (!todo.empty()) {
        unsigned current = todo.back();
        todo.pop_back();

        if (!visited[current]) {
            visited[current] = true;

            for (const auto& edge : m_edges[current]) {
                unsigned successor = edge.node;
                if (reachable[successor]) {
                    todo.push_back(successor);
                }
                else {
                    cut_nodes.push_back(successor);
                }
            }
        }
    }
}
src/util/min_cut.h (new file, 56 lines)
@@ -0,0 +1,56 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    min_cut.h

Abstract:
    min cut solver

Author:
    Bernhard Gleiss

Revision History:


--*/

#ifndef MIN_CUT_H_
#define MIN_CUT_H_

#include "ast/ast.h"
#include "util/vector.h"


class min_cut {
public:
    min_cut();

    unsigned new_node();
    /*
       \brief add an edge (with unit capacity)
    */
    void add_edge(unsigned i, unsigned j);

    void compute_min_cut(unsigned_vector& cut_nodes);

private:

    typedef svector<bool> bool_vector;
    struct edge { unsigned node; unsigned weight; edge(unsigned n, unsigned w): node(n), weight(w) {} edge(): node(0), weight(0) {} };
    typedef svector<edge> edge_vector;

    vector<edge_vector> m_edges; // map from node to all outgoing edges together with their weights (also contains "reverse edges")
    unsigned_vector m_d; // approximation of distance from node to sink in residual graph
    unsigned_vector m_pred; // predecessor-information for reconstruction of augmenting path

    void compute_initial_distances();
    unsigned get_admissible_edge(unsigned i);
    void augment_path();
    void compute_distance(unsigned i);
    void compute_reachable_nodes(bool_vector& reachable);
    void compute_cut_and_add_lemmas(bool_vector& reachable, unsigned_vector& cut_nodes);
};

#endif
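A minimal usage sketch of the relocated solver (illustrative only, not part of the commit; the graph is made up). The constructor creates node 0 as the source and node 1 as the sink, new_node() returns further node ids, every edge added through add_edge() has unit capacity, and compute_min_cut() fills cut_nodes with the nodes just across the cut:

    #include "util/min_cut.h"

    void min_cut_usage_sketch() {
        min_cut mc;
        unsigned a = mc.new_node();        // node 2
        unsigned b = mc.new_node();        // node 3

        mc.add_edge(0, a);                 // source -> a
        mc.add_edge(a, b);                 // a -> b
        mc.add_edge(b, 1);                 // b -> sink

        unsigned_vector cut_nodes;
        mc.compute_min_cut(cut_nodes);     // the single unit-capacity path 0 -> a -> b -> 1
                                           // gets saturated; cut_nodes ends up holding node a
    }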